Diffstat (limited to 'drivers')
784 files changed, 30646 insertions, 6880 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 002838d23b86..cc57bab146b5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS
 
 config ACPI_PROCESSOR_CSTATE
 	def_bool y
+	depends on ACPI_PROCESSOR
 	depends on IA64 || X86
 
 config ACPI_PROCESSOR_IDLE
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2c4dda0787e8..5379bc3f275d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
 	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
 	acpi_scan_add_handler(&processor_container_handler);
 }
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+	static bool cst_control_claimed;
+	acpi_status status;
+
+	if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+		return true;
+
+	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+				    acpi_gbl_FADT.cst_control, 8);
+	if (ACPI_FAILURE(status)) {
+		pr_warn("ACPI: Failed to claim processor _CST control\n");
+		return false;
+	}
+
+	cst_control_claimed = true;
+	return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+				struct acpi_processor_power *info)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *cst;
+	acpi_status status;
+	u64 count;
+	int last_index = 0;
+	int i, ret = 0;
+
+	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "No _CST\n");
+		return -ENODEV;
+	}
+
+	cst = buffer.pointer;
+
+	/* There must be at least 2 elements. */
+	if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+		acpi_handle_warn(handle, "Invalid _CST output\n");
+		ret = -EFAULT;
+		goto end;
+	}
+
+	count = cst->package.elements[0].integer.value;
+
+	/* Validate the number of C-states. */
+	if (count < 1 || count != cst->package.count - 1) {
+		acpi_handle_warn(handle, "Inconsistent _CST data\n");
+		ret = -EFAULT;
+		goto end;
+	}
+
+	for (i = 1; i <= count; i++) {
+		union acpi_object *element;
+		union acpi_object *obj;
+		struct acpi_power_register *reg;
+		struct acpi_processor_cx cx;
+
+		/*
+		 * If there is not enough space for all C-states, skip the
+		 * excess ones and log a warning.
+		 */
+		if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+			acpi_handle_warn(handle,
+					 "No room for more idle states (limit: %d)\n",
+					 ACPI_PROCESSOR_MAX_POWER - 1);
+			break;
+		}
+
+		memset(&cx, 0, sizeof(cx));
+
+		element = &cst->package.elements[i];
+		if (element->type != ACPI_TYPE_PACKAGE)
+			continue;
+
+		if (element->package.count != 4)
+			continue;
+
+		obj = &element->package.elements[0];
+
+		if (obj->type != ACPI_TYPE_BUFFER)
+			continue;
+
+		reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+		obj = &element->package.elements[1];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.type = obj->integer.value;
+		/*
+		 * There are known cases in which the _CST output does not
+		 * contain C1, so if the type of the first state found is not
+		 * C1, leave an empty slot for C1 to be filled in later.
+		 */
+		if (i == 1 && cx.type != ACPI_STATE_C1)
+			last_index = 1;
+
+		cx.address = reg->address;
+		cx.index = last_index + 1;
+
+		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+			if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+				/*
+				 * In the majority of cases _CST describes C1 as
+				 * a FIXED_HARDWARE C-state, but if the command
+				 * line forbids using MWAIT, use CSTATE_HALT for
+				 * C1 regardless.
+				 */
+				if (cx.type == ACPI_STATE_C1 &&
+				    boot_option_idle_override == IDLE_NOMWAIT) {
+					cx.entry_method = ACPI_CSTATE_HALT;
+					snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+				} else {
+					cx.entry_method = ACPI_CSTATE_FFH;
+				}
+			} else if (cx.type == ACPI_STATE_C1) {
+				/*
+				 * In the special case of C1, FIXED_HARDWARE can
+				 * be handled by executing the HLT instruction.
+				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+			} else {
+				continue;
+			}
+		} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+			cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+				 cx.address);
+		} else {
+			continue;
+		}
+
+		if (cx.type == ACPI_STATE_C1)
+			cx.valid = 1;
+
+		obj = &element->package.elements[2];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.latency = obj->integer.value;
+
+		obj = &element->package.elements[3];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		memcpy(&info->states[++last_index], &cx, sizeof(cx));
+	}
+
+	acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+	info->count = last_index;
+
+end:
+	kfree(buffer.pointer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 2f380e7381d6..15c5b272e698 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
 	if (register_count) {
 		/*
 		 * if the function of acpi_video_register is already called,
-		 * don't register the acpi_vide_bus again and return no error.
+		 * don't register the acpi_video_bus again and return no error.
 		 */
 		goto leave;
 	}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 863ade9add6d..173447d50acf 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
 *
 * Module Name: acapps - common include for ACPI applications/tools
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
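
The two helpers added to acpi_processor.c above are designed to be used together by a cpuidle driver: acpi_processor_claim_cst_control() performs the one-time FADT SMI handshake that transfers _CST control to the OS, and acpi_processor_evaluate_cst() then parses each CPU's _CST package. Below is a minimal sketch of such a caller; the function name and logging are illustrative only, and the declarations are assumed to live in include/acpi/processor.h, which sits outside this drivers-only diffstat.

#include <linux/acpi.h>
#include <acpi/processor.h>

/* Illustrative caller, not part of this commit. */
static int example_probe_cstates(acpi_handle handle, u32 cpu)
{
	struct acpi_processor_power info = { };	/* states[] is filled from index 1 */
	int i, ret;

	/* One-time handshake: writes FADT.cst_control to the SMI command port. */
	if (!acpi_processor_claim_cst_control())
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(handle, cpu, &info);
	if (ret)
		return ret;

	for (i = 1; i <= info.count; i++)
		pr_info("C%d: type %d, latency %u us\n",
			i, info.states[i].type, info.states[i].latency);

	return 0;
}
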
* *****************************************************************************/ @@ -17,7 +17,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 54f81eac7ec9..89101e53324b 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -3,7 +3,7 @@ * * Name: accommon.h - Common include files for generation of ACPICA source * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index d5478cd4a857..ede4b9cc9e85 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 694cf206fa9a..a676daaa2da5 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -3,7 +3,7 @@ * * Name: acdebug.h - ACPI/AML debugger * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 82f81501566b..7ba6e308f146 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -3,7 +3,7 @@ * * Name: acdispat.h - dispatcher (parser to interpreter interface) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c8652f91054e..79f292687bd6 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -3,7 +3,7 @@ * * Name: acevents.h - Event subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index fd3beea93421..38ffa2c0a496 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -3,7 +3,7 @@ * * Name: acglobal.h - Declarations for global variables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index bcf8f7501db7..67f282e9e0af 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -3,7 +3,7 @@ * * Name: achware.h -- hardware specific interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 20706adbc148..a6d896cda2a5 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -3,7 +3,7 @@ * * Name: acinterp.h - Interpreter subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1ea52576f0a2..af58cd2dc9d3 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -3,7 +3,7 @@ * * Name: aclocal.h - Internal data types used across the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 283614e82a20..2269e10bc21b 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -3,7 +3,7 @@ * * Name: acmacros.h - C macros for the entire subsystem. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 7da1864798a0..e618ddfab2fd 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -3,7 +3,7 @@ * * Name: acnamesp.h - Namespace subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 8def0e3d690f..9f0219a8cb98 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -3,7 +3,7 @@ * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -260,7 +260,8 @@ struct acpi_object_index_field { /* The buffer_field is different in that it is part of a Buffer, not an op_region */ struct acpi_object_buffer_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */ + ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */ + union acpi_operand_object *buffer_obj; /* Containing Buffer object */ }; /****************************************************************************** diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9d78134428e3..8825394be9ab 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -3,7 +3,7 @@ * * Name: acopcode.h - AML opcode information for the AML parser and interpreter * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 6e32c97cba6c..bc00b85c0a8f 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -3,7 +3,7 @@ * * Module Name: acparser.h - AML Parser subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 387163b962a7..cd0f5df0ea23 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -3,7 +3,7 @@ * * Name: acpredef - Information table for ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 422cd8f2b92e..6de8a1650d3d 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -3,7 +3,7 @@ * * Name: acresrc.h - Resource Manager function prototypes * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 2043dff370b1..4c900c108f3f 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -3,7 +3,7 @@ * * Name: acstruct.h - Internal structs * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index dfbf1dbd4033..734624facda3 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -3,7 +3,7 @@ * * Name: actables.h - ACPI table management * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5fb50634e08e..7c89b470ec81 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -3,7 +3,7 @@ * * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 49e412edd7c6..1d541bbac4a3 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -5,7 +5,7 @@ * Declarations and definitions contained herein are derived * directly from the ACPI specification. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 7c3bd4ab60fc..e5234e001acf 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -3,7 +3,7 @@ * * Module Name: amlresrc.h - AML resource descriptors * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index 47d2e5059849..bb9600b867ee 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -3,7 +3,7 @@ * * Module Name: dbhistry - debugger HISTORY command * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index e1632b340182..aa71f65395d2 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer, if (ACPI_FAILURE(status) || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) { acpi_os_printf - ("Invalid adress space ID: must be between 0 and %u inclusive\n", + ("Invalid address space ID: must be between 0 and %u inclusive\n", ACPI_NUM_PREDEFINED_REGIONS - 1); return (AE_OK); } diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 85b34d02233e..ad17f62e51d9 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -4,7 +4,7 @@ * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 5034fab9cf69..4b5b6e859f62 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -4,7 +4,7 @@ * Module Name: dscontrol - Support for execution control opcodes - * if/else/while/return * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 0d3e1ced1f57..63bc5f19fb82 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -3,7 +3,7 @@ * * Module Name: dsdebug - Parser/Interpreter interface - debugging * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index faa38a22263a..c901f5aec739 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -3,7 +3,7 @@ * * Module Name: dsfield - Dispatcher field routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -243,7 +243,7 @@ cleanup: * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index a1ffed29903b..9be2a309424c 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -3,7 +3,7 @@ * * Module Name: dsinit - Object initialization namespace walk * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index f59b4d944f7f..cf67caff878a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 179129a2deb1..c0a14a6a2c20 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dsobject - Dispatcher object management routines
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 10f32b62608e..d9c26e720cb7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dsopcode - Dispatcher support for regions and fields
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
 	}
 
 	obj_desc->buffer_field.buffer_obj = buffer_desc;
+	obj_desc->buffer_field.is_create_field =
+	    aml_opcode == AML_CREATE_FIELD_OP;
 
 	/* Reference count for buffer_desc inherits obj_desc count */
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 997faa10f615..d869568d55c2 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dspkginit - Completion of deferred package initialization
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d75aae304595..5e81a1ae44cf 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
 * Module Name: dswexec - Dispatcher method execution callbacks;
 * dispatch to interpreter.
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index c88fd31208a5..697974e37edf 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dswload - Dispatcher first pass namespace load callbacks
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state));
 
+	/*
+	 * Disassembler: handle create field operators here.
+	 *
+	 * create_buffer_field is a deferred op that is typically processed in load
+	 * pass 2. However, disassembly of control method contents walks the parse
+	 * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
+	 * in a later walk. This is a problem when there is a control method that
+	 * has the same name as the AML_CREATE object. In this case, any use of the
+	 * name segment will be detected as a method call rather than a reference
+	 * to a buffer field.
+	 *
+	 * This earlier creation during disassembly solves this issue by inserting
+	 * the named object in the ACPI namespace so that references to this name
+	 * are parsed as a name string rather than as a method call.
+	 */
+	if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+	    (walk_state->op_info->flags & AML_CREATE)) {
+		status = acpi_ds_create_buffer_field(op, walk_state);
+		return_ACPI_STATUS(status);
+	}
+
 	/* We are only interested in opcodes that have an associated name */
 
 	if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 935a8e2623e4..b31457ca926c 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 39acf7b286da..9c397642fed7 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dswscope - Scope stack manipulation
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index de79f835a373..809a0c0536b5 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
 *
 * Module Name: dswstate - Dispatcher parse tree walk management routines
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e2f5a05c066..8c83d8c620dc 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
 *
 * Module Name: evevent - Fixed Event handling and dispatch
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5c77bee5d31f..0ced84ae13e4 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
 *
 * Module Name: evglock - Global Lock support
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 344feba29063..3e39907fedd9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
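
The name-collision scenario described in the dswload.c comment above is easiest to see with a concrete case. The ASL fragment below is constructed for illustration (it does not come from this commit) and is rendered as a C comment to match the surrounding code:

/*
 * Scope (\_SB) { Method (XFLD, 1) { ... } }     // a method named XFLD
 *
 * Method (M000)
 * {
 *     CreateDWordField (BUF0, 8, XFLD)          // a buffer field reusing the NameSeg
 *     Local0 = XFLD                             // must disassemble as a field reference
 * }
 *
 * Without the namespace node created during the ACPI_PARSE_LOAD_PASS1 walk, the
 * disassembler would match the final XFLD against the method and consume the
 * following term as its argument, corrupting the listing. Creating the buffer
 * field node early makes the reference resolve to a plain name string instead.
 */
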
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 9c7adaa7b582..132adff1e131 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -3,7 +3,7 @@ * * Module Name: evgpeblk - GPE block creation and initialization. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 70d21d5ec5f3..6effd8076dcc 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -3,7 +3,7 @@ * * Module Name: evgpeinit - System GPE initialization and update * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 917892227e09..738873e876ca 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -3,7 +3,7 @@ * * Module Name: evgpeutil - GPE utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 3ef4e27995f0..5884eba047f7 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -3,7 +3,7 @@ * * Module Name: evhandler - Support for Address Space handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index aa98fe07cd1b..ce1eda6beb84 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -3,7 +3,7 @@ * * Module Name: evmisc - Miscellaneous event manager support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 1ff126460007..738d4b231f34 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -3,7 +3,7 @@ * * Module Name: evregion - Operation Region support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index aee09640d710..aefc0145e583 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -3,7 +3,7 @@ * * Module Name: evrgnini- ACPI address_space (op_region) init * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 279ef0557aa3..e4e012297eee 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -3,7 +3,7 @@ * * Module Name: evxface - External interfaces for ACPI events * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index e528fe56b755..1a15b0087379 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -3,7 +3,7 @@ * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 04a40d563dd6..2c39ff2a7406 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -3,7 +3,7 @@ * * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 47265b073e6f..da97fd0c6b51 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -4,7 +4,7 @@ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index c7af07566b7b..43711412722f 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -3,7 +3,7 @@ * * Module Name: exconcat - Concatenate-type AML operators * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 46a8baf28bd0..68efd704e2dc 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -3,7 +3,7 @@ * * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index ca2966bacb50..50c7aad2e86d 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -3,7 +3,7 @@ * * Module Name: exconvrt - Object conversion routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index f376fc00064e..a17482428b46 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -3,7 +3,7 @@ * * Module Name: excreate - Named object creation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index b1aeec8cac55..a5223dcaee70 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -3,7 +3,7 @@ * * Module Name: exdebug - Support for stores to the AML Debug Object * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index a9bc938a3b55..47a4d9a40d6b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
 *
 * Module Name: exdump - Interpreter debug output routines
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680..e85eb31e5075 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
 *
 * Module Name: exfield - AML execution - field_unit read/write
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
 *
 * RETURN: Status
 *
 * DESCRIPTION: Read from a named field. Returns either an Integer or a
- * Buffer, depending on the size of the field.
+ * Buffer, depending on the size of the field and whether the
+ * field was created by the create_field() operator.
 *
 ******************************************************************************/
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
 	 * the use of arithmetic operators on the returned value if the
 	 * field size is equal or smaller than an Integer.
 	 *
+	 * However, all buffer fields created by the create_field operator need
+	 * to remain buffers to match other AML interpreter implementations.
+	 *
 	 * Note: Field.length is in bits.
 	 */
 
 	buffer_length =
 	    (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
-	if (buffer_length > acpi_gbl_integer_byte_width) {
+	if (buffer_length > acpi_gbl_integer_byte_width ||
+	    (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+	     obj_desc->buffer_field.is_create_field)) {
 
 		/* Field is too large for an Integer, create a Buffer instead */
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 95a0dcb4f7b9..ade35ff1c7ba 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
 *
 * Module Name: exfldio - Aml Field I/O
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 60e854965af9..717e3998fd77 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
 *
 * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 775cd62af5b3..9ff247cba571 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
 *
 * Module Name: exmutex - ASL Mutex Acquire/Release functions
 *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
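
Together with the is_create_field flag set in dsopcode.c earlier in this diff, the exfield.c hunk above changes only the read path: a field wider than the AML Integer still reads back as a Buffer, and a buffer field made by create_field() now does so even when it would fit in an Integer. A distilled form of the patched condition, written as a standalone predicate for illustration (the helper name is hypothetical; the types and globals come from the ACPICA-internal headers touched in this series):

#include "acpi.h"
#include "accommon.h"

/* Hypothetical helper mirroring the patched test in acpi_ex_read_data_from_field(). */
static u8 example_reads_back_as_buffer(union acpi_operand_object *obj_desc)
{
	/* field.bit_length is in bits; acpi_gbl_integer_byte_width is 4 or 8 */
	acpi_size buffer_length =
	    (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);

	return (u8)(buffer_length > acpi_gbl_integer_byte_width ||
		    (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
		     obj_desc->buffer_field.is_create_field));
}
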
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 6b76be5212a4..74f8b0d0452b 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -3,7 +3,7 @@ * * Module Name: exnames - interpreter/scanner name load/execute * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 06e35ea09823..a46d685a3ffc 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -3,7 +3,7 @@ * * Module Name: exoparg1 - AML execution - opcodes with 1 argument * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 5e4a31a11df4..03241d18ac1d 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -3,7 +3,7 @@ * * Module Name: exoparg2 - AML execution - opcodes with 2 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index a4ebce417930..c8d0d75fc450 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -3,7 +3,7 @@ * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 31385a0b2dab..55d0fa056fe7 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -3,7 +3,7 @@ * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 728d752f7adc..a4e306690a21 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -3,7 +3,7 @@ * * Module Name: exprep - ACPI AML field prep utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index c08521194b29..d15a66de26c0 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -3,7 +3,7 @@ * * Module Name: exregion - ACPI default op_region (address space) handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index b223d01e6bf8..3e4018678c09 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -3,7 +3,7 @@ * * Module Name: exresnte - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 36da5c0ef69c..912a078c60a4 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -3,7 +3,7 @@ * * Module Name: exresolv - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index bdfe4d33b483..4d1b22971d58 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -3,7 +3,7 @@ * * Module Name: exresop - AML Interpreter operand/object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index c5aa4b0deb70..760bc7cef55a 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -3,7 +3,7 @@ * * Module Name: exserial - field_unit support for serial address spaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 7f3c3571c292..3adc0a29d890 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -3,7 +3,7 @@ * * Module Name: exstore - AML Interpreter object store support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 4e43c8277f07..8c34f4e2ab8f 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -4,7 +4,7 @@ * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index dc9e2b1c1ad9..dc66696080a5 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -3,7 +3,7 @@ * * Module Name: exstorob - AML object store support, store to object * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index a538f7799b78..f329b01672bb 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -3,7 +3,7 @@ * * Module Name: exsystem - Interface to OS services * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index db7f93ca539f..832a47885b99 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -3,7 +3,7 @@ * * Module Name: extrace - Support for interpreter execution tracing * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 75380be1c2ef..8fefa6feac2f 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -3,7 +3,7 @@ * * Module Name: exutils - interpreter/scanner utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 926f7e080f22..9b9aac27ff7e 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -3,7 +3,7 @@ * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index dee3affaca49..d9be5d0545d4 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -4,7 +4,7 @@ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 565bd3f29f31..1b4252bdcd0b 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -3,7 +3,7 @@ * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index b62db8ec446f..243a25add28f 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -4,7 +4,7 @@ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the * original/legacy sleep/PM registers. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 2fb9f75d71c5..07473ddfa9a9 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -3,7 +3,7 @@ * * Name: hwtimer.c - ACPI Power Management Timer Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index cd576153257c..4d94861e6093 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -3,7 +3,7 @@ * * Module Name: hwvalid - I/O request validation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index c4fd97104024..134dbfadcd15 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -3,7 +3,7 @@ * * Module Name: hwxface - Public ACPICA hardware interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 2919746c9041..a4b66f4b2714 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -3,7 +3,7 @@ * * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 0e97ed38973f..d5e8405e9d8f 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -3,7 +3,7 @@ * * Module Name: nsarguments - Validation of args for ACPI predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index c86d0770ed6e..c86c82939ebb 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -4,7 +4,7 @@ * Module Name: nsconvert - Object conversions for objects returned by * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 9ad340f644a1..994f0b556c60 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 73e5c83c8c9f..b691fe20e384 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 61e9dfc9fe8c..e16f6a0c2c3f 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -3,7 +3,7 @@ * * Module Name: nsinit - namespace initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index d7c4d6e8e21e..9ba17891edb6 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -3,7 +3,7 @@ * * Module Name: nsload - namespace loading/expanding/contracting procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index f16cf5e4742c..7e74a765e785 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -3,7 +3,7 @@ * * Module Name: nsparse - namespace interface to AML parser * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 2f9d93122d0c..0cea9c363ace 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -3,7 +3,7 @@ * * Module Name: nspredef - Validation of ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 9a80e3b23496..237b3ddeb075 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -3,7 +3,7 @@ * * Module Name: nsprepkg - Validation of package objects for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index be86fea8e4d4..90db2d85e7f5 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -3,7 +3,7 @@ * * Module Name: nsrepair - Repair for objects returned by predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 663d85e0adba..125143c41bb8 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -4,7 +4,7 @@ * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index b8d007c84d32..e66abdab8f31 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -4,7 +4,7 @@ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index ceea6af79d12..b7f3e8603ad8 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -3,7 +3,7 @@ * * Module Name: nswalk - Functions for walking the ACPI namespace * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 161e60ddfb69..984129dcaa0c 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -4,7 +4,7 @@ * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index e62c7897fdf1..3b40db4ad9f3 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -3,7 +3,7 @@ * * Module Name: psargs - Parse AML opcode arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 207805047bc4..3cf0687b9915 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -3,7 +3,7 @@ * * Module Name: psloop - Main AML parse loop * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index ded2779fc8ea..2480c26c5171 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -3,7 +3,7 @@ * * Module Name: psobject - Support for parse objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 43775c5ce17c..28af49263ebf 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -3,7 +3,7 @@ * * Module Name: psopcode - Parser/Interpreter opcode information table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 15e7563829f1..ab9327f6a63c 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -3,7 +3,7 @@ * * Module Name: psopinfo - AML opcode information functions and dispatch tables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 9b386530ffbe..c780046bf294 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -3,7 +3,7 @@ * * Module Name: psparse - Parser top level AML parse routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index f153ca804740..fceb311995e9 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -3,7 +3,7 @@ * * Module Name: psscope - Parser scope stack management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 22d8a2becdd0..c8aef0694864 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -3,7 +3,7 @@ * * Module Name: pstree - Parser op tree manipulation/traversal/search * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 2512f584fa3c..00efae2f95ba 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -3,7 +3,7 @@ * * Module Name: psutils - Parser miscellaneous utilities (Parser only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index cf91841297c2..0fe3adf6b0e5 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -3,7 +3,7 @@ * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index ee2ee2c858f2..1bbfc8def388 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -3,7 +3,7 @@ * * Module Name: psxface - Parser external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 2cf36451e46f..523b1e9b98d4 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -3,7 +3,7 @@ * * Module Name: tbdata - Table manager data structure functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 0041bfba9abc..907edc5edba7 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -3,7 +3,7 @@ * * Module Name: tbfadt - FADT table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index b2abb40023a6..56d81e490a5c 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -3,7 +3,7 @@ * * Module Name: tbfind - find table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index ef1ffd36ab3f..0bb15add2245 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -3,7 +3,7 @@ * * Module Name: tbinstal - ACPI table installation and removal * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 4764f849cb78..0b3494ad9a70 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -3,7 +3,7 @@ * * Module Name: tbprint - Table output utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index c5f0b8ec70cc..dfe1ac3ae34a 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -3,7 +3,7 @@ * * Module Name: tbutils - ACPI Table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 1640685bf4ae..f8403d480318 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -3,7 +3,7 @@ * * Module Name: tbxface - ACPI table-oriented external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 0782acf85722..bcba993d4dac 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -3,7 +3,7 @@ * * Module Name: tbxfload - Table load/unload external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index e2859d09ca2e..0edc6ef5d46d 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -3,7 +3,7 @@ * * Module Name: tbxfroot - Find the root ACPI table (RSDT) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index bb260376bd59..99fa48722cf6 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -3,7 +3,7 @@ * * Module Name: utaddress - op_region address range check * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index d64da4d9e8d0..303ab51b4fcf 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -3,7 +3,7 @@ * * Module Name: utalloc - local memory allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index f6cd7d4f698b..d78656d960e8 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -3,7 +3,7 @@ * * Module Name: utascii - Utility ascii functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index db897af1de05..f2ec427f4e29 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -3,7 +3,7 @@ * * Module Name: utbuffer - Buffer dump routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 8533fce7fa93..1b03a2747401 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -3,7 +3,7 @@ * * Module Name: utcache - local cache allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 1fb8327f3c3b..41bdd0278dd8 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -3,7 +3,7 @@ * * Module Name: utcopy - Internal to external object translation utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 5b169b5f0f1a..0c8cb0612414 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -3,7 +3,7 @@ * * Module Name: utdebug - Debug print/trace routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 65beaa237669..befdd13b403b 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -3,7 +3,7 @@ * * Module Name: utdecode - Utility decoding routines (value-to-string) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 558a9f3b0678..8180d1a458f5 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -3,7 +3,7 @@ * * Module Name: uteval - Object evaluation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index b0622ec4bb85..e6dcbdc3fc6e 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -3,7 +3,7 @@ * * Module Name: utglobal - Global variables for the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index b6da135d5f41..0e02f12513dc 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -3,7 +3,7 @@ * * Module Name: uthex -- Hex/ASCII support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 30198c828ab6..3bb06935a2ad 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -3,7 +3,7 @@ * * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 6f33e7c72327..fdbc397c038d 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -3,7 +3,7 @@ * * Module Name: utinit - Common ACPI subsystem initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index 8b4ff11d617a..46be549539e7 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -3,7 +3,7 @@ * * Module Name: utlock - Reader/Writer lock interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index eee97a902696..3e60bdac2200 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -3,7 +3,7 @@ * * Module Name: utobject - ACPI object create/delete/size/cache routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index ad2b218039d0..0a01c08dad8a 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -3,7 +3,7 @@ * * Module Name: utosi - Support for the _OSI predefined control method * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 1b0f68f5ed8c..05fe3470fb93 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -3,7 +3,7 @@ * * Module Name: utpredef - support functions for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 5839f2fa7400..a874dac7db5c 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -3,7 +3,7 @@ * * Module Name: utprint - Formatted printing routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 14de4d15e618..d366be431a84 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -3,7 +3,7 @@ * * Module Name: uttrack - Memory allocation tracking routines (debug only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 0a7cf8007643..b8039954b0d1 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -3,7 +3,7 @@ * * Module Name: utuuid -- UUID support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index f497c4b30e65..ca7c9f0144ef 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -3,7 +3,7 @@ * * Module Name: utxface - External interfaces, miscellaneous utility functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index cf769e94fe0f..653e3bb20036 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -3,7 +3,7 @@ * * Module Name: utxfinit - External interfaces for ACPICA initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 8906c80175e6..103acbbfcf9a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); + timer_setup(&ghes->timer, ghes_poll_func, 0); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 33f71983e001..6078064684c6 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -298,6 +298,59 @@ out: return status; } +struct iort_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_id_count_workaround; + +static struct iort_workaround_oem_info wa_info[] __initdata = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static void __init +iort_check_id_count_workaround(struct acpi_table_header *tbl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_id_count_workaround = true; + pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); + break; + } + } +} + +static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) +{ + u32 map_max = map->input_base + map->id_count; + + /* + * The IORT specification revision D (Section 3, table 4, page 9) says + * Number of IDs = The number of IDs in the range minus one, but the + * IORT code ignored the "minus one", and some firmware did that too, + * so apply a workaround here to keep compatible with both the spec + * compliant and non-spec compliant firmwares. 
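Concretely: on a spec-compliant table, id_count already encodes "number of IDs minus one", so input_base + id_count is itself the last valid input ID; the quirky HiSilicon firmware stores the full count instead, which is why the decrement just below is applied once an OEM entry in wa_info[] has matched. A minimal standalone sketch of that bound calculation, with the caveat that the struct and flag names here are illustrative stand-ins, not the kernel's own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct acpi_iort_id_mapping. */
struct id_mapping {
	uint32_t input_base;
	uint32_t id_count;  /* per IORT rev D: number of IDs in the range minus one */
};

static bool quirky_firmware;  /* would be set when a wa_info[] entry matches */

static uint32_t map_max(const struct id_mapping *map)
{
	uint32_t max = map->input_base + map->id_count;

	/* Quirky tables store the full count, so pull the bound back by one. */
	if (quirky_firmware)
		max--;
	return max;
}

int main(void)
{
	struct id_mapping m = { .input_base = 0x100, .id_count = 7 };

	printf("compliant: last valid ID 0x%x\n", (unsigned)map_max(&m)); /* 0x107 */
	quirky_firmware = true;
	printf("quirky:    last valid ID 0x%x\n", (unsigned)map_max(&m)); /* 0x106 */
	return 0;
}

With that bound in hand, iort_id_map() further down accepts rid_in only when it lies in [input_base, map_max], a single check that works for both firmware flavors.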
+ */ + if (apply_id_count_workaround) + map_max--; + + return map_max; +} + static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) { @@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return -ENXIO; } - if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) return -ENXIO; *rid_out = map->output_base + (rid_in - map->input_base); @@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void) return; } + iort_check_id_count_workaround(iort_table); iort_init_platform_devices(); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 8f0e0c8d8c3d..15cc7d5a6185 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -38,6 +38,8 @@ #define PREFIX "ACPI: " #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF +#define ACPI_BATTERY_CAPACITY_VALID(capacity) \ + ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN) #define ACPI_BATTERY_DEVICE_NAME "Battery" @@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) static bool acpi_battery_is_degraded(struct acpi_battery *battery) { - return battery->full_charge_capacity && battery->design_capacity && + return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) && battery->full_charge_capacity < battery->design_capacity; } @@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret = 0; + int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); if (acpi_battery_present(battery)) { @@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) ret = -ENODEV; else val->intval = battery->design_capacity * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_ENERGY_FULL: - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) ret = -ENODEV; else val->intval = battery->full_charge_capacity * 1000; @@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = battery->capacity_now * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: - if (battery->capacity_now && battery->full_charge_capacity) - val->intval = battery->capacity_now * 100/ - battery->full_charge_capacity; + if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) + full_capacity = battery->full_charge_capacity; + else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_capacity = battery->design_capacity; + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; else - val->intval = 0; + val->intval = battery->capacity_now * 100/ + full_capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: if (battery->state & ACPI_BATTERY_STATE_CRITICAL) @@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +static enum power_supply_property charge_battery_full_cap_broken_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + 
POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) { struct power_supply_config psy_cfg = { .drv_data = battery, }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_cap_broken = true; if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { - battery->bat_desc.properties = charge_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(charge_battery_props); - } else if (battery->full_charge_capacity == 0) { - battery->bat_desc.properties = - energy_battery_full_cap_broken_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_full_cap_broken_props); + if (full_cap_broken) { + battery->bat_desc.properties = + charge_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = charge_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_props); + } } else { - battery->bat_desc.properties = energy_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_props); + if (full_cap_broken) { + battery->bat_desc.properties = + energy_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = energy_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_props); + } } battery->bat_desc.name = acpi_device_bid(battery->device); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b758b45737f5..f6925f16c4a2 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = { }, .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, + { + /* + * Razer Blade Stealth 13 late 2019, notification of the LID device + * only happens on close, not on open and _LID always returns closed. 
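The quirk entry that follows therefore forces the lid state to be reported as open at initialization instead of trusting _LID. For context, here is a schematic of how such a DMI table is typically consumed in the driver's init path; dmi_first_match() and the ACPI_BUTTON_LID_INIT_* constants are real kernel symbols, but this fragment is a simplified sketch of the pattern, not the driver's literal code:

/* Kernel-context sketch; assumed to run once during button driver init. */
#include <linux/dmi.h>

static long lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; /* assumed default */

static void apply_lid_quirk(void)
{
	const struct dmi_system_id *id = dmi_first_match(dmi_lid_quirks);

	if (id)
		lid_init_state = (long)id->driver_data;
}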
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 5e4a8860a9c0..b64c62bfcea5 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1321,6 +1321,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) */ static const struct acpi_device_id special_pm_ids[] = { {"PNP0C0B", }, /* Generic ACPI fan */ + {"INT1044", }, /* Fan for Tiger Lake generation */ {"INT3404", }, /* Fan */ {} }; diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index eb58fc475a03..387f27ef3368 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev) } static const struct acpi_device_id int3407_device_ids[] = { + {"INT1047", 0}, {"INT3407", 0}, {"", 0}, }; diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 5c7a90186e3c..1ec7b6900662 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -13,6 +13,10 @@ #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { + {"INT1040"}, + {"INT1043"}, + {"INT1044"}, + {"INT1047"}, {"INT3400"}, {"INT3401", INT3401_DEVICE}, {"INT3402"}, diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d05be13c1022..08bc9751fe66 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1053,28 +1053,20 @@ void acpi_ec_unblock_transactions(void) Event Management -------------------------------------------------------------------------- */ static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - -static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? 
acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 816b0803f7fb..aaf4e8f348cf 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, + {"INT1044", 0}, {"INT3404", 0}, {"", 0}, }; @@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = { #define FAN_PM_OPS_PTR NULL #endif +#define ACPI_FPS_NAME_LEN 20 + struct acpi_fan_fps { u64 control; u64 trip_point; u64 speed; u64 noise_level; u64 power; + char name[ACPI_FPS_NAME_LEN]; + struct device_attribute dev_attr; }; struct acpi_fan_fif { @@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b) return fps1->speed - fps2->speed; } +static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); + int count; + + if (fps->control == 0xFFFFFFFF || fps->control > 100) + count = snprintf(buf, PAGE_SIZE, "not-defined:"); + else + count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control); + + if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point); + + if (fps->speed == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed); + + if (fps->noise_level == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100); + + if (fps->power == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power); + + return count; +} + static int acpi_fan_get_fps(struct acpi_device *device) { struct acpi_fan *fan = acpi_driver_data(device); @@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device) } for (i = 0; i < fan->fps_count; i++) { struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; - struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] }; + struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name), + &fan->fps[i] }; status = acpi_extract_package(&obj->package.elements[i + 1], &format, &fps); if (ACPI_FAILURE(status)) { dev_err(&device->dev, "Invalid _FPS element\n"); - break; + goto err; } } @@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device) sort(fan->fps, fan->fps_count, sizeof(*fan->fps), acpi_fan_speed_cmp, NULL); + for (i = 0; i < fan->fps_count; ++i) { + struct acpi_fan_fps *fps = &fan->fps[i]; + + snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); + fps->dev_attr.show = show_state; + fps->dev_attr.store = NULL; + fps->dev_attr.attr.name = fps->name; + fps->dev_attr.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); + if (status) { + int j; + + for (j = 0; j < i; ++j) + sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); + break; + } + } + err: kfree(obj); return status; @@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fan); if (acpi_fan_is_acpi4(device)) { - if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) - goto end; + result = acpi_fan_get_fif(device); + if 
(result) + return result; + + result = acpi_fan_get_fps(device); + if (result) + return result; + fan->acpi4 = true; } else { result = acpi_device_update_power(device, NULL); if (result) { dev_err(&device->dev, "Failed to set initial power state\n"); - goto end; + goto err_end; } } @@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev) &fan_cooling_ops); if (IS_ERR(cdev)) { result = PTR_ERR(cdev); - goto end; + goto err_end; } dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); @@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev) result = sysfs_create_link(&cdev->device.kobj, &pdev->dev.kobj, "device"); - if (result) + if (result) { dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); + goto err_end; + } + + return 0; + +err_end: + if (fan->acpi4) { + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } -end: return result; } @@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev) { struct acpi_fan *fan = platform_get_drvdata(pdev); + if (fan->acpi4) { + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); sysfs_remove_link(&fan->cdev->device.kobj, "device"); thermal_cooling_device_unregister(fan->cdev); diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index f31544d3656e..4ae93350b70d 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * * Return: The cache structure and the level we terminated with. */ -static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, - int local_level, - struct acpi_subtable_header *res, - struct acpi_pptt_cache **found, - int level, int type) +static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, + unsigned int local_level, + struct acpi_subtable_header *res, + struct acpi_pptt_cache **found, + unsigned int level, int type) { struct acpi_pptt_cache *cache; @@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); - pr_debug("Found cache @ level %d\n", level); + pr_debug("Found cache @ level %u\n", level); *found = cache; /* * continue looking at this node's resource list @@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, return local_level; } -static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node, - int *starting_level, int level, - int type) +static struct acpi_pptt_cache * +acpi_find_cache_level(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *starting_level, unsigned int level, + int type) { struct acpi_subtable_header *res; - int number_of_levels = *starting_level; + unsigned int number_of_levels = *starting_level; int resource = 0; struct acpi_pptt_cache *ret = NULL; - int local_level; + unsigned int local_level; /* walk down from processor node */ while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) { @@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta unsigned int level, struct acpi_pptt_processor **node) { - int 
total_levels = 0; + unsigned int total_levels = 0; struct acpi_pptt_cache *found = NULL; struct acpi_pptt_processor *cpu_node; u8 acpi_type = acpi_cache_type(type); - pr_debug("Looking for CPU %d's level %d cache type %d\n", + pr_debug("Looking for CPU %d's level %u cache type %d\n", acpi_cpu_id, level, acpi_type); cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2ae95df2e74f..dcc289e30166 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - } - - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } + /* + * It is expected that there will be at least 2 states, C1 and + * something else (C2 or C3), so fail if that is not the case. + */ + if (pr->power.count < 2) + return -EFAULT; - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void) max_cstate); first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 31014c7d3793..419f814d596a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_native, + .ident = "Lenovo E41-25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FS"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Lenovo E41-45", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82BK"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, .ident = "Apple MacBook Pro 12,1", @@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + + /* + * Desktops which falsely report a backlight and which our heuristics + * for this do not catch. 
+ */ { .callback = video_detect_force_none, .ident = "Dell OptiPlex 9020M", @@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), }, }, + { + .callback = video_detect_force_none, + .ident = "MSI MS-7721", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"), + }, + }, { }, }; diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 46dc54d18f0b..2a04e8abd397 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) void *cmd_tbl; u32 opts; const u32 cmd_fis_len = 5; /* five dwords */ - unsigned int n_elem; /* * Fill in command table information. First, the header, @@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); } - n_elem = 0; if (qc->flags & ATA_QCFLAG_DMAMAP) - n_elem = acard_ahci_fill_sg(qc, cmd_tbl); + acard_ahci_fill_sg(qc, cmd_tbl); /* * Fill in command slot information. diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 66a570d0da83..6853dbb4131d 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -73,6 +73,7 @@ enum brcm_ahci_version { BRCM_SATA_BCM7425 = 1, BRCM_SATA_BCM7445, BRCM_SATA_NSP, + BRCM_SATA_BCM7216, }; enum brcm_ahci_quirks { @@ -337,24 +338,39 @@ static const struct ata_port_info ahci_brcm_port_info = { .port_ops = &ahci_brcm_platform_ops, }; -#ifdef CONFIG_PM_SLEEP static int brcm_ahci_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; brcm_sata_phys_disable(priv); - return ahci_platform_suspend(dev); + if (IS_ENABLED(CONFIG_PM_SLEEP)) + ret = ahci_platform_suspend(dev); + else + ret = 0; + + if (priv->version != BRCM_SATA_BCM7216) + reset_control_assert(priv->rcdev); + + return ret; } -static int brcm_ahci_resume(struct device *dev) +static int __maybe_unused brcm_ahci_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; + int ret = 0; + + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; /* Make sure clocks are turned on before re-configuration */ ret = ahci_platform_enable_clks(hpriv); @@ -393,7 +409,6 @@ out_disable_phys: ahci_platform_disable_clks(hpriv); return ret; } -#endif static struct scsi_host_template ahci_platform_sht = { AHCI_SHT(DRV_NAME), @@ -404,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = { {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP}, + {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216}, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); @@ -412,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) { const struct of_device_id *of_id; struct device *dev = &pdev->dev; + const char *reset_name = NULL; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; struct resource *res; @@ -433,16 +450,19 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) 
return PTR_ERR(priv->top_ctrl); - /* Reset is optional depending on platform */ - priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci"); - if (!IS_ERR_OR_NULL(priv->rcdev)) - reset_control_deassert(priv->rcdev); + /* Reset is optional depending on platform and named differently */ + if (priv->version == BRCM_SATA_BCM7216) + reset_name = "rescal"; + else + reset_name = "ahci"; + + priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name); + if (IS_ERR(priv->rcdev)) + return PTR_ERR(priv->rcdev); hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) { - ret = PTR_ERR(hpriv); - goto out_reset; - } + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); hpriv->plat_data = priv; hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; @@ -459,6 +479,13 @@ static int brcm_ahci_probe(struct platform_device *pdev) break; } + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; + ret = ahci_platform_enable_clks(hpriv); if (ret) goto out_reset; @@ -500,7 +527,7 @@ out_disable_phys: out_disable_clks: ahci_platform_disable_clks(hpriv); out_reset: - if (!IS_ERR_OR_NULL(priv->rcdev)) + if (priv->version != BRCM_SATA_BCM7216) reset_control_assert(priv->rcdev); return ret; } @@ -521,11 +548,26 @@ static int brcm_ahci_remove(struct platform_device *pdev) return 0; } +static void brcm_ahci_shutdown(struct platform_device *pdev) +{ + int ret; + + /* All resources releasing happens via devres, but our device, unlike a + * proper remove is not disappearing, therefore using + * brcm_ahci_suspend() here which does explicit power management is + * appropriate. + */ + ret = brcm_ahci_suspend(&pdev->dev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume); static struct platform_driver brcm_ahci_driver = { .probe = brcm_ahci_probe, .remove = brcm_ahci_remove, + .shutdown = brcm_ahci_shutdown, .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 1bfd0154dad5..e47a28271f5b 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv) priv->aapl_bus_id = bidp ? 
*bidp : 0; /* Fixup missing Apple bus ID in case of media-bay */ - if (priv->mediabay && bidp == 0) + if (priv->mediabay && !bidp) priv->aapl_bus_id = 1; } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 8fad56f185ba..17d47ad03ab7 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -368,7 +368,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; @@ -441,7 +441,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index aad00d2b28f5..cc87004d5e2d 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc) } if (!to) { printk ("No more free channels for FS50..\n"); + kfree(vcc); return -EBUSY; } vcc->channo = dev->channo; @@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc) if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) || ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) { printk ("Channel is in use for FS155.\n"); + kfree(vcc); return -EBUSY; } } @@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc) tc, sizeof (struct fs_transmit_config)); if (!tc) { fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); + kfree(vcc); return -ENOMEM; } diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index 4a66888e7253..5fa7ce3745a0 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ - echo " .p2align $(ASM_ALIGN)" ;\ + echo " .p2align 4" ;\ echo "_fw_$(FWSTR)_bin:" ;\ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ echo "_fw_end:" ;\ diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 48616f358854..16134a69bf6f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev) retval = dev->power.disable_depth > 0 ? 
-EINVAL : dev->power.runtime_status == RPM_ACTIVE && atomic_inc_not_zero(&dev->power.usage_count); + trace_rpm_usage_rcuidle(dev, 0); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } @@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev) dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) rpm_idle(dev, RPM_AUTO | RPM_ASYNC); + else + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); out: spin_unlock_irq(&dev->power.lock); @@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use) if (!old_use || old_delay >= 0) { atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); + } else { + trace_rpm_usage_rcuidle(dev, 0); } } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 70a9edb5f525..27f3e60608e5 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m, break; } + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + return next_ws; } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ac9b31c57967..008f8da69d97 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, return i2c_smbus_write_byte_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_byte = { +static const struct regmap_bus regmap_smbus_byte = { .reg_write = regmap_smbus_byte_reg_write, .reg_read = regmap_smbus_byte_reg_read, }; @@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg, return i2c_smbus_write_word_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word = { +static const struct regmap_bus regmap_smbus_word = { .reg_write = regmap_smbus_word_reg_write, .reg_read = regmap_smbus_word_reg_read, }; @@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, return i2c_smbus_write_word_swapped(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word_swapped = { +static const struct regmap_bus regmap_smbus_word_swapped = { .reg_write = regmap_smbus_word_write_swapped, .reg_read = regmap_smbus_word_read_swapped, }; @@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context, return -EIO; } -static struct regmap_bus regmap_i2c = { +static const struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, @@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, return -EIO; } -static struct regmap_bus regmap_i2c_smbus_i2c_block = { +static const struct regmap_bus regmap_i2c_smbus_i2c_block = { .write = regmap_i2c_smbus_i2c_write, .read = regmap_i2c_smbus_i2c_read, .max_raw_read = I2C_SMBUS_BLOCK_MAX, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 19f57ccfbe1d..59f911e57719 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, WARN_ON(!map->bus); - /* Check for unwritable registers before we start */ - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_writeable(map, - reg + regmap_get_offset(map, i))) - return -EINVAL; + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; 
i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } if (!map->cache_bypass && map->format.parse_val) { unsigned int ival; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index d8d0dc0ca5ac..0b081dee1e95 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop) if (!prop->length) return NULL; - if (prop->is_array) - return prop->pointer; - - return &prop->value; + return prop->is_inline ? &prop->value : prop->pointer; } static const void *property_entry_find(const struct property_entry *props, @@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props, static void property_entry_free_data(const struct property_entry *p) { - const void *pointer = property_get_pointer(p); const char * const *src_str; size_t i, nval; - if (p->is_array) { - if (p->type == DEV_PROP_STRING && p->pointer) { - src_str = p->pointer; - nval = p->length / sizeof(const char *); - for (i = 0; i < nval; i++) - kfree(src_str[i]); - } - kfree(pointer); - } else if (p->type == DEV_PROP_STRING) { - kfree(p->value.str); + if (p->type == DEV_PROP_STRING) { + src_str = property_get_pointer(p); + nval = p->length / sizeof(*src_str); + for (i = 0; i < nval; i++) + kfree(src_str[i]); } + + if (!p->is_inline) + kfree(p->pointer); + kfree(p->name); } -static const char * const * -property_copy_string_array(const struct property_entry *src) +static bool property_copy_string_array(const char **dst_ptr, + const char * const *src_ptr, + size_t nval) { - const char **d; - const char * const *src_str = src->pointer; - size_t nval = src->length / sizeof(*d); int i; - d = kcalloc(nval, sizeof(*d), GFP_KERNEL); - if (!d) - return NULL; - for (i = 0; i < nval; i++) { - d[i] = kstrdup(src_str[i], GFP_KERNEL); - if (!d[i] && src_str[i]) { + dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL); + if (!dst_ptr[i] && src_ptr[i]) { while (--i >= 0) - kfree(d[i]); - kfree(d); - return NULL; + kfree(dst_ptr[i]); + return false; } } - return d; + return true; } static int property_entry_copy_data(struct property_entry *dst, const struct property_entry *src) { const void *pointer = property_get_pointer(src); - const void *new; - - if (src->is_array) { - if (!src->length) - return -ENODATA; - - if (src->type == DEV_PROP_STRING) { - new = property_copy_string_array(src); - if (!new) - return -ENOMEM; - } else { - new = kmemdup(pointer, src->length, GFP_KERNEL); - if (!new) - return -ENOMEM; - } + void *dst_ptr; + size_t nval; + + /* + * Properties with no data should not be marked as stored + * out of line. + */ + if (!src->is_inline && !src->length) + return -ENODATA; + + /* + * Reference properties are never stored inline as + * they are too big. 
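The copy path that continues below encodes one simple rule: a payload no larger than the entry's value union stays inline in the entry itself, while anything bigger, and every reference array, is copied into a heap allocation tracked through the pointer member. A stripped-down userspace model of just that decision, with invented names standing in for the property_entry fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of a property entry's payload storage. */
struct entry {
	size_t length;
	bool is_inline;
	union {
		void *pointer;   /* out-of-line payload */
		uint64_t value;  /* small payloads live in the entry itself */
	};
};

static int copy_data(struct entry *dst, const void *src, size_t len)
{
	void *dst_ptr;

	if (len <= sizeof(dst->value)) {
		dst_ptr = &dst->value;   /* reuse the union storage */
		dst->is_inline = true;
	} else {
		dst_ptr = malloc(len);   /* spill to the heap */
		if (!dst_ptr)
			return -1;
		dst->pointer = dst_ptr;
		dst->is_inline = false;
	}
	memcpy(dst_ptr, src, len);
	dst->length = len;
	return 0;
}

int main(void)
{
	struct entry e;
	uint32_t small = 42;
	const char big[] = "longer than eight bytes";

	copy_data(&e, &small, sizeof(small));
	printf("4-byte payload inline:  %d\n", e.is_inline);  /* 1 */
	copy_data(&e, big, sizeof(big));
	printf("24-byte payload inline: %d\n", e.is_inline);  /* 0 */
	free(e.pointer);
	return 0;
}

The payoff is visible in the free path earlier in this hunk: property_entry_free_data() only needs is_inline to decide whether there is a kfree() to do.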
+ */ + if (src->type == DEV_PROP_REF && src->is_inline) + return -EINVAL; - dst->is_array = true; - dst->pointer = new; - } else if (src->type == DEV_PROP_STRING) { - new = kstrdup(src->value.str, GFP_KERNEL); - if (!new && src->value.str) + if (src->length <= sizeof(dst->value)) { + dst_ptr = &dst->value; + dst->is_inline = true; + } else { + dst_ptr = kmalloc(src->length, GFP_KERNEL); + if (!dst_ptr) return -ENOMEM; + dst->pointer = dst_ptr; + } - dst->value.str = new; + if (src->type == DEV_PROP_STRING) { + nval = src->length / sizeof(const char *); + if (!property_copy_string_array(dst_ptr, pointer, nval)) { + if (!dst->is_inline) + kfree(dst->pointer); + return -ENOMEM; + } } else { - dst->value = src->value; + memcpy(dst_ptr, pointer, src->length); } dst->length = src->length; dst->type = src->type; dst->name = kstrdup(src->name, GFP_KERNEL); - if (!dst->name) - goto out_free_data; + if (!dst->name) { + property_entry_free_data(dst); + return -ENOMEM; + } return 0; - -out_free_data: - property_entry_free_data(dst); - return -ENOMEM; } /** @@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, struct fwnode_reference_args *args) { struct swnode *swnode = to_swnode(fwnode); - const struct software_node_reference *ref; + const struct software_node_ref_args *ref_array; + const struct software_node_ref_args *ref; const struct property_entry *prop; struct fwnode_handle *refnode; + u32 nargs_prop_val; + int error; int i; - if (!swnode || !swnode->node->references) + if (!swnode) return -ENOENT; - for (ref = swnode->node->references; ref->name; ref++) - if (!strcmp(ref->name, propname)) - break; + prop = property_entry_get(swnode->node->properties, propname); + if (!prop) + return -ENOENT; + + if (prop->type != DEV_PROP_REF) + return -EINVAL; - if (!ref->name || index > (ref->nrefs - 1)) + /* + * We expect that references are never stored inline, even + * single ones, as they are too big. + */ + if (prop->is_inline) + return -EINVAL; + + if (index * sizeof(*ref) >= prop->length) return -ENOENT; - refnode = software_node_fwnode(ref->refs[index].node); + ref_array = prop->pointer; + ref = &ref_array[index]; + + refnode = software_node_fwnode(ref->node); if (!refnode) return -ENOENT; if (nargs_prop) { - prop = property_entry_get(swnode->node->properties, nargs_prop); - if (!prop) - return -EINVAL; + error = property_entry_read_int_array(swnode->node->properties, + nargs_prop, sizeof(u32), + &nargs_prop_val, 1); + if (error) + return error; - nargs = prop->value.u32_data; + nargs = nargs_prop_val; } if (nargs > NR_FWNODE_REFERENCE_ARGS) @@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, args->nargs = nargs; for (i = 0; i < nargs; i++) - args->args[i] = ref->refs[index].args[i]; + args->args[i] = ref->args[i]; return 0; } diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 86e85daa80bf..305c7751184a 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE The module name will be test_async_driver_probe.ko If unsure say N. 
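The three additions below wire a new KUnit suite into the build: a KUNIT_DRIVER_PE_TEST Kconfig option, a Makefile rule that builds property-entry-test.o from it, and the test source itself. The quoted listing is cut off before the end of the new file, so the registration boilerplate is not visible; the usual KUnit wiring for the pe_test_* cases would look like the following, an assumption about the unquoted tail rather than a quote from it:

/* Assumed suite registration for the test cases shown further down. */
static struct kunit_case property_entry_test_cases[] = {
	KUNIT_CASE(pe_test_uints),
	KUNIT_CASE(pe_test_uint_arrays),
	{ }
};

static struct kunit_suite property_entry_test_suite = {
	.name = "property-entry",
	.test_cases = property_entry_test_cases,
};

kunit_test_suite(property_entry_test_suite);

With CONFIG_KUNIT=y and the new option enabled, the suite runs at kernel boot and reports TAP-formatted results, which the tools/testing/kunit/kunit.py wrapper can also drive from userspace.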
+config KUNIT_DRIVER_PE_TEST + bool "KUnit Tests for property entry API" + depends on KUNIT=y diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile index 0f1f7277a013..3ca56367c84b 100644 --- a/drivers/base/test/Makefile +++ b/drivers/base/test/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o + +obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c new file mode 100644 index 000000000000..abe03315180f --- /dev/null +++ b/drivers/base/test/property-entry-test.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +// Unit tests for property entries API +// +// Copyright 2019 Google LLC. + +#include <kunit/test.h> +#include <linux/property.h> +#include <linux/types.h> + +static void pe_test_uints(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8("prop-u8", 8), + PROPERTY_ENTRY_U16("prop-u16", 16), + PROPERTY_ENTRY_U32("prop-u32", 32), + PROPERTY_ENTRY_U64("prop-u64", 64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[2]; + u16 val_u16, array_u16[2]; + u32 val_u32, array_u32[2]; + u64 val_u64, array_u64[2]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, 
(int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_uint_arrays(struct kunit *test) +{ + static const u8 a_u8[16] = { 8, 9 }; + static const u16 a_u16[16] = { 16, 17 }; + static const u32 a_u32[16] = { 32, 33 }; + static const u64 a_u64[16] = { 64, 65 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8), + PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16), + PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32), + PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[32]; + u16 val_u16, array_u16[32]; + u32 val_u32, array_u32[32]; + u64 val_u64, array_u64[32]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = 
fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_strings(struct kunit *test) +{ + static const char *strings[] = { + "string-a", + "string-b", + }; + + static const struct property_entry entries[] = { + PROPERTY_ENTRY_STRING("str", "single"), + PROPERTY_ENTRY_STRING("empty", ""), + PROPERTY_ENTRY_STRING_ARRAY("strs", strings), + { } + }; + + struct fwnode_handle *node; + const char *str; + const char *strs[10]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_string(node, "str", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "single"); + + error = fwnode_property_read_string_array(node, "str", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + /* asking for more data returns what we have */ + error = fwnode_property_read_string_array(node, "str", strs, 2); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + error = fwnode_property_read_string(node, "no-str", &str); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_string_array(node, "no-str", strs, 1); + KUNIT_EXPECT_LT(test, error, 0); + + error = fwnode_property_read_string(node, "empty", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, ""); + + error = fwnode_property_read_string_array(node, "strs", strs, 3); + KUNIT_EXPECT_EQ(test, error, 2); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + KUNIT_EXPECT_STREQ(test, strs[1], "string-b"); + + error = fwnode_property_read_string_array(node, "strs", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + + /* NULL argument -> returns size */ + error = fwnode_property_read_string_array(node, "strs", NULL, 0); + KUNIT_EXPECT_EQ(test, error, 2); + + /* accessing array as single value */ + error = fwnode_property_read_string(node, "strs", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "string-a"); + + fwnode_remove_software_node(node); +} + +static void pe_test_bool(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_BOOL("prop"), + { } + }; + + struct fwnode_handle *node; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop")); + KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop")); + + fwnode_remove_software_node(node); +} + +/* Verifies that small U8 array is stored inline when 
property is copied */ +static void pe_test_move_inline_u8(struct kunit *test) +{ + static const u8 u8_array_small[8] = { 1, 2, 3, 4 }; + static const u8 u8_array_big[128] = { 5, 6, 7, 8 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small), + PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big), + { } + }; + + struct property_entry *copy; + const u8 *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + data_ptr = (u8 *)&copy[0].value; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6); + + property_entries_free(copy); +} + +/* Verifies that single string array is stored inline when property is copied */ +static void pe_test_move_inline_str(struct kunit *test) +{ + static char *str_array_small[] = { "a" }; + static char *str_array_big[] = { "b", "c", "d", "e" }; + static char *str_array_small_empty[] = { "" }; + static struct property_entry entries[] = { + PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small), + PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big), + PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty), + { } + }; + + struct property_entry *copy; + const char * const *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a"); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_STREQ(test, data_ptr[0], "b"); + KUNIT_EXPECT_STREQ(test, data_ptr[1], "c"); + + KUNIT_EXPECT_TRUE(test, copy[2].is_inline); + KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], ""); + + property_entries_free(copy); +} + +/* Handling of reference properties */ +static void pe_test_reference(struct kunit *test) +{ + static const struct software_node nodes[] = { + { .name = "1", }, + { .name = "2", }, + { } + }; + + static const struct software_node_ref_args refs[] = { + { + .node = &nodes[0], + .nargs = 0, + }, + { + .node = &nodes[1], + .nargs = 2, + .args = { 3, 4 }, + }, + }; + + const struct property_entry entries[] = { + PROPERTY_ENTRY_REF("ref-1", &nodes[0]), + PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2), + PROPERTY_ENTRY_REF_ARRAY("ref-3", refs), + { } + }; + + struct fwnode_handle *node; + struct fwnode_reference_args ref; + int error; + + error = software_node_register_nodes(nodes); + KUNIT_ASSERT_EQ(test, error, 0); + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 1, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 1U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + + /* asking for more args, padded with zero data */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 3, 0, &ref); 
+ KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 3U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU); + KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 2, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + /* array of references */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* second reference in the array */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 2, 1, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 2U); + KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 2, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); + software_node_unregister_nodes(nodes); +} + +static struct kunit_case property_entry_test_cases[] = { + KUNIT_CASE(pe_test_uints), + KUNIT_CASE(pe_test_uint_arrays), + KUNIT_CASE(pe_test_strings), + KUNIT_CASE(pe_test_bool), + KUNIT_CASE(pe_test_move_inline_u8), + KUNIT_CASE(pe_test_move_inline_str), + KUNIT_CASE(pe_test_reference), + { } +}; + +static struct kunit_suite property_entry_test_suite = { + .name = "property-entry", + .test_cases = property_entry_test_cases, +}; + +kunit_test_suite(property_entry_test_suite); diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index 5cf49d9db95e..ed34785dd64b 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, return BLK_STS_IOERR; case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: /* Writes must be at the write pointer position */ if (sector != zone->wp) return BLK_STS_IOERR; - if (zone->cond == BLK_ZONE_COND_EMPTY) + if (zone->cond != BLK_ZONE_COND_EXP_OPEN) zone->cond = BLK_ZONE_COND_IMP_OPEN; zone->wp += nr_sectors; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f4d1597df0a2..ccb44fe790a7 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } + /* Always add a slot for main clocks fck and ick even if unused */ + if (!nr_fck) + ddata->nr_clocks++; + if (!nr_ick) + ddata->nr_clocks++; + ddata->clocks = devm_kcalloc(ddata->dev, ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); @@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata) struct clk *clock; int i, error; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return 0; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata) struct clk *clock; int i; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index b23b0b999232..87f449340202 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ 
b/drivers/char/tpm/tpm-dev-common.c @@ -130,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); - if (!ret_size) { + if (ret_size <= 0) { priv->response_length = 0; goto out; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index 1089fc0bb290..f3742bcc73e3 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -14,7 +14,7 @@ struct file_priv { struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; - size_t response_length; + ssize_t response_length; bool response_read; bool command_enqueued; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 3b53b3e5ec3e..d52bf4df0bca 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(timeouts); -static struct attribute *tpm_dev_attrs[] = { +static ssize_t tpm_version_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + + return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2 + ? "2" : "1"); +} +static DEVICE_ATTR_RO(tpm_version_major); + +static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, @@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = { &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, + &dev_attr_tpm_version_major.attr, NULL, }; -static const struct attribute_group tpm_dev_group = { - .attrs = tpm_dev_attrs, +static struct attribute *tpm2_dev_attrs[] = { + &dev_attr_tpm_version_major.attr, + NULL +}; + +static const struct attribute_group tpm1_dev_group = { + .attrs = tpm1_dev_attrs, +}; + +static const struct attribute_group tpm2_dev_group = { + .attrs = tpm2_dev_attrs, }; void tpm_sysfs_add_device(struct tpm_chip *chip) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - return; - WARN_ON(chip->groups_cnt != 0); - chip->groups[chip->groups_cnt++] = &tpm_dev_group; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + chip->groups[chip->groups_cnt++] = &tpm2_dev_group; + else + chip->groups[chip->groups_cnt++] = &tpm1_dev_group; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index bb0343ffd235..27c6ca031e23 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, if (wait_startup(chip, 0) != 0) { rc = -ENODEV; - goto err_start; + goto out_err; } /* Take control of the TPM's interrupt hardware and shut it off */ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) - goto err_start; + goto out_err; intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; @@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, rc = tpm_chip_start(chip); if (rc) - goto err_start; - + goto out_err; rc = tpm2_probe(chip); + tpm_chip_stop(chip); if (rc) - goto err_probe; + goto out_err; rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); if (rc < 0) - goto err_probe; + goto out_err; priv->manufacturer_id = vendor; rc = tpm_tis_read8(priv, TPM_RID(0), &rid); if (rc < 0) - goto err_probe; + goto out_err; dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", (chip->flags & 
TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2", @@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, probe = probe_itpm(chip); if (probe < 0) { rc = -ENODEV; - goto err_probe; + goto out_err; } /* Figure out the capabilities */ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps); if (rc < 0) - goto err_probe; + goto out_err; dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps); @@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, if (tpm_get_timeouts(chip)) { dev_err(dev, "Could not get TPM timeouts and durations\n"); rc = -ENODEV; - goto err_probe; + goto out_err; } - chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } } - tpm_chip_stop(chip); - rc = tpm_chip_register(chip); if (rc) - goto err_start; - - return 0; + goto out_err; -err_probe: - tpm_chip_stop(chip); + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); -err_start: + return 0; +out_err: if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) chip->ops->clk_enable(chip, false); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6a11239ccde3..772258de2d1f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3426,11 +3426,17 @@ static int __clk_core_init(struct clk_core *core) if (core->flags & CLK_IS_CRITICAL) { unsigned long flags; - clk_core_prepare(core); + ret = clk_core_prepare(core); + if (ret) + goto out; flags = clk_enable_lock(); - clk_core_enable(core); + ret = clk_core_enable(core); clk_enable_unlock(flags); + if (ret) { + clk_core_unprepare(core); + goto out; + } } clk_core_reparent_orphans_nolock(); diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index a60a1be937ad..b4a95cbbda98 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; +static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index f7b370f3acef..f6ce888098be 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { @@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { @@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { @@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { @@ -3287,6 +3291,7 @@ 
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { @@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct clk_regmap *gcc_sdm845_clocks[] = { diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 3a991ca1ee36..c9e5a1fb6653 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -12,6 +12,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/clk.h> #include "clk.h" #include "clk-cpu.h" @@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_subcmus); } + /* + * Keep top part of G3D clock path enabled permanently to ensure + * that the internal busses get their clock regardless of the + * main G3D clock enablement status. + */ + clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 45a1ed3fe674..50f8d1bc7046 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -23,9 +23,9 @@ */ static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k", - "pll-periph0", "iosc" }; + "iosc", "pll-periph0" }; static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = { - { .index = 2, .shift = 0, .width = 5 }, + { .index = 3, .shift = 0, .width = 5 }, }; static struct ccu_div ar100_clk = { @@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div r_apb1_clk = { - .div = _SUNXI_CCU_DIV(0, 2), - - .common = { - .reg = 0x00c, - .hw.init = CLK_HW_INIT("r-apb1", - "r-ahb", - &ccu_div_ops, - 0), - }, -}; +static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0); static struct ccu_div r_apb2_clk = { .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO), diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4646fdc61053..4c8c491b87c2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div apb0_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), - - .common = { - .reg = 0x0c, - .hw.init = CLK_HW_INIT_HW("apb0", - &ahb0_clk.hw, - &ccu_div_ops, - 0), - }, -}; - -static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); +static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space @@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = { static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, - &a83t_apb0_clk.common, + &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, @@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] 
= &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, - [CLK_APB0] = &a83t_apb0_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, @@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { - /* Fix apb0 bus gate parents here */ - apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; - sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 897490800102..23bfe1d12f21 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = { .reg = 0x1f0, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; @@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = { .reg = 0x1f4, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 5c779eec454b..0e36ca3bf3d5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_PLL_DDR1 + 1, }; static struct clk_hw_onecell_data sun8i_v3_hw_clks = { @@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_I2S0 + 1, }; static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index b0160d305a67..108eeeedcbf7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -51,6 +51,4 @@ #define CLK_PLL_DDR1 74 -#define CLK_NUMBER (CLK_I2S0 + 1) - #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index e6bd6d1ea012..f6cdce441cf7 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks) periph_banks = banks; clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL); - if (!clks) + if (!clks) { kfree(periph_clk_enb_refcnt); + return NULL; + } clk_num = num; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index f65e16c4f3c4..8d4c08b034bd 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) cinfo->iobase = of_iomap(node, 0); cinfo->dev = &pdev->dev; pm_runtime_enable(cinfo->dev); - pm_runtime_irq_safe(cinfo->dev); pm_runtime_get_sync(cinfo->dev); atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 77b0e5d0fb13..4f86ce2db34f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) struct 
cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct private_data *priv = policy->driver_data; + cpufreq_cpu_put(policy); + return brcm_avs_get_frequency(priv->base); } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8d8da763adc5..a06777c35fc0 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -39,7 +39,7 @@ static struct cppc_cpudata **all_cpu_data; struct cppc_workaround_oem_info { - char oem_id[ACPI_OEM_ID_SIZE +1]; + char oem_id[ACPI_OEM_ID_SIZE + 1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; u32 oem_revision; }; @@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void) for (i = 0; i < ARRAY_SIZE(wa_info); i++) { if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && - wa_info[i].oem_revision == tbl->oem_revision) + wa_info[i].oem_revision == tbl->oem_revision) { apply_hisi_workaround = true; + break; + } } + + acpi_put_table(tbl); } /* Callback function used to retrieve the max frequency from DMI */ diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index f1d170dcf4d3..f2ae9cd455c1 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "fsl,imx8mq", }, { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, + { .compatible = "fsl,imx8mp", }, { .compatible = "marvell,armadaxp", }, @@ -121,6 +122,8 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "nvidia,tegra20", }, + { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 85a6efd6b68f..6cb8193421ea 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (ret) return ret; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT; else @@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (of_machine_is_compatible("fsl,imx8mm") || of_machine_is_compatible("fsl,imx8mq")) speed_grade = 1; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = 0xb; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d2fa3e9ccd97..ad6a17cf0011 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -172,7 +172,7 @@ struct vid_data { /** * struct global_params - Global parameters, mostly tunable via sysfs. * @no_turbo: Whether or not to use turbo P-states. - * @turbo_disabled: Whethet or not turbo P-states are available at all, + * @turbo_disabled: Whether or not turbo P-states are available at all, * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. 
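Several of the cpufreq fixes in this range (the brcm_avs_cpufreq_get() hunk above, and the s3c2416/s5pv210 reboot notifiers below) revolve around the same contract: cpufreq_cpu_get() returns the policy for a CPU with a reference held, or NULL if no policy is registered, and every successful call must be balanced by cpufreq_cpu_put(). A minimal sketch of the pattern the fixed call sites follow; the function name is hypothetical and the read of policy->cur is purely illustrative:

static unsigned int example_get_cur_freq(unsigned int cpu)
{
	/* Takes a reference on the policy; may return NULL. */
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq;

	if (!policy)
		return 0;

	/* Only touch the policy while the reference is held. */
	freq = policy->cur;

	/* Balance the reference taken by cpufreq_cpu_get(). */
	cpufreq_cpu_put(policy);

	return freq;
}

Dereferencing the result without the NULL check (as the old s3c2416/s5pv210 notifiers effectively did) can oops when CPU0 has no policy, and skipping the put leaks a policy reference, which is exactly what the brcm-avs hunk plugs.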
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index cb74bdc5baaa..70ad8fe1d78b 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { static int kirkwood_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct resource *res; int err; priv.dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv.base = devm_ioremap_resource(&pdev->dev, res); + priv.base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e9caa9586982..909f40fbcde2 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void) u32 cpu_freq; spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG(0); - LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */ + cpu_freq = readl(LOONGSON_CHIPCFG); + /* Put CPU into wait mode */ + writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG); + /* Restore CPU state */ + writel(cpu_freq, LOONGSON_CHIPCFG); spin_unlock_irqrestore(&loongson2_wait_lock, flags); local_irq_enable(); } diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 106910351c41..5c221bc90210 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; int ret; + struct cpufreq_policy *policy; mutex_lock(&cpufreq_lock); @@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, */ if (s3c_freq->is_dvs) { pr_debug("cpufreq: leave dvs on reboot\n"); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0); + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0); + cpufreq_cpu_put(policy); + if (ret < 0) return NOTIFY_BAD; } diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5d10030f2560..e84281e2561d 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0); + cpufreq_cpu_put(policy); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); if (ret < 0) return NOTIFY_BAD; diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index bcecb068b51b..2e233ad72758 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) { struct tegra186_cpufreq_data *data; struct tegra_bpmp *bpmp; - struct resource *res; unsigned int i = 0, err; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); @@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) if (IS_ERR(bpmp)) return 
PTR_ERR(bpmp); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { err = PTR_ERR(data->regs); goto put_bpmp; diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index a224d33dda7f..62272ecfa771 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" - depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS + depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST depends on MCPM && !ARM64 select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS @@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE config ARM_KIRKWOOD_CPUIDLE bool "CPU Idle Driver for Marvell Kirkwood SoCs" - depends on MACH_KIRKWOOD && !ARM64 + depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64 help This adds the CPU Idle driver for Marvell Kirkwood SoCs. config ARM_ZYNQ_CPUIDLE bool "CPU Idle Driver for Xilinx Zynq processors" - depends on ARCH_ZYNQ && !ARM64 + depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Xilinx Zynq processors. @@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE config ARM_AT91_CPUIDLE bool "Cpu Idle Driver for the AT91 processors" default y - depends on ARCH_AT91 && !ARM64 + depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle for AT91 processors. config ARM_EXYNOS_CPUIDLE bool "Cpu Idle Driver for the Exynos processors" - depends on ARCH_EXYNOS && !ARM64 + depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP help Select this to enable cpuidle for Exynos processors. config ARM_MVEBU_V7_CPUIDLE bool "CPU Idle Driver for mvebu v7 family processors" - depends on ARCH_MVEBU && !ARM64 + depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Armada 370, 38x and XP processors. diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index b607278df25b..04003b90dc49 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -89,6 +89,7 @@ * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops + * @abort_barrier: synchronisation point for abort cases * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging @@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu) /** * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting - * @dev: struct cpuidle_device for this cpu + * @this_cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Calls cpuidle_coupled_poke on all other online cpus. 
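The coupled.c hunks here and below are pure kernel-doc repairs: scripts/kernel-doc warns when an @-tag names a parameter the function does not actually take, and the stray "@cpu - this cpu" dash form is likewise normalized to the "@name: description" colon form kernel-doc expects. For reference, a header in the shape the fixes converge on, using the corrected tags from this file:

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 */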
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, /** * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * @@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu, /** * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Removes the requested idle state for the specified cpuidle device. @@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) /** * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed - * @cpu - this cpu + * @cpu: this cpu * * Turns on interrupts and spins until any outstanding poke interrupts have * been processed and the poke bit has been cleared. diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c index 6e36740f5719..fc22c59b6c73 100644 --- a/drivers/cpuidle/cpuidle-clps711x.c +++ b/drivers/cpuidle/cpuidle-clps711x.c @@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = { static int __init clps711x_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - clps711x_halt = devm_ioremap_resource(&pdev->dev, res); + clps711x_halt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(clps711x_halt)) return PTR_ERR(clps711x_halt); diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c index d23d8f468c12..511c4f46027a 100644 --- a/drivers/cpuidle/cpuidle-kirkwood.c +++ b/drivers/cpuidle/cpuidle-kirkwood.c @@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = { /* Initialize CPU idle by registering the idle states */ static int kirkwood_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddr_operation_base = devm_ioremap_resource(&pdev->dev, res); + ddr_operation_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ddr_operation_base)) return PTR_ERR(ddr_operation_base); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 33d19c8eb027..de81298051b3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns) * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. + * @latency_limit_ns: Idle state exit latency limit + * + * Return: the index of the deepest available idle state. 
*/ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, @@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) if (!try_module_get(drv->owner)) return -EINVAL; - for (i = 0; i < drv->state_count; i++) + for (i = 0; i < drv->state_count; i++) { if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ce6a5f80fb83..4070e573bf43 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) { int i; - drv->refcnt = 0; - /* * Use all possible CPUs as the default, because if the kernel boots * with some CPUs offline and then we online one of them, the CPU @@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) */ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (WARN_ON(drv->refcnt > 0)) - return; - if (drv->bctimer) { drv->bctimer = 0; on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, @@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); /** - * cpuidle_driver_ref - get a reference to the driver. - * - * Increment the reference counter of the cpuidle driver associated with - * the current CPU. - * - * Returns a pointer to the driver, or NULL if the current CPU has no driver. - */ -struct cpuidle_driver *cpuidle_driver_ref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv) - drv->refcnt++; - - spin_unlock(&cpuidle_driver_lock); - return drv; -} - -/** - * cpuidle_driver_unref - puts down the refcount for the driver - * - * Decrement the reference counter of the cpuidle driver associated with - * the current CPU. - */ -void cpuidle_driver_unref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv && !WARN_ON(drv->refcnt <= 0)) - drv->refcnt--; - - spin_unlock(&cpuidle_driver_lock); -} - -/** * cpuidle_driver_state_disabled - Disable or enable an idle state * @drv: cpuidle driver owning the state * @idx: State index diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index de7e706efd46..6deaaf5f05b5 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. 
*/ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 38ef770be90d..cdeedbf02646 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = { /** * cpuidle_add_interface - add CPU global sysfs attributes + * @dev: the target device */ int cpuidle_add_interface(struct device *dev) { @@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev) /** * cpuidle_remove_interface - remove CPU global sysfs attributes + * @dev: the target device */ void cpuidle_remove_interface(struct device *dev) { @@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state, return size; } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled"); +} + define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); @@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_disable.attr, &attr_above.attr, &attr_below.attr, + &attr_default_status.attr, NULL }; @@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = { /** * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { @@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) /** * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 26754d0570ba..b846d73d9a85 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -40,7 +40,7 @@ struct sec_req { int req_id; /* Status of the SEC request */ - int fake_busy; + atomic_t fake_busy; }; /** @@ -132,8 +132,8 @@ struct sec_debug_file { }; struct sec_dfx { - u64 send_cnt; - u64 recv_cnt; + atomic64_t send_cnt; + atomic64_t recv_cnt; }; struct sec_debug { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 62b04e19067c..0a5391fff485 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -120,7 +120,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) return; } - __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1); + atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt); req->ctx->req_op->buf_unmap(req->ctx, req); @@ -135,13 +135,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); mutex_unlock(&qp_ctx->req_lock); - 
__sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (ret == -EBUSY) return -ENOBUFS; if (!ret) { - if (req->fake_busy) + if (atomic_read(&req->fake_busy)) ret = -EBUSY; else ret = -EINPROGRESS; @@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req); - if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0)) + if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1) sk_req->base.complete(&sk_req->base, -EINPROGRESS); sk_req->base.complete(&sk_req->base, req->err_type); @@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) } if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = 1; + atomic_set(&req->fake_busy, 1); else - req->fake_busy = 0; + atomic_set(&req->fake_busy, 0); ret = ctx->req_op->get_res(ctx, req); if (ret) { diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 74f0654028c9..ab742dfbab99 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -608,6 +608,14 @@ static const struct file_operations sec_dbg_fops = { .write = sec_debug_write, }; +static int debugfs_atomic64_t_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL, + "%lld\n"); + static int sec_core_debug_init(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; @@ -628,9 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec) debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt); + debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt, + &fops_atomic64_t_ro); - debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt); + debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt, + &fops_atomic64_t_ro); return 0; } diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 35535833b6f7..0b1df12e0f21 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -77,7 +77,7 @@ config DEVFREQ_GOV_PASSIVE comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ - tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" + tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver" depends on ARCH_EXYNOS || COMPILE_TEST select DEVFREQ_GOV_SIMPLE_ONDEMAND select DEVFREQ_GOV_PASSIVE @@ -91,6 +91,16 @@ config ARM_EXYNOS_BUS_DEVFREQ and adjusts the operating frequencies and voltages with OPP support. This does not yet operate with optimal voltages. +config ARM_IMX8M_DDRC_DEVFREQ + tristate "i.MX8M DDRC DEVFREQ Driver" + depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select DEVFREQ_GOV_USERSPACE + help + This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows + adjusting DRAM frequency. 
+ config ARM_TEGRA_DEVFREQ tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver" depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \ @@ -115,14 +125,15 @@ config ARM_TEGRA20_DEVFREQ config ARM_RK3399_DMC_DEVFREQ tristate "ARM RK3399 DMC DEVFREQ Driver" - depends on ARCH_ROCKCHIP + depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT help - This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). - It sets the frequency for the memory controller and reads the usage counts - from hardware. + This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). + It sets the frequency for the memory controller and reads the usage counts + from hardware. source "drivers/devfreq/event/Kconfig" diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 338ae8440db6..3eb4d5e6635c 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o +obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c index 3dc5fd6065a3..8c31b0f2e28f 100644 --- a/drivers/devfreq/devfreq-event.c +++ b/drivers/devfreq/devfreq-event.c @@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev); /** * devfreq_event_remove_edev() - Remove the devfreq-event device registered. - * @dev : the devfreq-event device + * @edev : the devfreq-event device * - * Note that this function remove the registered devfreq-event device. + * Note that this function removes the registered devfreq-event device. */ int devfreq_event_remove_edev(struct devfreq_event_dev *edev) { diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 57f6944d65a6..cceee8bc3c2f 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> @@ -33,6 +34,7 @@ #define HZ_PER_KHZ 1000 static struct class *devfreq_class; +static struct dentry *devfreq_debugfs; /* * devfreq core provides delayed work based load monitoring helper @@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq) int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) { int lev, prev_lev, ret = 0; - unsigned long cur_time; + u64 cur_time; lockdep_assert_held(&devfreq->lock); - cur_time = jiffies; + cur_time = get_jiffies_64(); /* Immediately exit if previous_freq is not initialized yet. 
*/ if (!devfreq->previous_freq) @@ -224,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) goto out; } - devfreq->time_in_state[prev_lev] += - cur_time - devfreq->last_stat_updated; + devfreq->stats.time_in_state[prev_lev] += + cur_time - devfreq->stats.last_update; lev = devfreq_get_freq_level(devfreq, freq); if (lev < 0) { @@ -234,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) } if (lev != prev_lev) { - devfreq->trans_table[(prev_lev * - devfreq->profile->max_state) + lev]++; - devfreq->total_trans++; + devfreq->stats.trans_table[ + (prev_lev * devfreq->profile->max_state) + lev]++; + devfreq->stats.total_trans++; } out: - devfreq->last_stat_updated = cur_time; + devfreq->stats.last_update = cur_time; return ret; } EXPORT_SYMBOL(devfreq_update_status); @@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq) msecs_to_jiffies(devfreq->profile->polling_ms)); out_update: - devfreq->last_stat_updated = jiffies; + devfreq->stats.last_update = get_jiffies_64(); devfreq->stop_polling = false; if (devfreq->profile->get_cur_freq && @@ -807,28 +809,29 @@ struct devfreq *devfreq_add_device(struct device *dev, goto err_out; } - devfreq->trans_table = devm_kzalloc(&devfreq->dev, + devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, array3_size(sizeof(unsigned int), devfreq->profile->max_state, devfreq->profile->max_state), GFP_KERNEL); - if (!devfreq->trans_table) { + if (!devfreq->stats.trans_table) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->time_in_state = devm_kcalloc(&devfreq->dev, + devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, devfreq->profile->max_state, - sizeof(unsigned long), + sizeof(*devfreq->stats.time_in_state), GFP_KERNEL); - if (!devfreq->time_in_state) { + if (!devfreq->stats.time_in_state) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->last_stat_updated = jiffies; + devfreq->stats.total_trans = 0; + devfreq->stats.last_update = get_jiffies_64(); srcu_init_notifier_head(&devfreq->transition_notifier_list); @@ -1259,6 +1262,14 @@ err_out: } EXPORT_SYMBOL(devfreq_remove_governor); +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct devfreq *devfreq = to_devfreq(dev); + return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent)); +} +static DEVICE_ATTR_RO(name); + static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1461,6 +1472,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%lu\n", min_freq); } +static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1501,7 +1513,6 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1580,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev, devfreq->profile->freq_table[i]); for (j = 0; j < max_state; j++) len += sprintf(buf + len, "%10u", - devfreq->trans_table[(i * max_state) + j]); - len += sprintf(buf + len, "%10u\n", - jiffies_to_msecs(devfreq->time_in_state[i])); + devfreq->stats.trans_table[(i * max_state) + j]); + + len += sprintf(buf + len, "%10llu\n", (u64) + jiffies64_to_msecs(devfreq->stats.time_in_state[i])); } len += sprintf(buf + len, "Total 
transition : %u\n", - devfreq->total_trans); + devfreq->stats.total_trans); return len; } -static DEVICE_ATTR_RO(trans_stat); + +static ssize_t trans_stat_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + int err, value; + + if (df->profile->max_state == 0) + return count; + + err = kstrtoint(buf, 10, &value); + if (err || value != 0) + return -EINVAL; + + mutex_lock(&df->lock); + memset(df->stats.time_in_state, 0, (df->profile->max_state * + sizeof(*df->stats.time_in_state))); + memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), + df->profile->max_state, + df->profile->max_state)); + df->stats.total_trans = 0; + df->stats.last_update = get_jiffies_64(); + mutex_unlock(&df->lock); + + return count; +} +static DEVICE_ATTR_RW(trans_stat); static struct attribute *devfreq_attrs[] = { + &dev_attr_name.attr, &dev_attr_governor.attr, &dev_attr_available_governors.attr, &dev_attr_cur_freq.attr, @@ -1605,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = { }; ATTRIBUTE_GROUPS(devfreq); +/** + * devfreq_summary_show() - Show the summary of the devfreq devices + * @s: seq_file instance to show the summary of devfreq devices + * @data: not used + * + * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file. + * It helps that user can know the detailed information of the devfreq devices. + * + * Return 0 always because it shows the information without any data change. + */ +static int devfreq_summary_show(struct seq_file *s, void *data) +{ + struct devfreq *devfreq; + struct devfreq *p_devfreq = NULL; + unsigned long cur_freq, min_freq, max_freq; + unsigned int polling_ms; + + seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n", + "dev_name", + "dev", + "parent_dev", + "governor", + "polling_ms", + "cur_freq_Hz", + "min_freq_Hz", + "max_freq_Hz"); + seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n", + "------------------------------", + "----------", + "----------", + "---------------", + "----------", + "------------", + "------------", + "------------"); + + mutex_lock(&devfreq_list_lock); + + list_for_each_entry_reverse(devfreq, &devfreq_list, node) { +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) + if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE, + DEVFREQ_NAME_LEN)) { + struct devfreq_passive_data *data = devfreq->data; + + if (data) + p_devfreq = data->parent; + } else { + p_devfreq = NULL; + } +#endif + + mutex_lock(&devfreq->lock); + cur_freq = devfreq->previous_freq, + get_freq_range(devfreq, &min_freq, &max_freq); + polling_ms = devfreq->profile->polling_ms, + mutex_unlock(&devfreq->lock); + + seq_printf(s, + "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n", + dev_name(devfreq->dev.parent), + dev_name(&devfreq->dev), + p_devfreq ? 
dev_name(&p_devfreq->dev) : "null", + devfreq->governor_name, + polling_ms, + cur_freq, + min_freq, + max_freq); + } + + mutex_unlock(&devfreq_list_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(devfreq_summary); + static int __init devfreq_init(void) { devfreq_class = class_create(THIS_MODULE, "devfreq"); @@ -1621,6 +1736,11 @@ static int __init devfreq_init(void) } devfreq_class->dev_groups = devfreq_groups; + devfreq_debugfs = debugfs_create_dir("devfreq", NULL); + debugfs_create_file("devfreq_summary", 0444, + devfreq_debugfs, NULL, + &devfreq_summary_fops); + return 0; } subsys_initcall(devfreq_init); @@ -1814,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res) /** * devm_devfreq_register_notifier() - - Resource-managed devfreq_register_notifier() + * - Resource-managed devfreq_register_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. @@ -1850,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier); /** * devm_devfreq_unregister_notifier() - - Resource-managed devfreq_unregister_notifier() + * - Resource-managed devfreq_unregister_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index cef2cf5347ca..878825372f6f 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT if PM_DEVFREQ_EVENT config DEVFREQ_EVENT_EXYNOS_NOCP - tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" + tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP select REGMAP_MMIO @@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP (Network on Chip) Probe counters to measure the bandwidth of AXI bus. config DEVFREQ_EVENT_EXYNOS_PPMU - tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" + tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP help @@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU config DEVFREQ_EVENT_ROCKCHIP_DFI tristate "ROCKCHIP DFI DEVFREQ event Driver" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST help This add the devfreq-event driver for Rockchip SoC. It provides DFI (DDR Monitor Module) driver to count ddr load. diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 1c565926db9f..ccc531ee6938 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support + * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h index 55cc96284a36..2d6f08cfd0c5 100644 --- a/drivers/devfreq/event/exynos-nocp.h +++ b/drivers/devfreq/event/exynos-nocp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file + * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file * * Copyright (c) 2016 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 85c7a77bf3f0..17ed980d9099 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support + * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support * * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> @@ -101,17 +101,22 @@ static struct __exynos_ppmu_events { PPMU_EVENT(dmc1_1), }; -static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +static int __exynos_ppmu_find_ppmu_id(const char *edev_name) { int i; for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) - if (!strcmp(edev->desc->name, ppmu_events[i].name)) + if (!strcmp(edev_name, ppmu_events[i].name)) return ppmu_events[i].id; return -EINVAL; } +static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +{ + return __exynos_ppmu_find_ppmu_id(edev->desc->name); +} + /* * The devfreq-event ops structure for PPMU v1.1 */ @@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np, * use default if not. */ if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) { - struct devfreq_event_dev edev; int id; /* Not all registers take the same value for * read+write data count. */ - edev.desc = &desc[j]; - id = exynos_ppmu_find_ppmu_id(&edev); + id = __exynos_ppmu_find_ppmu_id(desc[j].name); switch (id) { case PPMU_PMNCNT0: diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h index 284420047455..97f667d0cbdd 100644 --- a/drivers/devfreq/event/exynos-ppmu.h +++ b/drivers/devfreq/event/exynos-ppmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos_ppmu.h - EXYNOS PPMU header file + * exynos_ppmu.h - Exynos PPMU header file * * Copyright (c) 2015 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 5d1042188727..9a88faaf8b27 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_dfi *data; - struct resource *res; struct devfreq_event_desc *desc; struct device_node *np = pdev->dev.of_node, *node; @@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) return PTR_ERR(data->regs); @@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(data->regmap_pmu)) return PTR_ERR(data->regmap_pmu); } diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index c832673273a2..8fa8eb541373 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -15,11 +15,10 @@ #include <linux/device.h> #include <linux/export.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <linux/slab.h> #define DEFAULT_SATURATION_RATIO 40 @@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev, ret = exynos_bus_get_event(bus, &edata); if (ret < 0) { + dev_err(dev, "failed to get event from devfreq-event devices\n"); stat->total_time = stat->busy_time = 0; goto err; } @@ -287,52 +287,12 @@ err_clk: return ret; } -static int exynos_bus_probe(struct platform_device *pdev) +static int exynos_bus_profile_init(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) { - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node, *node; - struct devfreq_dev_profile *profile; + struct device *dev = bus->dev; struct devfreq_simple_ondemand_data *ondemand_data; - struct devfreq_passive_data *passive_data; - struct devfreq *parent_devfreq; - struct exynos_bus *bus; - int ret, max_state; - unsigned long min_freq, max_freq; - bool passive = false; - - if (!np) { - dev_err(dev, "failed to find devicetree node\n"); - return -EINVAL; - } - - bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) - return -ENOMEM; - mutex_init(&bus->lock); - bus->dev = &pdev->dev; - platform_set_drvdata(pdev, bus); - - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) - return -ENOMEM; - - node = of_parse_phandle(dev->of_node, "devfreq", 0); - if (node) { - of_node_put(node); - passive = true; - } else { - ret = exynos_bus_parent_parse_of(np, bus); - if (ret < 0) - return ret; - } - - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - goto err_reg; - - if (passive) - goto passive; + int ret; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev) profile->exit = exynos_bus_exit; ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL); - if (!ondemand_data) { - ret = -ENOMEM; - goto err; - } + if 
(!ondemand_data) + return -ENOMEM; + ondemand_data->upthreshold = 40; ondemand_data->downdifferential = 5; @@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev) ondemand_data); if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq device\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); } /* Register opp_notifier to catch the change of OPP */ ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq); if (ret < 0) { dev_err(dev, "failed to register opp notifier\n"); - goto err; + return ret; } /* @@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev) ret = exynos_bus_enable_edev(bus); if (ret < 0) { dev_err(dev, "failed to enable devfreq-event devices\n"); - goto err; + return ret; } ret = exynos_bus_set_event(bus); if (ret < 0) { dev_err(dev, "failed to set event to devfreq-event devices\n"); - goto err; + goto err_edev; } - goto out; -passive: + return 0; + +err_edev: + if (exynos_bus_disable_edev(bus)) + dev_warn(dev, "failed to disable the devfreq-event devices\n"); + + return ret; +} + +static int exynos_bus_profile_init_passive(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) +{ + struct device *dev = bus->dev; + struct devfreq_passive_data *passive_data; + struct devfreq *parent_devfreq; + /* Initialize the struct profile and governor data for passive device */ profile->target = exynos_bus_target; profile->exit = exynos_bus_passive_exit; /* Get the instance of parent devfreq device */ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0); - if (IS_ERR(parent_devfreq)) { - ret = -EPROBE_DEFER; - goto err; - } + if (IS_ERR(parent_devfreq)) + return -EPROBE_DEFER; passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL); - if (!passive_data) { - ret = -ENOMEM; - goto err; - } + if (!passive_data) + return -ENOMEM; + passive_data->parent = parent_devfreq; /* Add devfreq device for exynos bus with passive governor */ @@ -407,11 +376,61 @@ passive: if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq dev with passive governor\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); + } + + return 0; +} + +static int exynos_bus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node, *node; + struct devfreq_dev_profile *profile; + struct exynos_bus *bus; + int ret, max_state; + unsigned long min_freq, max_freq; + bool passive = false; + + if (!np) { + dev_err(dev, "failed to find devicetree node\n"); + return -EINVAL; } -out: + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + mutex_init(&bus->lock); + bus->dev = &pdev->dev; + platform_set_drvdata(pdev, bus); + + profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + node = of_parse_phandle(dev->of_node, "devfreq", 0); + if (node) { + of_node_put(node); + passive = true; + } else { + ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; + } + + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); + if (ret < 0) + goto err_reg; + + if (passive) + ret = exynos_bus_profile_init_passive(bus, profile); + else + ret = exynos_bus_profile_init(bus, profile); + + if (ret < 0) + goto err; + max_state = bus->devfreq->profile->max_state; min_freq = (bus->devfreq->profile->freq_table[0] / 1000); max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000); diff --git 
a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c new file mode 100644 index 000000000000..bc82d3653bff --- /dev/null +++ b/drivers/devfreq/imx8m-ddrc.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/devfreq.h> +#include <linux/pm_opp.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/arm-smccc.h> + +#define IMX_SIP_DDR_DVFS 0xc2000004 + +/* Query available frequencies. */ +#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10 +#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11 + +/* + * This should be in a 1:1 mapping with devicetree OPPs but + * firmware provides additional info. + */ +struct imx8m_ddrc_freq { + unsigned long rate; + unsigned long smcarg; + int dram_core_parent_index; + int dram_alt_parent_index; + int dram_apb_parent_index; +}; + +/* Hardware limitation */ +#define IMX8M_DDRC_MAX_FREQ_COUNT 4 + +/* + * i.MX8M DRAM Controller clocks have the following structure (abridged): + * + * +----------+ |\ +------+ + * | dram_pll |-------|M| dram_core | | + * +----------+ |U|---------->| D | + * /--|X| | D | + * dram_alt_root | |/ | R | + * | | C | + * +---------+ | | + * |FIX DIV/4| | | + * +---------+ | | + * composite: | | | + * +----------+ | | | + * | dram_alt |----/ | | + * +----------+ | | + * | dram_apb |-------------------->| | + * +----------+ +------+ + * + * The dram_pll is used for higher rates and dram_alt is used for lower rates. + * + * Frequency switching is implemented in TF-A (via SMC call) and can change the + * configuration of the clocks, including mux parents. The dram_alt and + * dram_apb clocks are "imx composite" and their parent can change too. + * + * We need to prepare/enable the new mux parents head of switching and update + * their information afterwards. + */ +struct imx8m_ddrc { + struct devfreq_dev_profile profile; + struct devfreq *devfreq; + + /* For frequency switching: */ + struct clk *dram_core; + struct clk *dram_pll; + struct clk *dram_alt; + struct clk *dram_apb; + + int freq_count; + struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT]; +}; + +static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv, + unsigned long rate) +{ + struct imx8m_ddrc_freq *freq; + int i; + + /* + * Firmware reports values in MT/s, so we round-down from Hz + * Rounding is extra generous to ensure a match. + */ + rate = DIV_ROUND_CLOSEST(rate, 250000); + for (i = 0; i < priv->freq_count; ++i) { + freq = &priv->freq_table[i]; + if (freq->rate == rate || + freq->rate + 1 == rate || + freq->rate - 1 == rate) + return freq; + } + + return NULL; +} + +static void imx8m_ddrc_smc_set_freq(int target_freq) +{ + struct arm_smccc_res res; + u32 online_cpus = 0; + int cpu; + + local_irq_disable(); + + for_each_online_cpu(cpu) + online_cpus |= (1 << (cpu * 8)); + + /* change the ddr freqency */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus, + 0, 0, 0, 0, 0, &res); + + local_irq_enable(); +} + +static struct clk *clk_get_parent_by_index(struct clk *clk, int index) +{ + struct clk_hw *hw; + + hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index); + + return hw ? 
hw->clk : NULL; +} + +static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct clk *new_dram_core_parent; + struct clk *new_dram_alt_parent; + struct clk *new_dram_apb_parent; + int ret; + + /* + * Fetch new parents + * + * new_dram_alt_parent and new_dram_apb_parent are optional but + * new_dram_core_parent is not. + */ + new_dram_core_parent = clk_get_parent_by_index( + priv->dram_core, freq->dram_core_parent_index - 1); + if (!new_dram_core_parent) { + dev_err(dev, "failed to fetch new dram_core parent\n"); + return -EINVAL; + } + if (freq->dram_alt_parent_index) { + new_dram_alt_parent = clk_get_parent_by_index( + priv->dram_alt, + freq->dram_alt_parent_index - 1); + if (!new_dram_alt_parent) { + dev_err(dev, "failed to fetch new dram_alt parent\n"); + return -EINVAL; + } + } else + new_dram_alt_parent = NULL; + + if (freq->dram_apb_parent_index) { + new_dram_apb_parent = clk_get_parent_by_index( + priv->dram_apb, + freq->dram_apb_parent_index - 1); + if (!new_dram_apb_parent) { + dev_err(dev, "failed to fetch new dram_apb parent\n"); + return -EINVAL; + } + } else + new_dram_apb_parent = NULL; + + /* increase reference counts and ensure clks are ON before switch */ + ret = clk_prepare_enable(new_dram_core_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_core parent: %d\n", + ret); + goto out; + } + ret = clk_prepare_enable(new_dram_alt_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_alt parent: %d\n", + ret); + goto out_disable_core_parent; + } + ret = clk_prepare_enable(new_dram_apb_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_apb parent: %d\n", + ret); + goto out_disable_alt_parent; + } + + imx8m_ddrc_smc_set_freq(freq->smcarg); + + /* update parents in clk tree after switch. */ + ret = clk_set_parent(priv->dram_core, new_dram_core_parent); + if (ret) + dev_warn(dev, "failed to set dram_core parent: %d\n", ret); + if (new_dram_alt_parent) { + ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent); + if (ret) + dev_warn(dev, "failed to set dram_alt parent: %d\n", + ret); + } + if (new_dram_apb_parent) { + ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent); + if (ret) + dev_warn(dev, "failed to set dram_apb parent: %d\n", + ret); + } + + /* + * Explicitly refresh dram PLL rate. + * + * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be + * automatically refreshed when clk_get_rate is called on children. + */ + clk_get_rate(priv->dram_pll); + + /* + * clk_set_parent transfer the reference count from old parent. 
+ * now we drop extra reference counts used during the switch + */ + clk_disable_unprepare(new_dram_apb_parent); +out_disable_alt_parent: + clk_disable_unprepare(new_dram_alt_parent); +out_disable_core_parent: + clk_disable_unprepare(new_dram_core_parent); +out: + return ret; +} + +static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *new_opp; + unsigned long old_freq, new_freq; + int ret; + + new_opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(new_opp)) { + ret = PTR_ERR(new_opp); + dev_err(dev, "failed to get recommended opp: %d\n", ret); + return ret; + } + dev_pm_opp_put(new_opp); + + old_freq = clk_get_rate(priv->dram_core); + if (*freq == old_freq) + return 0; + + freq_info = imx8m_ddrc_find_freq(priv, *freq); + if (!freq_info) + return -EINVAL; + + /* + * Read back the clk rate to verify switch was correct and so that + * we can report it on all error paths. + */ + ret = imx8m_ddrc_set_freq(dev, freq_info); + + new_freq = clk_get_rate(priv->dram_core); + if (ret) + dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n", + *freq, old_freq, ret, new_freq); + else if (*freq != new_freq) + dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n", + *freq, old_freq, new_freq); + else + dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n", + *freq, old_freq); + + return ret; +} + +static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + *freq = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + stat->busy_time = 0; + stat->total_time = 0; + stat->current_frequency = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_init_freq_info(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct arm_smccc_res res; + int index; + + /* An error here means DDR DVFS API not supported by firmware */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT, + 0, 0, 0, 0, 0, 0, &res); + priv->freq_count = res.a0; + if (priv->freq_count <= 0 || + priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT) + return -ENODEV; + + for (index = 0; index < priv->freq_count; ++index) { + struct imx8m_ddrc_freq *freq = &priv->freq_table[index]; + + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO, + index, 0, 0, 0, 0, 0, &res); + /* Result should be strictly positive */ + if ((long)res.a0 <= 0) + return -ENODEV; + + freq->rate = res.a0; + freq->smcarg = index; + freq->dram_core_parent_index = res.a1; + freq->dram_alt_parent_index = res.a2; + freq->dram_apb_parent_index = res.a3; + + /* dram_core has 2 options: dram_pll or dram_alt_root */ + if (freq->dram_core_parent_index != 1 && + freq->dram_core_parent_index != 2) + return -ENODEV; + /* dram_apb and dram_alt have exactly 8 possible parents */ + if (freq->dram_alt_parent_index > 8 || + freq->dram_apb_parent_index > 8) + return -ENODEV; + /* dram_core from alt requires explicit dram_alt parent */ + if (freq->dram_core_parent_index == 2 && + freq->dram_alt_parent_index == 0) + return -ENODEV; + } + + return 0; +} + +static int imx8m_ddrc_check_opps(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *opp; + unsigned long freq; + 
int i, opp_count; + + /* Enumerate DT OPPs and disable those not supported by firmware */ + opp_count = dev_pm_opp_get_opp_count(dev); + if (opp_count < 0) + return opp_count; + for (i = 0, freq = 0; i < opp_count; ++i, ++freq) { + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) { + dev_err(dev, "Failed enumerating OPPs: %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + dev_pm_opp_put(opp); + + freq_info = imx8m_ddrc_find_freq(priv, freq); + if (!freq_info) { + dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n", + freq, DIV_ROUND_CLOSEST(freq, 250000)); + dev_pm_opp_disable(dev, freq); + } + } + + return 0; +} + +static void imx8m_ddrc_exit(struct device *dev) +{ + dev_pm_opp_of_remove_table(dev); +} + +static int imx8m_ddrc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct imx8m_ddrc *priv; + const char *gov = DEVFREQ_GOV_USERSPACE; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + ret = imx8m_ddrc_init_freq_info(dev); + if (ret) { + dev_err(dev, "failed to init firmware freq info: %d\n", ret); + return ret; + } + + priv->dram_core = devm_clk_get(dev, "core"); + if (IS_ERR(priv->dram_core)) { + ret = PTR_ERR(priv->dram_core); + dev_err(dev, "failed to fetch core clock: %d\n", ret); + return ret; + } + priv->dram_pll = devm_clk_get(dev, "pll"); + if (IS_ERR(priv->dram_pll)) { + ret = PTR_ERR(priv->dram_pll); + dev_err(dev, "failed to fetch pll clock: %d\n", ret); + return ret; + } + priv->dram_alt = devm_clk_get(dev, "alt"); + if (IS_ERR(priv->dram_alt)) { + ret = PTR_ERR(priv->dram_alt); + dev_err(dev, "failed to fetch alt clock: %d\n", ret); + return ret; + } + priv->dram_apb = devm_clk_get(dev, "apb"); + if (IS_ERR(priv->dram_apb)) { + ret = PTR_ERR(priv->dram_apb); + dev_err(dev, "failed to fetch apb clock: %d\n", ret); + return ret; + } + + ret = dev_pm_opp_of_add_table(dev); + if (ret < 0) { + dev_err(dev, "failed to get OPP table\n"); + return ret; + } + + ret = imx8m_ddrc_check_opps(dev); + if (ret < 0) + goto err; + + priv->profile.polling_ms = 1000; + priv->profile.target = imx8m_ddrc_target; + priv->profile.get_dev_status = imx8m_ddrc_get_dev_status; + priv->profile.exit = imx8m_ddrc_exit; + priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq; + priv->profile.initial_freq = clk_get_rate(priv->dram_core); + + priv->devfreq = devm_devfreq_add_device(dev, &priv->profile, + gov, NULL); + if (IS_ERR(priv->devfreq)) { + ret = PTR_ERR(priv->devfreq); + dev_err(dev, "failed to add devfreq device: %d\n", ret); + goto err; + } + + return 0; + +err: + dev_pm_opp_of_remove_table(dev); + return ret; +} + +static const struct of_device_id imx8m_ddrc_of_match[] = { + { .compatible = "fsl,imx8m-ddrc", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match); + +static struct platform_driver imx8m_ddrc_platdrv = { + .probe = imx8m_ddrc_probe, + .driver = { + .name = "imx8m-ddrc-devfreq", + .of_match_table = of_match_ptr(imx8m_ddrc_of_match), + }, +}; +module_platform_driver(imx8m_ddrc_platdrv); + +MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver"); +MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 2e65d7279d79..24f04f78285b 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) if (res.a0) { dev_err(dev, 
"Failed to set dram param: %ld\n", res.a0); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } } } @@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) { + ret = PTR_ERR(data->regmap_pmu); + goto err_edev; + } } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); @@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_edev; }; arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, @@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) */ if (dev_pm_opp_of_add_table(dev)) { dev_err(dev, "Invalid operating-points in device tree.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } of_property_read_u32(np, "upthreshold", @@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) err_free_opp: dev_pm_opp_of_remove_table(&pdev->dev); +err_edev: + devfreq_event_disable_edev(data->edev); + return ret; } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6fa1eba9d477..5142da401db3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -239,6 +239,14 @@ config FSL_RAID the capability to offload memcpy, xor and pq computation for raid5/6. +config HISI_DMA + tristate "HiSilicon DMA Engine support" + depends on ARM64 || (COMPILE_TEST && PCI_MSI) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support HiSilicon Kunpeng DMA engine. + config IMG_MDC_DMA tristate "IMG MDC support" depends on MIPS || COMPILE_TEST @@ -273,6 +281,19 @@ config INTEL_IDMA64 Enable DMA support for Intel Low Power Subsystem such as found on Intel Skylake PCH. +config INTEL_IDXD + tristate "Intel Data Accelerators support" + depends on PCI && X86_64 + select DMA_ENGINE + select SBITMAP + help + Enable support for the Intel(R) data accelerators present + in Intel Xeon CPU. + + Say Y if you have such a platform. + + If unsure, say N. + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 @@ -497,6 +518,15 @@ config PXA_DMA 16 to 32 channels for peripheral to memory or memory to memory transfers. +config PLX_DMA + tristate "PLX ExpressLane PEX Switch DMA Engine Support" + depends on PCI + select DMA_ENGINE + help + Some PLX ExpressLane PCI Switches support additional DMA engines. + These are exposed via extra functions on the switch's + upstream port. Each function exposes one DMA channel. 
+ config SIRF_DMA tristate "CSR SiRFprimaII/SiRFmarco DMA support" depends on ARCH_SIRF diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 42d7e2fc64fa..1d908394fbea 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o +obj-$(CONFIG_HISI_DMA) += hisi_dma.o obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ +obj-$(CONFIG_INTEL_IDXD) += idxd/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o @@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_OWL_DMA) += owl-dma.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PLX_DMA) += plx_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e4c593f48575..4768ef26013b 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) /* stop DMA activity */ if (c->desc) { - if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT) - vchan_terminate_vdesc(&c->desc->vd); - else - vchan_vdesc_fini(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; bcm2835_dma_abort(c); } diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index a0ee404b736e..f1d149e32839 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev) struct dma_device *dma_dev; struct axi_dmac *dmac; struct resource *res; + struct regmap *regmap; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, + &axi_dmac_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err_free_irq; + } return 0; +err_free_irq: + free_irq(dmac->irq, dmac); err_unregister_of: of_dma_controller_free(pdev->dev.of_node); err_unregister_device: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 44af435628f8..448f663da89c 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = { .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, }; +static const struct jz4780_dma_soc_data x1830_dma_soc_data = { + .nb_channels = 32, + .transfer_ord_max = 7, + .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, +}; + static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, + { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); 
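The dma-jz4780.c hunk above brings up the X1830 entirely through match data: one new jz4780_dma_soc_data instance and one extra of_device_id entry. A sketch of this compatible-plus-match-data idiom under a hypothetical my_* driver; only the "ingenic,x1830-dma" string and the two field values come from the patch:

#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
};

static const struct my_soc_data my_x1830_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
};

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "ingenic,x1830-dma", .data = &my_x1830_data },
	{ /* sentinel */ },
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *soc;

	/* Resolve the SoC parameters bound to the matched compatible. */
	soc = device_get_match_data(&pdev->dev);
	if (!soc)
		return -EINVAL;

	/* ... size channel allocation by soc->nb_channels ... */
	return 0;
}

Supporting a new SoC this way needs no new code paths, which keeps per-chip differences declarative and easy to review.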
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..f3ef4edd4de1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -60,6 +60,8 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ +#define DMA_SLAVE_NAME "slave" + /** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node @@ -164,11 +166,152 @@ static struct class dma_devclass = { /* --- client and device registration --- */ -#define dma_device_satisfies_mask(device, mask) \ - __dma_device_satisfies_mask((device), &(mask)) -static int -__dma_device_satisfies_mask(struct dma_device *device, - const dma_cap_mask_t *want) +/** + * dma_cap_mask_all - enable iteration over all operation types + */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan - associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/** + * channel_table - percpu lookup table for memory-to-memory offload providers + */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine dma_channel_table_init failure: %d\n", err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(dma_channel_table_init); + +/** + * dma_chan_is_local - returns true if the channel is in the same numa-node as + * the cpu + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - returns the channel with min count and in the same numa-node as + * the cpu + * @cap: capability to match + * @cpu: cpu index which the channel should be close to + * + * If some channels are close to the given cpu, the one with the lowest + * reference count is returned. Otherwise, cpu is ignored and only the + * reference count is taken into account. + * Must be called under dma_list_mutex. + */ +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? 
localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * dma_channel_rebalance - redistribute the available channels + * + * Optimize for cpu isolation (each cpu gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. Must be called under + * dma_list_mutex. + */ +static void dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan) } } +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + /** * dma_chan_get - try to grab a dma channel's parent driver module * @chan - channel to grab @@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan) if (!try_module_get(owner)) return -ENODEV; + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + /* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) { ret = chan->device->device_alloc_chan_resources(chan); @@ -233,6 +399,8 @@ out: return 0; err_out: + dma_device_put(chan->device); +module_put_out: module_put(owner); return ret; } @@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan) return; chan->client_count--; - module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ if (!chan->client_count && chan->device->device_free_chan_resources) { @@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan) chan->router = NULL; chan->route_data = NULL; } + + dma_device_put(chan->device); + module_put(dma_chan_to_owner(chan)); } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) @@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) EXPORT_SYMBOL(dma_sync_wait); /** - * dma_cap_mask_all - enable iteration over all operation types - */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan - associated channel for this entry - */ -struct dma_chan_tbl_ent { - struct 
dma_chan *chan; -}; - -/** - * channel_table - percpu lookup table for memory-to-memory offload providers - */ -static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; - -static int __init dma_channel_table_init(void) -{ - enum dma_transaction_type cap; - int err = 0; - - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* 'interrupt', 'private', and 'slave' are channel capabilities, - * but are not associated with an operation so they do not need - * an entry in the channel_table - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); - clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); - if (!channel_table[cap]) { - err = -ENOMEM; - break; - } - } - - if (err) { - pr_err("initialization failure\n"); - for_each_dma_cap_mask(cap, dma_cap_mask_all) - free_percpu(channel_table[cap]); - } - - return err; -} -arch_initcall(dma_channel_table_init); - -/** * dma_find_channel - find a channel to carry out the operation * @tx_type: transaction type */ @@ -369,97 +488,6 @@ void dma_issue_pending_all(void) } EXPORT_SYMBOL(dma_issue_pending_all); -/** - * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu - */ -static bool dma_chan_is_local(struct dma_chan *chan, int cpu) -{ - int node = dev_to_node(chan->device->dev); - return node == NUMA_NO_NODE || - cpumask_test_cpu(cpu, cpumask_of_node(node)); -} - -/** - * min_chan - returns the channel with min count and in the same numa-node as the cpu - * @cap: capability to match - * @cpu: cpu index which the channel should be close to - * - * If some channels are close to the given cpu, the one with the lowest - * reference count is returned. Otherwise, cpu is ignored and only the - * reference count is taken into account. - * Must be called under dma_list_mutex. - */ -static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) -{ - struct dma_device *device; - struct dma_chan *chan; - struct dma_chan *min = NULL; - struct dma_chan *localmin = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (!dma_has_cap(cap, device->cap_mask) || - dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - if (!chan->client_count) - continue; - if (!min || chan->table_count < min->table_count) - min = chan; - - if (dma_chan_is_local(chan, cpu)) - if (!localmin || - chan->table_count < localmin->table_count) - localmin = chan; - } - } - - chan = localmin ? localmin : min; - - if (chan) - chan->table_count++; - - return chan; -} - -/** - * dma_channel_rebalance - redistribute the available channels - * - * Optimize for cpu isolation (each cpu gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. Must be called under - * dma_list_mutex. 
- */ -static void dma_channel_rebalance(void) -{ - struct dma_chan *chan; - struct dma_device *device; - int cpu; - int cap; - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) - per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - chan->table_count = 0; - } - - /* don't populate the channel_table if no clients are available */ - if (!dmaengine_ref_count) - return; - - /* redistribute available channels */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - chan = min_chan(cap, cpu); - per_cpu_ptr(channel_table[cap], cpu)->chan = chan; - } -} - int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { struct dma_device *device; @@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, { struct dma_chan *chan; - if (mask && !__dma_device_satisfies_mask(dev, mask)) { + if (mask && !dma_device_satisfies_mask(dev, mask)) { dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); return NULL; } @@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) if (has_acpi_companion(dev) && !chan) chan = acpi_dma_request_slave_chan_by_name(dev, name); - if (chan) { - /* Valid channel found or requester needs to be deferred */ - if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - } + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; /* Try to find the channel via the DMA filter map(s) */ mutex_lock(&dma_list_mutex); @@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) } mutex_unlock(&dma_list_mutex); - return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); + if (!IS_ERR_OR_NULL(chan)) + goto found; + + return ERR_PTR(-EPROBE_DEFER); + +found: + chan->slave = dev; + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return ERR_PTR(-ENOMEM); + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_err(dev, "Cannot create DMA %s symlink\n", chan->name); + return chan; } EXPORT_SYMBOL_GPL(dma_request_chan); @@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + if (chan->slave) { + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); @@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get); */ void dmaengine_put(void) { - struct dma_device *device; + struct dma_device *device, *_d; struct dma_chan *chan; mutex_lock(&dma_list_mutex); dmaengine_ref_count--; BUG_ON(dmaengine_ref_count < 0); /* drop channel references */ - list_for_each_entry(device, &dma_device_list, global_node) { + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue; list_for_each_entry(chan, &device->channels, device_node) @@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device) return 0; } +static int __dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan, + int chan_id) +{ + int rc = 0; + int chancnt = device->chancnt; + atomic_t *idr_ref; + struct dma_chan *tchan; + + tchan = list_first_entry_or_null(&device->channels, + struct dma_chan, device_node); + if (tchan->dev) { + idr_ref = tchan->dev->idr_ref; + } else { + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); + if (!idr_ref) + return -ENOMEM; + atomic_set(idr_ref, 0); + } + + chan->local = alloc_percpu(typeof(*chan->local)); + if (!chan->local) + goto err_out; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + free_percpu(chan->local); + chan->local = NULL; + goto err_out; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->idr_ref = idr_ref; + chan->dev->dev_id = device->dev_id; + atomic_inc(idr_ref); + dev_set_name(&chan->dev->device, "dma%dchan%d", + device->dev_id, chan->chan_id); + + rc = device_register(&chan->dev->device); + if (rc) + goto err_out; + chan->client_count = 0; + device->chancnt = chan->chan_id + 1; + + return 0; + + err_out: + free_percpu(chan->local); + kfree(chan->dev); + if (atomic_dec_return(idr_ref) == 0) + kfree(idr_ref); + return rc; +} + +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan, -1); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + +static void __dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", + __func__, chan->client_count); + mutex_lock(&dma_list_mutex); + list_del(&chan->device_node); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); +} + +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. */ int dma_async_device_register(struct dma_device *device) { - int chancnt = 0, rc; + int rc, i = 0; struct dma_chan* chan; - atomic_t *idr_ref; if (!device) return -ENODEV; @@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", @@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (!device->device_release) + dev_warn(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ if (device_has_all_tx_types(device)) dma_cap_set(DMA_ASYNC_TX, device->cap_mask); - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); - if (!idr_ref) - return -ENOMEM; rc = get_dma_id(device); - if (rc != 0) { - kfree(idr_ref); + if (rc != 0) return rc; - } - - atomic_set(idr_ref, 0); /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = -ENOMEM; - chan->local = alloc_percpu(typeof(*chan->local)); - if (chan->local == NULL) - goto err_out; - chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); - if (chan->dev == NULL) { - free_percpu(chan->local); - chan->local = NULL; + rc = __dma_async_device_channel_register(device, chan, i++); + if (rc < 0) goto err_out; - } - - chan->chan_id = chancnt++; - chan->dev->device.class = &dma_devclass; - chan->dev->device.parent = device->dev; - chan->dev->chan = chan; - chan->dev->idr_ref = idr_ref; - chan->dev->dev_id = device->dev_id; - atomic_inc(idr_ref); - dev_set_name(&chan->dev->device, "dma%dchan%d", - device->dev_id, chan->chan_id); - - rc = device_register(&chan->dev->device); - if (rc) { - free_percpu(chan->local); - chan->local = NULL; - kfree(chan->dev); - atomic_dec(idr_ref); - goto err_out; - } - chan->client_count = 0; - } - - if (!chancnt) { - dev_err(device->dev, "%s: device has no channels!\n", __func__); - rc = -ENODEV; - goto err_out; } - device->chancnt = chancnt; - mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) @@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device) err_out: /* if we never registered a channel just release the idr */ - if (atomic_read(idr_ref) == 0) { + if (!device->chancnt) { ida_free(&dma_ida, device->dev_id); - kfree(idr_ref); return rc; } @@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register); */ void dma_async_device_unregister(struct dma_device *device) { - struct dma_chan *chan; + struct dma_chan *chan, *n; + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __dma_async_device_channel_unregister(device, chan); mutex_lock(&dma_list_mutex); - list_del_rcu(&device->global_node); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_channel_rebalance(); + dma_device_put(device); mutex_unlock(&dma_list_mutex); - - list_for_each_entry(chan, &device->channels, device_node) { - WARN_ONCE(chan->client_count, - "%s called while %d clients hold a reference\n", - __func__, chan->client_count); - mutex_lock(&dma_list_mutex); - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - device_unregister(&chan->dev->device); - free_percpu(chan->local); - } } EXPORT_SYMBOL(dma_async_device_unregister); @@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, } EXPORT_SYMBOL(dma_async_tx_descriptor_init); +static inline int desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void 
*dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on */ @@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void) return class_register(&dma_devclass); } arch_initcall(dma_bus_init); - - diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f85..e8a320c9e57c 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan, state->last = complete; state->used = used; state->residue = 0; + state->in_flight_bytes = 0; } return dma_async_is_complete(cookie, complete, used); } @@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; @@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) return (cb->callback) ? true : false; } +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + #endif diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a1ce307c502f..14c1ac26f866 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) vchan_get_all_descriptors(&chan->vc, &head); - /* - * As vchan_dma_desc_free_list can access to desc_allocated list - * we need to call it in vc.lock context. 
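The two metadata modes added above are mutually exclusive per descriptor: DESC_METADATA_CLIENT hands the driver a client-owned buffer via dmaengine_desc_attach_metadata(), while DESC_METADATA_ENGINE exposes the engine's own buffer through dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len(). A minimal client-mode sketch on the transmit path, assuming a slave channel chan and a hypothetical my_metadata buffer (illustrative, not part of the patch):

	struct dma_async_tx_descriptor *desc;
	int ret;

	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* attach client-owned metadata before submitting */
	ret = dmaengine_desc_attach_metadata(desc, my_metadata, my_metadata_len);
	if (ret)
		return ret;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
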
- */ - vchan_dma_desc_free_list(&chan->vc, &head); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_dma_desc_free_list(&chan->vc, &head); + dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); return 0; diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b1a7ca91701a..5697c3622699 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + int endian_diff[4] = {3, 1, -1, -3}; u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + + if (fsl_chan->edma->drvdata->mux_swap) + ch_off += endian_diff[ch_off % 4]; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 5eaa2902ed39..67e422590c9a 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -147,6 +147,7 @@ struct fsl_edma_drvdata { enum edma_version version; u32 dmamuxs; bool has_dmaclk; + bool mux_swap; int (*setup_irq)(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma); }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b626c06ac2e0..eff7ebd8cf35 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = { .setup_irq = fsl_edma_irq_init, }; +static struct fsl_edma_drvdata ls1028a_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .mux_swap = true, + .setup_irq = fsl_edma_irq_init, +}; + static struct fsl_edma_drvdata imx7ulp_data = { .version = v3, .dmamuxs = 1, @@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = { static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { /* sentinel */ } }; diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 89792083d62c..95cc0256b387 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool) + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) return; list_for_each_entry_safe(comp_temp, _comp_temp, diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c new file mode 100644 index 000000000000..ed3619266a48 --- /dev/null +++ b/drivers/dma/hisi_dma.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2019 HiSilicon Limited. 
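The endian_diff[] table in the fsl-edma mux hunk above remaps the byte-wide CHCFG mux registers on the LS1028A (mux_swap), whose byte lanes appear mirrored within each 32-bit word, presumably due to a big-endian bus attachment: offsets are swapped 0<->3 and 1<->2 inside every aligned group of four. A standalone illustration of the remap (the helper name is hypothetical):

	static unsigned int ls1028a_mux_off(unsigned int ch_off)
	{
		static const int endian_diff[4] = {3, 1, -1, -3};

		return ch_off + endian_diff[ch_off % 4];
	}

	/* ls1028a_mux_off(0) == 3, (1) == 2, (2) == 1, (3) == 0,
	 * ls1028a_mux_off(4) == 7, (5) == 6, and so on. */
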
*/ +#include <linux/bitfield.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include "virt-dma.h" + +#define HISI_DMA_SQ_BASE_L 0x0 +#define HISI_DMA_SQ_BASE_H 0x4 +#define HISI_DMA_SQ_DEPTH 0x8 +#define HISI_DMA_SQ_TAIL_PTR 0xc +#define HISI_DMA_CQ_BASE_L 0x10 +#define HISI_DMA_CQ_BASE_H 0x14 +#define HISI_DMA_CQ_DEPTH 0x18 +#define HISI_DMA_CQ_HEAD_PTR 0x1c +#define HISI_DMA_CTRL0 0x20 +#define HISI_DMA_CTRL0_QUEUE_EN_S 0 +#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4 +#define HISI_DMA_CTRL1 0x24 +#define HISI_DMA_CTRL1_QUEUE_RESET_S 0 +#define HISI_DMA_Q_FSM_STS 0x30 +#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0) +#define HISI_DMA_INT_STS 0x40 +#define HISI_DMA_INT_STS_MASK GENMASK(12, 0) +#define HISI_DMA_INT_MSK 0x44 +#define HISI_DMA_MODE 0x217c +#define HISI_DMA_OFFSET 0x100 + +#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_CHAN_NUM 30 +#define HISI_DMA_Q_DEPTH_VAL 1024 + +#define PCI_BAR_2 2 + +enum hisi_dma_mode { + EP = 0, + RC, +}; + +enum hisi_dma_chan_status { + DISABLE = -1, + IDLE = 0, + RUN, + CPL, + PAUSE, + HALT, + ABORT, + WAIT, + BUFFCLR, +}; + +struct hisi_dma_sqe { + __le32 dw0; +#define OPCODE_MASK GENMASK(3, 0) +#define OPCODE_SMALL_PACKAGE 0x1 +#define OPCODE_M2M 0x4 +#define LOCAL_IRQ_EN BIT(8) +#define ATTR_SRC_MASK GENMASK(14, 12) + __le32 dw1; + __le32 dw2; +#define ATTR_DST_MASK GENMASK(26, 24) + __le32 length; + __le64 src_addr; + __le64 dst_addr; +}; + +struct hisi_dma_cqe { + __le32 rsv0; + __le32 rsv1; + __le16 sq_head; + __le16 rsv2; + __le16 rsv3; + __le16 w0; +#define STATUS_MASK GENMASK(15, 1) +#define STATUS_SUCC 0x0 +#define VALID_BIT BIT(0) +}; + +struct hisi_dma_desc { + struct virt_dma_desc vd; + struct hisi_dma_sqe sqe; +}; + +struct hisi_dma_chan { + struct virt_dma_chan vc; + struct hisi_dma_dev *hdma_dev; + struct hisi_dma_sqe *sq; + struct hisi_dma_cqe *cq; + dma_addr_t sq_dma; + dma_addr_t cq_dma; + u32 sq_tail; + u32 cq_head; + u32 qp_num; + enum hisi_dma_chan_status status; + struct hisi_dma_desc *desc; +}; + +struct hisi_dma_dev { + struct pci_dev *pdev; + void __iomem *base; + struct dma_device dma_dev; + u32 chan_num; + u32 chan_depth; + struct hisi_dma_chan chan[]; +}; + +static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct hisi_dma_chan, vc.chan); +} + +static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct hisi_dma_desc, vd); +} + +static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index, + u32 val) +{ + writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET); +} + +static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val) +{ + u32 tmp; + + tmp = readl_relaxed(addr); + tmp = val ? 
tmp | BIT(pos) : tmp & ~BIT(pos); + writel_relaxed(tmp, addr); +} + +static void hisi_dma_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool pause) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause); +} + +static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool enable) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable); +} + +static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index, + HISI_DMA_INT_STS_MASK); +} + +static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + void __iomem *base = hdma_dev->base; + + hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index, + HISI_DMA_INT_STS_MASK); + hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0); +} + +static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1); +} + +static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +{ + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + u32 index = chan->qp_num, tmp; + int ret; + + hisi_dma_pause_dma(hdma_dev, index, true); + hisi_dma_enable_dma(hdma_dev, index, false); + hisi_dma_mask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n"); + WARN_ON(1); + } + + hisi_dma_do_reset(hdma_dev, index); + hisi_dma_reset_qp_point(hdma_dev, index); + hisi_dma_pause_dma(hdma_dev, index, false); + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n"); + WARN_ON(1); + } +} + +static void hisi_dma_free_chan_resources(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + + hisi_dma_reset_hw_chan(chan); + vchan_free_chan_resources(&chan->vc); + + memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); + memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth); + chan->sq_tail = 0; + chan->cq_head = 0; + chan->status = DISABLE; +} + +static void hisi_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_hisi_dma_desc(vd)); +} + +static struct dma_async_tx_descriptor * +hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->sqe.length = cpu_to_le32(len); + desc->sqe.src_addr = cpu_to_le64(src); + 
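+	/* SQE fields are little endian (__le32/__le64), hence cpu_to_le*() */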
desc->sqe.dst_addr = cpu_to_le64(dst); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); +} + +static enum dma_status +hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(c, cookie, txstate); +} + +static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) +{ + struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); + chan->desc = NULL; + return; + } + list_del(&vd->node); + desc = to_hisi_dma_desc(vd); + chan->desc = desc; + + memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); + + /* update other field in sqe */ + sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); + sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); + + /* make sure data has been updated in sqe */ + wmb(); + + /* update sq tail, point to new sqe position */ + chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth; + + /* update sq_tail to trigger a new task */ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num, + chan->sq_tail); +} + +static void hisi_dma_issue_pending(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + if (vchan_issue_pending(&chan->vc)) + hisi_dma_start_transfer(chan); + + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static int hisi_dma_terminate_all(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vc.lock, flags); + + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vd); + chan->desc = NULL; + } + + vchan_get_all_descriptors(&chan->vc, &head); + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + vchan_dma_desc_free_list(&chan->vc, &head); + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false); + + return 0; +} + +static void hisi_dma_synchronize(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + + vchan_synchronize(&chan->vc); +} + +static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev) +{ + size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth; + size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth; + struct device *dev = &hdma_dev->pdev->dev; + struct hisi_dma_chan *chan; + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + chan = &hdma_dev->chan[i]; + chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma, + GFP_KERNEL); + if (!chan->sq) + return -ENOMEM; + + chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma, + GFP_KERNEL); + if (!chan->cq) + return -ENOMEM; + } + + return 0; +} + +static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) +{ + struct hisi_dma_chan *chan = &hdma_dev->chan[index]; + u32 hw_depth = hdma_dev->chan_depth - 1; + void __iomem *base = hdma_dev->base; + + /* set sq, cq base */ + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index, + lower_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index, + upper_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index, + lower_32_bits(chan->cq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index, + upper_32_bits(chan->cq_dma)); + + /* set sq, cq depth */ + hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, 
hw_depth); + hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth); + + /* init sq tail and cq head */ + hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_init_hw_qp(hdma_dev, qp_index); + hisi_dma_unmask_irq(hdma_dev, qp_index); + hisi_dma_enable_dma(hdma_dev, qp_index, true); +} + +static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); +} + +static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hdma_dev->chan[i].qp_num = i; + hdma_dev->chan[i].hdma_dev = hdma_dev; + hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free; + vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev); + hisi_dma_enable_qp(hdma_dev, i); + } +} + +static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hisi_dma_disable_qp(hdma_dev, i); + tasklet_kill(&hdma_dev->chan[i].vc.task); + } +} + +static irqreturn_t hisi_dma_irq(int irq, void *data) +{ + struct hisi_dma_chan *chan = data; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct hisi_dma_cqe *cqe; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + desc = chan->desc; + cqe = chan->cq + chan->cq_head; + if (desc) { + if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { + chan->cq_head = (chan->cq_head + 1) % + hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, + HISI_DMA_CQ_HEAD_PTR, chan->qp_num, + chan->cq_head); + vchan_cookie_complete(&desc->vd); + } else { + dev_err(&hdma_dev->pdev->dev, "task error!\n"); + } + + chan->desc = NULL; + } + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev) +{ + struct pci_dev *pdev = hdma_dev->pdev; + int i, ret; + + for (i = 0; i < hdma_dev->chan_num; i++) { + ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + hisi_dma_irq, IRQF_SHARED, "hisi_dma", + &hdma_dev->chan[i]); + if (ret) + return ret; + } + + return 0; +} + +/* This function enables all hw channels in a device */ +static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev) +{ + int ret; + + ret = hisi_dma_alloc_qps_mem(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n"); + return ret; + } + + ret = hisi_dma_request_qps_irq(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n"); + return ret; + } + + hisi_dma_enable_qps(hdma_dev); + + return 0; +} + +static void hisi_dma_disable_hw_channels(void *data) +{ + hisi_dma_disable_qps(data); +} + +static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev, + enum hisi_dma_mode mode) +{ + writel_relaxed(mode == RC ? 
1 : 0, hdma_dev->base + HISI_DMA_MODE); +} + +static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct hisi_dma_dev *hdma_dev; + struct dma_device *dma_dev; + size_t dev_size; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "failed to enable device mem!\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev)); + if (ret) { + dev_err(dev, "failed to remap I/O region!\n"); + return ret; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM + + sizeof(*hdma_dev); + hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL); + if (!hdma_dev) + return -EINVAL; + + hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2]; + hdma_dev->pdev = pdev; + hdma_dev->chan_num = HISI_DMA_CHAN_NUM; + hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL; + + pci_set_drvdata(pdev, hdma_dev); + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM, + PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to allocate MSI vectors!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev); + if (ret) + return ret; + + dma_dev = &hdma_dev->dma_dev; + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources; + dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy; + dma_dev->device_tx_status = hisi_dma_tx_status; + dma_dev->device_issue_pending = hisi_dma_issue_pending; + dma_dev->device_terminate_all = hisi_dma_terminate_all; + dma_dev->device_synchronize = hisi_dma_synchronize; + dma_dev->directions = BIT(DMA_MEM_TO_MEM); + dma_dev->dev = dev; + INIT_LIST_HEAD(&dma_dev->channels); + + hisi_dma_set_mode(hdma_dev, RC); + + ret = hisi_dma_enable_hw_channels(hdma_dev); + if (ret < 0) { + dev_err(dev, "failed to enable hw channel!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, + hdma_dev); + if (ret) + return ret; + + ret = dmaenginem_async_device_register(dma_dev); + if (ret < 0) + dev_err(dev, "failed to register device!\n"); + + return ret; +} + +static const struct pci_device_id hisi_dma_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) }, + { 0, } +}; + +static struct pci_driver hisi_dma_pci_driver = { + .name = "hisi_dma", + .id_table = hisi_dma_pci_tbl, + .probe = hisi_dma_probe, +}; + +module_pci_driver(hisi_dma_pci_driver); + +MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); +MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>"); +MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl); diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile new file mode 100644 index 000000000000..8978b898d777 --- /dev/null +++ b/drivers/dma/idxd/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IDXD) += idxd.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c new file mode 100644 index 000000000000..1d7347825b95 --- /dev/null +++ b/drivers/dma/idxd/cdev.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
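Two notes on the HiSilicon probe above: devm-managed release actions run in reverse order of registration, so on unbind hisi_dma_disable_hw_channels() runs before hisi_dma_free_irq_vectors(); and the -EINVAL returned when devm_kzalloc() fails would conventionally be -ENOMEM. A condensed view of the teardown ordering (illustrative only):

	pcim_enable_device(pdev);                  /* managed PCI enable */
	pci_alloc_irq_vectors(pdev, n, n, PCI_IRQ_MSI);
	devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);        /* (2) */
	hisi_dma_enable_hw_channels(hdma_dev);
	devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, hdma_dev); /* (1) */

	/* on driver unbind: (1) fires, then (2), then the managed PCI teardown */
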
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/sched/task.h> +#include <linux/intel-svm.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +struct idxd_cdev_context { + const char *name; + dev_t devt; + struct ida minor_ida; +}; + +/* + * ictx is an array based off of accelerator types. enum idxd_type + * is used as index + */ +static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = { + { .name = "dsa" }, +}; + +struct idxd_user_context { + struct idxd_wq *wq; + struct task_struct *task; + unsigned int flags; +}; + +enum idxd_cdev_cleanup { + CDEV_NORMAL = 0, + CDEV_FAILED, +}; + +static void idxd_cdev_dev_release(struct device *dev) +{ + dev_dbg(dev, "releasing cdev device\n"); + kfree(dev); +} + +static struct device_type idxd_cdev_device_type = { + .name = "idxd_cdev", + .release = idxd_cdev_dev_release, +}; + +static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) +{ + struct cdev *cdev = inode->i_cdev; + + return container_of(cdev, struct idxd_cdev, cdev); +} + +static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev) +{ + return container_of(idxd_cdev, struct idxd_wq, idxd_cdev); +} + +static inline struct idxd_wq *inode_wq(struct inode *inode) +{ + return idxd_cdev_wq(inode_idxd_cdev(inode)); +} + +static int idxd_cdev_open(struct inode *inode, struct file *filp) +{ + struct idxd_user_context *ctx; + struct idxd_device *idxd; + struct idxd_wq *wq; + struct device *dev; + struct idxd_cdev *idxd_cdev; + + wq = inode_wq(inode); + idxd = wq->idxd; + dev = &idxd->pdev->dev; + idxd_cdev = &wq->idxd_cdev; + + dev_dbg(dev, "%s called\n", __func__); + + if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq)) + return -EBUSY; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->wq = wq; + filp->private_data = ctx; + idxd_wq_get(wq); + return 0; +} + +static int idxd_cdev_release(struct inode *node, struct file *filep) +{ + struct idxd_user_context *ctx = filep->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + + dev_dbg(dev, "%s called\n", __func__); + filep->private_data = NULL; + + kfree(ctx); + idxd_wq_put(wq); + return 0; +} + +static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, + const char *func) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { + dev_info_ratelimited(dev, + "%s: %s: mapping too large: %lu\n", + current->comm, func, + vma->vm_end - vma->vm_start); + return -EINVAL; + } + + return 0; +} + +static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR); + unsigned long pfn; + int rc; + + dev_dbg(&pdev->dev, "%s called\n", __func__); + rc = check_vma(wq, vma, __func__); + + vma->vm_flags |= VM_DONTCOPY; + pfn = (base + idxd_get_wq_portal_full_offset(wq->id, + IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_private_data = ctx; + + return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, + vma->vm_page_prot); +} + +static __poll_t 
idxd_cdev_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + unsigned long flags; + __poll_t out = 0; + + poll_wait(filp, &idxd_cdev->err_queue, wait); + spin_lock_irqsave(&idxd->dev_lock, flags); + if (idxd->sw_err.valid) + out = EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return out; +} + +static const struct file_operations idxd_cdev_fops = { + .owner = THIS_MODULE, + .open = idxd_cdev_open, + .release = idxd_cdev_release, + .mmap = idxd_cdev_mmap, + .poll = idxd_cdev_poll, +}; + +int idxd_cdev_get_major(struct idxd_device *idxd) +{ + return MAJOR(ictx[idxd->type].devt); +} + +static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + struct device *dev; + int minor, rc; + + idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL); + if (!idxd_cdev->dev) + return -ENOMEM; + + dev = idxd_cdev->dev; + dev->parent = &idxd->pdev->dev; + dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd), + idxd->id, wq->id); + dev->bus = idxd_get_bus_type(idxd); + + cdev_ctx = &ictx[wq->idxd->type]; + minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + if (minor < 0) { + rc = minor; + goto ida_err; + } + + dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); + dev->type = &idxd_cdev_device_type; + rc = device_register(dev); + if (rc < 0) { + dev_err(&idxd->pdev->dev, "device register failed\n"); + put_device(dev); + goto dev_reg_err; + } + idxd_cdev->minor = minor; + + return 0; + + dev_reg_err: + ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); + ida_err: + kfree(dev); + idxd_cdev->dev = NULL; + return rc; +} + +static void idxd_wq_cdev_cleanup(struct idxd_wq *wq, + enum idxd_cdev_cleanup cdev_state) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + + cdev_ctx = &ictx[wq->idxd->type]; + if (cdev_state == CDEV_NORMAL) + cdev_del(&idxd_cdev->cdev); + device_unregister(idxd_cdev->dev); + /* + * The device_type->release() will be called on the device and free + * the allocated struct device. We can just forget it. 
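+ * (idxd_cdev_dev_release() above does the kfree() once
+ * device_unregister() drops the final reference.)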
+ */ + ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + idxd_cdev->dev = NULL; + idxd_cdev->minor = -1; +} + +int idxd_wq_add_cdev(struct idxd_wq *wq) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct cdev *cdev = &idxd_cdev->cdev; + struct device *dev; + int rc; + + rc = idxd_wq_cdev_dev_setup(wq); + if (rc < 0) + return rc; + + dev = idxd_cdev->dev; + cdev_init(cdev, &idxd_cdev_fops); + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); + if (rc) { + dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); + idxd_wq_cdev_cleanup(wq, CDEV_FAILED); + return rc; + } + + init_waitqueue_head(&idxd_cdev->err_queue); + return 0; +} + +void idxd_wq_del_cdev(struct idxd_wq *wq) +{ + idxd_wq_cdev_cleanup(wq, CDEV_NORMAL); +} + +int idxd_cdev_register(void) +{ + int rc, i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + ida_init(&ictx[i].minor_ida); + rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, + ictx[i].name); + if (rc) + return rc; + } + + return 0; +} + +void idxd_cdev_remove(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + unregister_chrdev_region(ictx[i].devt, MINORMASK); + ida_destroy(&ictx[i].minor_ida); + } +} diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c new file mode 100644 index 000000000000..ada69e722f84 --- /dev/null +++ b/drivers/dma/idxd/device.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout); +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand); + +/* Interrupt control bits */ +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 1; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_mask_msix_vectors(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + int i, rc; + + for (i = 0; i < msixcnt; i++) { + rc = idxd_mask_msix_vector(idxd, i); + if (rc < 0) + dev_warn(&pdev->dev, + "Failed disabling msix vec %d\n", i); + } +} + +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 0; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_unmask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + genctrl.softerr_int_en = 1; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +void idxd_mask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + 
genctrl.softerr_int_en = 0; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +static void free_hw_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->hw_descs[i]); + + kfree(wq->hw_descs); +} + +static int alloc_hw_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), + GFP_KERNEL, node); + if (!wq->hw_descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), + GFP_KERNEL, node); + if (!wq->hw_descs[i]) { + free_hw_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +static void free_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->descs[i]); + + kfree(wq->descs); +} + +static int alloc_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *), + GFP_KERNEL, node); + if (!wq->descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]), + GFP_KERNEL, node); + if (!wq->descs[i]) { + free_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +/* WQ control bits */ +int idxd_wq_alloc_resources(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_group *group = wq->group; + struct device *dev = &idxd->pdev->dev; + int rc, num_descs, i; + + if (wq->type != IDXD_WQT_KERNEL) + return 0; + + num_descs = wq->size + + idxd->hw.gen_cap.max_descs_per_engine * group->num_engines; + wq->num_descs = num_descs; + + rc = alloc_hw_descs(wq, num_descs); + if (rc < 0) + return rc; + + wq->compls_size = num_descs * sizeof(struct dsa_completion_record); + wq->compls = dma_alloc_coherent(dev, wq->compls_size, + &wq->compls_addr, GFP_KERNEL); + if (!wq->compls) { + rc = -ENOMEM; + goto fail_alloc_compls; + } + + rc = alloc_descs(wq, num_descs); + if (rc < 0) + goto fail_alloc_descs; + + rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL, + dev_to_node(dev)); + if (rc < 0) + goto fail_sbitmap_init; + + for (i = 0; i < num_descs; i++) { + struct idxd_desc *desc = wq->descs[i]; + + desc->hw = wq->hw_descs[i]; + desc->completion = &wq->compls[i]; + desc->compl_dma = wq->compls_addr + + sizeof(struct dsa_completion_record) * i; + desc->id = i; + desc->wq = wq; + + dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); + desc->txd.tx_submit = idxd_dma_tx_submit; + } + + return 0; + + fail_sbitmap_init: + free_descs(wq); + fail_alloc_descs: + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + fail_alloc_compls: + free_hw_descs(wq); + return rc; +} + +void idxd_wq_free_resources(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if (wq->type != IDXD_WQT_KERNEL) + return; + + free_hw_descs(wq); + free_descs(wq); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + sbitmap_free(&wq->sbmap); +} + +int idxd_wq_enable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + + if (wq->state == IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d already enabled\n", wq->id); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if 
(status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_WQ_ENABLED) { + dev_dbg(dev, "WQ enable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_ENABLED; + dev_dbg(dev, "WQ %d enabled\n", wq->id); + return 0; +} + +int idxd_wq_disable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status, operand; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + dev_dbg(dev, "Disabling WQ %d\n", wq->id); + + if (wq->state != IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); + return 0; + } + + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if (status != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "WQ disable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_DISABLED; + dev_dbg(dev, "WQ %d disabled\n", wq->id); + return 0; +} + +int idxd_wq_map_portal(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + resource_size_t start; + + start = pci_resource_start(pdev, IDXD_WQ_BAR); + start = start + wq->id * IDXD_PORTAL_SIZE; + + wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE); + if (!wq->dportal) + return -ENOMEM; + dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal); + + return 0; +} + +void idxd_wq_unmap_portal(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + devm_iounmap(dev, wq->dportal); +} + +/* Device control bits */ +static inline bool idxd_is_enabled(struct idxd_device *idxd) +{ + union gensts_reg gensts; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + + if (gensts.state == IDXD_DEVICE_STATE_ENABLED) + return true; + return false; +} + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout) +{ + u32 sts, to = timeout; + + lockdep_assert_held(&idxd->dev_lock); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + while (sts & IDXD_CMDSTS_ACTIVE && --to) { + cpu_relax(); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + } + + if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) { + dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__); + *status = 0; + return -EBUSY; + } + + *status = sts; + return 0; +} + +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand) +{ + union idxd_command_reg cmd; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd = cmd_code; + cmd.operand = operand; + dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", + __func__, cmd_code, operand); + iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); + + return 0; +} + +int idxd_device_enable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device already enabled\n"); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was enabled */ + if (status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_DEV_ENABLED) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + 
return -ENXIO; + } + + idxd->state = IDXD_DEV_ENABLED; + return 0; +} + +int idxd_device_disable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (!idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device is not enabled\n"); + return 0; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was disabled */ + if (status != IDXD_CMDSTS_SUCCESS && + !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + rc = -ENXIO; + return rc; + } + + idxd->state = IDXD_DEV_CONF_READY; + return 0; +} + +int __idxd_device_reset(struct idxd_device *idxd) +{ + u32 status; + int rc; + + rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + return 0; +} + +int idxd_device_reset(struct idxd_device *idxd) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = __idxd_device_reset(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + return rc; +} + +/* Device configuration bits */ +static void idxd_group_config_write(struct idxd_group *group) +{ + struct idxd_device *idxd = group->idxd; + struct device *dev = &idxd->pdev->dev; + int i; + u32 grpcfg_offset; + + dev_dbg(dev, "Writing group %d cfg registers\n", group->id); + + /* setup GRPWQCFG */ + for (i = 0; i < 4; i++) { + grpcfg_offset = idxd->grpcfg_offset + + group->id * 64 + i * sizeof(u64); + iowrite64(group->grpcfg.wqs[i], + idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", + group->id, i, grpcfg_offset, + ioread64(idxd->reg_base + grpcfg_offset)); + } + + /* setup GRPENGCFG */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32; + iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, + grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); + + /* setup GRPFLAGS */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40; + iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", + group->id, grpcfg_offset, + ioread32(idxd->reg_base + grpcfg_offset)); +} + +static int idxd_groups_config_write(struct idxd_device *idxd) + +{ + union gencfg_reg reg; + int i; + struct device *dev = &idxd->pdev->dev; + + /* Setup bandwidth token limit */ + if (idxd->token_limit) { + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); + reg.token_limit = idxd->token_limit; + iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); + } + + dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET, + ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + idxd_group_config_write(group); + } + + return 0; +} + +static int idxd_wq_config_write(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 wq_offset; + int i; + + if (!wq->group) + return 0; + + memset(&wq->wqcfg, 0, sizeof(union wqcfg)); + + /* byte 0-3 */ + wq->wqcfg.wq_size = wq->size; + + if (wq->size == 0) { + dev_warn(dev, "Incorrect work queue size: 0\n"); + return -EINVAL; + } + + /* bytes 4-7 */ + wq->wqcfg.wq_thresh = wq->threshold; + + /* byte 8-11 */ + 
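+	/* only dedicated WQs are supported here; priv marks kernel-owned WQs */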
wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL); + wq->wqcfg.mode = 1; + + wq->wqcfg.priority = wq->priority; + + /* bytes 12-15 */ + wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift; + wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift; + + dev_dbg(dev, "WQ %d CFGs\n", wq->id); + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } + + return 0; +} + +static int idxd_wqs_config_write(struct idxd_device *idxd) +{ + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + rc = idxd_wq_config_write(wq); + if (rc < 0) + return rc; + } + + return 0; +} + +static void idxd_group_flags_setup(struct idxd_device *idxd) +{ + int i; + + /* TC-A 0 and TC-B 1 should be defaults */ + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + if (group->tc_a == -1) + group->grpcfg.flags.tc_a = 0; + else + group->grpcfg.flags.tc_a = group->tc_a; + if (group->tc_b == -1) + group->grpcfg.flags.tc_b = 1; + else + group->grpcfg.flags.tc_b = group->tc_b; + group->grpcfg.flags.use_token_limit = group->use_token_limit; + group->grpcfg.flags.tokens_reserved = group->tokens_reserved; + if (group->tokens_allowed) + group->grpcfg.flags.tokens_allowed = + group->tokens_allowed; + else + group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + } +} + +static int idxd_engines_setup(struct idxd_device *idxd) +{ + int i, engines = 0; + struct idxd_engine *eng; + struct idxd_group *group; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + group->grpcfg.engines = 0; + } + + for (i = 0; i < idxd->max_engines; i++) { + eng = &idxd->engines[i]; + group = eng->group; + + if (!group) + continue; + + group->grpcfg.engines |= BIT(eng->id); + engines++; + } + + if (!engines) + return -EINVAL; + + return 0; +} + +static int idxd_wqs_setup(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct idxd_group *group; + int i, j, configured = 0; + struct device *dev = &idxd->pdev->dev; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + for (j = 0; j < 4; j++) + group->grpcfg.wqs[j] = 0; + } + + for (i = 0; i < idxd->max_wqs; i++) { + wq = &idxd->wqs[i]; + group = wq->group; + + if (!wq->group) + continue; + if (!wq->size) + continue; + + if (!wq_dedicated(wq)) { + dev_warn(dev, "No shared workqueue support.\n"); + return -EINVAL; + } + + group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); + configured++; + } + + if (configured == 0) + return -EINVAL; + + return 0; +} + +int idxd_device_config(struct idxd_device *idxd) +{ + int rc; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_wqs_setup(idxd); + if (rc < 0) + return rc; + + rc = idxd_engines_setup(idxd); + if (rc < 0) + return rc; + + idxd_group_flags_setup(idxd); + + rc = idxd_wqs_config_write(idxd); + if (rc < 0) + return rc; + + rc = idxd_groups_config_write(idxd); + if (rc < 0) + return rc; + + return 0; +} diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c new file mode 100644 index 000000000000..c64c1429d160 --- /dev/null +++ b/drivers/dma/idxd/dma.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
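For reference, the per-group configuration block written by idxd_group_config_write() above is 64 bytes, laid out as the offsets in that function imply:

	/* base = grpcfg_offset + group->id * 64
	 *   +0  .. +31  GRPWQCFG  (four u64 words of WQ membership bitmap)
	 *   +32 .. +39  GRPENGCFG (u64 engine membership bitmap)
	 *   +40 .. +43  GRPFLAGS  (u32 flags: traffic classes, token limits)
	 */
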
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) +{ + return container_of(c, struct idxd_wq, dma_chan); +} + +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type) +{ + struct dma_async_tx_descriptor *tx; + struct dmaengine_result res; + int complete = 1; + + if (desc->completion->status == DSA_COMP_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (desc->completion->status) + res.result = DMA_TRANS_WRITE_FAILED; + else if (comp_type == IDXD_COMPLETE_ABORT) + res.result = DMA_TRANS_ABORTED; + else + complete = 0; + + tx = &desc->txd; + if (complete && tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } +} + +static void op_flag_setup(unsigned long flags, u32 *desc_flags) +{ + *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; + if (flags & DMA_PREP_INTERRUPT) + *desc_flags |= IDXD_OP_FLAG_RCI; +} + +static inline void set_completion_address(struct idxd_desc *desc, + u64 *compl_addr) +{ + *compl_addr = desc->compl_dma; +} + +static inline void idxd_prep_desc_common(struct idxd_wq *wq, + struct dsa_hw_desc *hw, char opcode, + u64 addr_f1, u64 addr_f2, u64 len, + u64 compl, u32 flags) +{ + struct idxd_device *idxd = wq->idxd; + + hw->flags = flags; + hw->opcode = opcode; + hw->src_addr = addr_f1; + hw->dst_addr = addr_f2; + hw->xfer_size = len; + hw->priv = !!(wq->type == IDXD_WQT_KERNEL); + hw->completion_addr = compl; + + /* + * Descriptor completion vectors are 1-8 for MSIX. We will round + * robin through the 8 vectors. + */ + wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; + hw->int_handle = wq->vec_ptr; +} + +static struct dma_async_tx_descriptor * +idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_device *idxd = wq->idxd; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + if (len > idxd->max_xfer_bytes) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE, + dma_src, dma_dest, len, desc->compl_dma, + desc_flags); + + desc->txd.flags = flags; + + return &desc->txd; +} + +static int idxd_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_get(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); + return 0; +} + +static void idxd_dma_free_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_put(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); +} + +static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dma_chan, cookie, txstate); +} + +/* + * issue_pending() does not need to do anything since tx_submit() does the job + * already. 
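+ * A client-side sketch (my_done is a hypothetical callback):
+ *
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *				       DMA_PREP_INTERRUPT);
+ *	tx->callback = my_done;
+ *	cookie = dmaengine_submit(tx);    (descriptor hits the portal here)
+ *	dma_async_issue_pending(chan);    (hence a no-op for idxd)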
+ */ +static void idxd_dma_issue_pending(struct dma_chan *dma_chan) +{ +} + +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct idxd_wq *wq = to_idxd_wq(c); + dma_cookie_t cookie; + int rc; + struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd); + + cookie = dma_cookie_assign(tx); + + rc = idxd_submit_desc(wq, desc); + if (rc < 0) { + idxd_free_desc(wq, desc); + return rc; + } + + return cookie; +} + +static void idxd_dma_release(struct dma_device *device) +{ +} + +int idxd_register_dma_device(struct idxd_device *idxd) +{ + struct dma_device *dma = &idxd->dma_dev; + + INIT_LIST_HEAD(&dma->channels); + dma->dev = &idxd->pdev->dev; + + dma->device_release = idxd_dma_release; + + if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; + } + + dma->device_tx_status = idxd_dma_tx_status; + dma->device_issue_pending = idxd_dma_issue_pending; + dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; + dma->device_free_chan_resources = idxd_dma_free_chan_resources; + + return dma_async_device_register(&idxd->dma_dev); +} + +void idxd_unregister_dma_device(struct idxd_device *idxd) +{ + dma_async_device_unregister(&idxd->dma_dev); +} + +int idxd_register_dma_channel(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct dma_device *dma = &idxd->dma_dev; + struct dma_chan *chan = &wq->dma_chan; + int rc; + + memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); + chan->device = dma; + list_add_tail(&chan->device_node, &dma->channels); + rc = dma_async_device_channel_register(dma, chan); + if (rc < 0) + return rc; + + return 0; +} + +void idxd_unregister_dma_channel(struct idxd_wq *wq) +{ + dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan); +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h new file mode 100644 index 000000000000..b8f8a363b4a7 --- /dev/null +++ b/drivers/dma/idxd/idxd.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
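Note that idxd_register_dma_channel() above is a user of the per-channel dma_async_device_channel_register()/dma_async_device_channel_unregister() interface added to the dmaengine core earlier in this diff, which lets channels be added and torn down after dma_async_device_register() has run. The same pattern in isolation (the foo_* names are hypothetical):

	chan->device = &foo->dma_dev;
	list_add_tail(&chan->device_node, &foo->dma_dev.channels);
	rc = dma_async_device_channel_register(&foo->dma_dev, chan);
	if (rc < 0)
		return rc;

	/* later, on teardown: */
	dma_async_device_channel_unregister(&foo->dma_dev, chan);
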
*/ +#ifndef _IDXD_H_ +#define _IDXD_H_ + +#include <linux/sbitmap.h> +#include <linux/dmaengine.h> +#include <linux/percpu-rwsem.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include "registers.h" + +#define IDXD_DRIVER_VERSION "1.00" + +extern struct kmem_cache *idxd_desc_pool; + +#define IDXD_REG_TIMEOUT 50 +#define IDXD_DRAIN_TIMEOUT 5000 + +enum idxd_type { + IDXD_TYPE_UNKNOWN = -1, + IDXD_TYPE_DSA = 0, + IDXD_TYPE_MAX +}; + +#define IDXD_NAME_SIZE 128 + +struct idxd_device_driver { + struct device_driver drv; +}; + +struct idxd_irq_entry { + struct idxd_device *idxd; + int id; + struct llist_head pending_llist; + struct list_head work_list; +}; + +struct idxd_group { + struct device conf_dev; + struct idxd_device *idxd; + struct grpcfg grpcfg; + int id; + int num_engines; + int num_wqs; + bool use_token_limit; + u8 tokens_allowed; + u8 tokens_reserved; + int tc_a; + int tc_b; +}; + +#define IDXD_MAX_PRIORITY 0xf + +enum idxd_wq_state { + IDXD_WQ_DISABLED = 0, + IDXD_WQ_ENABLED, +}; + +enum idxd_wq_flag { + WQ_FLAG_DEDICATED = 0, +}; + +enum idxd_wq_type { + IDXD_WQT_NONE = 0, + IDXD_WQT_KERNEL, + IDXD_WQT_USER, +}; + +struct idxd_cdev { + struct cdev cdev; + struct device *dev; + int minor; + struct wait_queue_head err_queue; +}; + +#define IDXD_ALLOCATED_BATCH_SIZE 128U +#define WQ_NAME_SIZE 1024 +#define WQ_TYPE_SIZE 10 + +enum idxd_op_type { + IDXD_OP_BLOCK = 0, + IDXD_OP_NONBLOCK = 1, +}; + +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, +}; + +struct idxd_wq { + void __iomem *dportal; + struct device conf_dev; + struct idxd_cdev idxd_cdev; + struct idxd_device *idxd; + int id; + enum idxd_wq_type type; + struct idxd_group *group; + int client_count; + struct mutex wq_lock; /* mutex for workqueue */ + u32 size; + u32 threshold; + u32 priority; + enum idxd_wq_state state; + unsigned long flags; + union wqcfg wqcfg; + atomic_t dq_count; /* dedicated queue flow control */ + u32 vec_ptr; /* interrupt steering */ + struct dsa_hw_desc **hw_descs; + int num_descs; + struct dsa_completion_record *compls; + dma_addr_t compls_addr; + int compls_size; + struct idxd_desc **descs; + struct sbitmap sbmap; + struct dma_chan dma_chan; + struct percpu_rw_semaphore submit_lock; + wait_queue_head_t submit_waitq; + char name[WQ_NAME_SIZE + 1]; +}; + +struct idxd_engine { + struct device conf_dev; + int id; + struct idxd_group *group; + struct idxd_device *idxd; +}; + +/* shadow registers */ +struct idxd_hw { + u32 version; + union gen_cap_reg gen_cap; + union wq_cap_reg wq_cap; + union group_cap_reg group_cap; + union engine_cap_reg engine_cap; + struct opcap opcap; +}; + +enum idxd_device_state { + IDXD_DEV_HALTED = -1, + IDXD_DEV_DISABLED = 0, + IDXD_DEV_CONF_READY, + IDXD_DEV_ENABLED, +}; + +enum idxd_device_flag { + IDXD_FLAG_CONFIGURABLE = 0, +}; + +struct idxd_device { + enum idxd_type type; + struct device conf_dev; + struct list_head list; + struct idxd_hw hw; + enum idxd_device_state state; + unsigned long flags; + int id; + int major; + + struct pci_dev *pdev; + void __iomem *reg_base; + + spinlock_t dev_lock; /* spinlock for device */ + struct idxd_group *groups; + struct idxd_wq *wqs; + struct idxd_engine *engines; + + int num_groups; + + u32 msix_perm_offset; + u32 wqcfg_offset; + u32 grpcfg_offset; + u32 perfmon_offset; + + u64 max_xfer_bytes; + u32 max_batch_size; + int max_groups; + int max_engines; + int max_tokens; + int max_wqs; + int max_wq_size; + int token_limit; + int nr_tokens; /* non-reserved tokens */ + + union sw_err_reg sw_err; + + 
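+	/* MSI-X state: one idxd_irq_entry per allocated vector */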
struct msix_entry *msix_entries; + int num_wq_irqs; + struct idxd_irq_entry *irq_entries; + + struct dma_device dma_dev; +}; + +/* IDXD software descriptor */ +struct idxd_desc { + struct dsa_hw_desc *hw; + dma_addr_t desc_dma; + struct dsa_completion_record *completion; + dma_addr_t compl_dma; + struct dma_async_tx_descriptor txd; + struct llist_node llnode; + struct list_head list; + int id; + struct idxd_wq *wq; +}; + +#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev) +#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev) + +extern struct bus_type dsa_bus_type; + +static inline bool wq_dedicated(struct idxd_wq *wq) +{ + return test_bit(WQ_FLAG_DEDICATED, &wq->flags); +} + +enum idxd_portal_prot { + IDXD_PORTAL_UNLIMITED = 0, + IDXD_PORTAL_LIMITED, +}; + +static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) +{ + return prot * 0x1000; +} + +static inline int idxd_get_wq_portal_full_offset(int wq_id, + enum idxd_portal_prot prot) +{ + return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); +} + +static inline void idxd_set_type(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + + if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0) + idxd->type = IDXD_TYPE_DSA; + else + idxd->type = IDXD_TYPE_UNKNOWN; +} + +static inline void idxd_wq_get(struct idxd_wq *wq) +{ + wq->client_count++; +} + +static inline void idxd_wq_put(struct idxd_wq *wq) +{ + wq->client_count--; +} + +static inline int idxd_wq_refcount(struct idxd_wq *wq) +{ + return wq->client_count; +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd); +int idxd_register_bus_type(void); +void idxd_unregister_bus_type(void); +int idxd_setup_sysfs(struct idxd_device *idxd); +void idxd_cleanup_sysfs(struct idxd_device *idxd); +int idxd_register_driver(void); +void idxd_unregister_driver(void); +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); + +/* device interrupt control */ +irqreturn_t idxd_irq_handler(int vec, void *data); +irqreturn_t idxd_misc_thread(int vec, void *data); +irqreturn_t idxd_wq_thread(int irq, void *data); +void idxd_mask_error_interrupts(struct idxd_device *idxd); +void idxd_unmask_error_interrupts(struct idxd_device *idxd); +void idxd_mask_msix_vectors(struct idxd_device *idxd); +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); + +/* device control */ +int idxd_device_enable(struct idxd_device *idxd); +int idxd_device_disable(struct idxd_device *idxd); +int idxd_device_reset(struct idxd_device *idxd); +int __idxd_device_reset(struct idxd_device *idxd); +void idxd_device_cleanup(struct idxd_device *idxd); +int idxd_device_config(struct idxd_device *idxd); +void idxd_device_wqs_clear_state(struct idxd_device *idxd); + +/* work queue control */ +int idxd_wq_alloc_resources(struct idxd_wq *wq); +void idxd_wq_free_resources(struct idxd_wq *wq); +int idxd_wq_enable(struct idxd_wq *wq); +int idxd_wq_disable(struct idxd_wq *wq); +int idxd_wq_map_portal(struct idxd_wq *wq); +void idxd_wq_unmap_portal(struct idxd_wq *wq); + +/* submission */ +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); + +/* dmaengine */ +int idxd_register_dma_device(struct idxd_device *idxd); +void idxd_unregister_dma_device(struct idxd_device *idxd); +int idxd_register_dma_channel(struct idxd_wq 
*wq); +void idxd_unregister_dma_channel(struct idxd_wq *wq); +void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type); +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); + +/* cdev */ +int idxd_cdev_register(void); +void idxd_cdev_remove(void); +int idxd_cdev_get_major(struct idxd_device *idxd); +int idxd_wq_add_cdev(struct idxd_wq *wq); +void idxd_wq_del_cdev(struct idxd_wq *wq); + +#endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c new file mode 100644 index 000000000000..7778c05deb5d --- /dev/null +++ b/drivers/dma/idxd/init.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/aer.h> +#include <linux/fs.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <uapi/linux/idxd.h> +#include <linux/dmaengine.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +MODULE_VERSION(IDXD_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); + +#define DRV_NAME "idxd" + +static struct idr idxd_idrs[IDXD_TYPE_MAX]; +static struct mutex idxd_idr_lock; + +static struct pci_device_id idxd_pci_tbl[] = { + /* DSA ver 1.0 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); + +static char *idxd_name[] = { + "dsa", +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd) +{ + return idxd_name[idxd->type]; +} + +static int idxd_setup_interrupts(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + struct idxd_irq_entry *irq_entry; + int i, msixcnt; + int rc = 0; + + msixcnt = pci_msix_vec_count(pdev); + if (msixcnt < 0) { + dev_err(dev, "Not MSI-X interrupt capable.\n"); + goto err_no_irq; + } + + idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) * + msixcnt, GFP_KERNEL); + if (!idxd->msix_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) + idxd->msix_entries[i].entry = i; + + rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt); + if (rc) { + dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt); + goto err_no_irq; + } + dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); + + /* + * We implement 1 completion list per MSI-X entry except for + * entry 0, which is for errors and others. 
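+	 * Completions land on the vector named by each descriptor's
+	 * int_handle (see idxd_submit_desc()), so a wq vector only drains
+	 * descriptors that were designated to it.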
+ */ + idxd->irq_entries = devm_kcalloc(dev, msixcnt, + sizeof(struct idxd_irq_entry), + GFP_KERNEL); + if (!idxd->irq_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) { + idxd->irq_entries[i].id = i; + idxd->irq_entries[i].idxd = idxd; + } + + msix = &idxd->msix_entries[0]; + irq_entry = &idxd->irq_entries[0]; + rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler, + idxd_misc_thread, 0, "idxd-misc", + irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate misc interrupt.\n"); + goto err_no_irq; + } + + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", + msix->vector); + + /* first MSI-X entry is not for wq interrupts */ + idxd->num_wq_irqs = msixcnt - 1; + + for (i = 1; i < msixcnt; i++) { + msix = &idxd->msix_entries[i]; + irq_entry = &idxd->irq_entries[i]; + + init_llist_head(&idxd->irq_entries[i].pending_llist); + INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); + rc = devm_request_threaded_irq(dev, msix->vector, + idxd_irq_handler, + idxd_wq_thread, 0, + "idxd-portal", irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate irq %d.\n", + msix->vector); + goto err_no_irq; + } + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", + i, msix->vector); + } + + idxd_unmask_error_interrupts(idxd); + + return 0; + + err_no_irq: + /* Disable error interrupt generation */ + idxd_mask_error_interrupts(idxd); + pci_disable_msix(pdev); + dev_err(dev, "No usable interrupts\n"); + return rc; +} + +static void idxd_wqs_free_lock(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + percpu_free_rwsem(&wq->submit_lock); + } +} + +static int idxd_setup_internals(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + idxd->groups = devm_kcalloc(dev, idxd->max_groups, + sizeof(struct idxd_group), GFP_KERNEL); + if (!idxd->groups) + return -ENOMEM; + + for (i = 0; i < idxd->max_groups; i++) { + idxd->groups[i].idxd = idxd; + idxd->groups[i].id = i; + idxd->groups[i].tc_a = -1; + idxd->groups[i].tc_b = -1; + } + + idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq), + GFP_KERNEL); + if (!idxd->wqs) + return -ENOMEM; + + idxd->engines = devm_kcalloc(dev, idxd->max_engines, + sizeof(struct idxd_engine), GFP_KERNEL); + if (!idxd->engines) + return -ENOMEM; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + int rc; + + wq->id = i; + wq->idxd = idxd; + mutex_init(&wq->wq_lock); + atomic_set(&wq->dq_count, 0); + init_waitqueue_head(&wq->submit_waitq); + wq->idxd_cdev.minor = -1; + rc = percpu_init_rwsem(&wq->submit_lock); + if (rc < 0) { + idxd_wqs_free_lock(idxd); + return rc; + } + } + + for (i = 0; i < idxd->max_engines; i++) { + idxd->engines[i].idxd = idxd; + idxd->engines[i].id = i; + } + + return 0; +} + +static void idxd_read_table_offsets(struct idxd_device *idxd) +{ + union offsets_reg offsets; + struct device *dev = &idxd->pdev->dev; + + offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); + offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + + sizeof(u64)); + idxd->grpcfg_offset = offsets.grpcfg * 0x100; + dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); + idxd->wqcfg_offset = offsets.wqcfg * 0x100; + dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", + idxd->wqcfg_offset); + idxd->msix_perm_offset = offsets.msix_perm * 0x100; + dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", + idxd->msix_perm_offset); + idxd->perfmon_offset = 
offsets.perfmon * 0x100; + dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); +} + +static void idxd_read_caps(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + /* reading generic capabilities */ + idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); + dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; + dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); + idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); + if (idxd->hw.gen_cap.config_en) + set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); + + /* reading group capabilities */ + idxd->hw.group_cap.bits = + ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); + dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); + idxd->max_groups = idxd->hw.group_cap.num_groups; + dev_dbg(dev, "max groups: %u\n", idxd->max_groups); + idxd->max_tokens = idxd->hw.group_cap.total_tokens; + dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); + idxd->nr_tokens = idxd->max_tokens; + + /* read engine capabilities */ + idxd->hw.engine_cap.bits = + ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); + dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); + idxd->max_engines = idxd->hw.engine_cap.num_engines; + dev_dbg(dev, "max engines: %u\n", idxd->max_engines); + + /* read workqueue capabilities */ + idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); + dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); + idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; + dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); + idxd->max_wqs = idxd->hw.wq_cap.num_wqs; + dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); + + /* reading operation capabilities */ + for (i = 0; i < 4; i++) { + idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + + IDXD_OPCAP_OFFSET + i * sizeof(u64)); + dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); + } +} + +static struct idxd_device *idxd_alloc(struct pci_dev *pdev, + void __iomem * const *iomap) +{ + struct device *dev = &pdev->dev; + struct idxd_device *idxd; + + idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL); + if (!idxd) + return NULL; + + idxd->pdev = pdev; + idxd->reg_base = iomap[IDXD_MMIO_BAR]; + spin_lock_init(&idxd->dev_lock); + + return idxd; +} + +static int idxd_probe(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + int rc; + + dev_dbg(dev, "%s entered and resetting device\n", __func__); + rc = idxd_device_reset(idxd); + if (rc < 0) + return rc; + dev_dbg(dev, "IDXD reset complete\n"); + + idxd_read_caps(idxd); + idxd_read_table_offsets(idxd); + + rc = idxd_setup_internals(idxd); + if (rc) + goto err_setup; + + rc = idxd_setup_interrupts(idxd); + if (rc) + goto err_setup; + + dev_dbg(dev, "IDXD interrupt setup complete.\n"); + + mutex_lock(&idxd_idr_lock); + idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL); + mutex_unlock(&idxd_idr_lock); + if (idxd->id < 0) { + rc = -ENOMEM; + goto err_idr_fail; + } + + idxd->major = idxd_cdev_get_major(idxd); + + dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); + return 0; + + err_idr_fail: + idxd_mask_error_interrupts(idxd); + idxd_mask_msix_vectors(idxd); + err_setup: + return rc; +} + +static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev 
= &pdev->dev; + struct idxd_device *idxd; + int rc; + unsigned int mask; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + dev_dbg(dev, "Mapping BARs\n"); + mask = (1 << IDXD_MMIO_BAR); + rc = pcim_iomap_regions(pdev, mask, DRV_NAME); + if (rc) + return rc; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + dev_dbg(dev, "Set DMA masks\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + dev_dbg(dev, "Alloc IDXD context\n"); + idxd = idxd_alloc(pdev, iomap); + if (!idxd) + return -ENOMEM; + + idxd_set_type(idxd); + + dev_dbg(dev, "Set PCI master\n"); + pci_set_master(pdev); + pci_set_drvdata(pdev, idxd); + + idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); + rc = idxd_probe(idxd); + if (rc) { + dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); + return -ENODEV; + } + + rc = idxd_setup_sysfs(idxd); + if (rc) { + dev_err(dev, "IDXD sysfs setup failed\n"); + return -ENODEV; + } + + idxd->state = IDXD_DEV_CONF_READY; + + dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", + idxd->hw.version); + + return 0; +} + +static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *itr; + struct llist_node *head; + + head = llist_del_all(&ie->pending_llist); + if (!head) + return; + + llist_for_each_entry_safe(desc, itr, head, llnode) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_flush_work_list(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *iter; + + list_for_each_entry_safe(desc, iter, &ie->work_list, list) { + list_del(&desc->list); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_shutdown(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + int rc, i; + struct idxd_irq_entry *irq_entry; + int msixcnt = pci_msix_vec_count(pdev); + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + if (rc) + dev_err(&pdev->dev, "Disabling device failed\n"); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_mask_msix_vectors(idxd); + idxd_mask_error_interrupts(idxd); + + for (i = 0; i < msixcnt; i++) { + irq_entry = &idxd->irq_entries[i]; + synchronize_irq(idxd->msix_entries[i].vector); + if (i == 0) + continue; + idxd_flush_pending_llist(irq_entry); + idxd_flush_work_list(irq_entry); + } +} + +static void idxd_remove(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_cleanup_sysfs(idxd); + idxd_shutdown(pdev); + idxd_wqs_free_lock(idxd); + mutex_lock(&idxd_idr_lock); + idr_remove(&idxd_idrs[idxd->type], idxd->id); + mutex_unlock(&idxd_idr_lock); +} + +static struct pci_driver idxd_pci_driver = { + .name = DRV_NAME, + .id_table = idxd_pci_tbl, + .probe = idxd_pci_probe, + .remove = idxd_remove, + .shutdown = idxd_shutdown, +}; + +static int __init idxd_init_module(void) +{ + int err, i; + + /* + * If the CPU does not support write512, there's no point in + * enumerating the device. We can not utilize it. 
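+	 * ("write512" refers to the 64-byte MOVDIR64B store that
+	 * iosubmit_cmds512() uses to write descriptors to the wq portal.)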
+ */ + if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) { + pr_warn("idxd driver failed to load without MOVDIR64B.\n"); + return -ENODEV; + } + + pr_info("%s: Intel(R) Accelerator Devices Driver %s\n", + DRV_NAME, IDXD_DRIVER_VERSION); + + mutex_init(&idxd_idr_lock); + for (i = 0; i < IDXD_TYPE_MAX; i++) + idr_init(&idxd_idrs[i]); + + err = idxd_register_bus_type(); + if (err < 0) + return err; + + err = idxd_register_driver(); + if (err < 0) + goto err_idxd_driver_register; + + err = idxd_cdev_register(); + if (err) + goto err_cdev_register; + + err = pci_register_driver(&idxd_pci_driver); + if (err) + goto err_pci_register; + + return 0; + +err_pci_register: + idxd_cdev_remove(); +err_cdev_register: + idxd_unregister_driver(); +err_idxd_driver_register: + idxd_unregister_bus_type(); + return err; +} +module_init(idxd_init_module); + +static void __exit idxd_exit_module(void) +{ + pci_unregister_driver(&idxd_pci_driver); + idxd_cdev_remove(); + idxd_unregister_bus_type(); +} +module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c new file mode 100644 index 000000000000..d6fcd2e60103 --- /dev/null +++ b/drivers/dma/idxd/irq.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +void idxd_device_wqs_clear_state(struct idxd_device *idxd) +{ + int i; + + lockdep_assert_held(&idxd->dev_lock); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->state = IDXD_WQ_DISABLED; + } +} + +static int idxd_restart(struct idxd_device *idxd) +{ + int i, rc; + + lockdep_assert_held(&idxd->dev_lock); + + rc = __idxd_device_reset(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_config(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_enable(idxd); + if (rc < 0) + goto out; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_ENABLED) { + rc = idxd_wq_enable(wq); + if (rc < 0) { + dev_warn(&idxd->pdev->dev, + "Unable to re-enable wq %s\n", + dev_name(&wq->conf_dev)); + } + } + } + + return 0; + + out: + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + return rc; +} + +irqreturn_t idxd_irq_handler(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + + idxd_mask_msix_vector(idxd, irq_entry->id); + return IRQ_WAKE_THREAD; +} + +irqreturn_t idxd_misc_thread(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + struct device *dev = &idxd->pdev->dev; + union gensts_reg gensts; + u32 cause, val = 0; + int i, rc; + bool err = false; + + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + + if (cause & IDXD_INTC_ERR) { + spin_lock_bh(&idxd->dev_lock); + for (i = 0; i < 4; i++) + idxd->sw_err.bits[i] = ioread64(idxd->reg_base + + IDXD_SWERR_OFFSET + i * sizeof(u64)); + iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); + + if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { + int id = idxd->sw_err.wq_idx; + struct idxd_wq *wq = &idxd->wqs[id]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } else { + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct 
idxd_wq *wq = &idxd->wqs[i];
+
+				if (wq->type == IDXD_WQT_USER)
+					wake_up_interruptible(&wq->idxd_cdev.err_queue);
+			}
+		}
+
+		spin_unlock_bh(&idxd->dev_lock);
+		val |= IDXD_INTC_ERR;
+
+		for (i = 0; i < 4; i++)
+			dev_warn(dev, "err[%d]: %#16.16llx\n",
+				 i, idxd->sw_err.bits[i]);
+		err = true;
+	}
+
+	if (cause & IDXD_INTC_CMD) {
+		/* Driver does not act on command completion interrupts */
+		val |= IDXD_INTC_CMD;
+	}
+
+	if (cause & IDXD_INTC_OCCUPY) {
+		/* Driver does not utilize occupancy interrupt */
+		val |= IDXD_INTC_OCCUPY;
+	}
+
+	if (cause & IDXD_INTC_PERFMON_OVFL) {
+		/*
+		 * Driver does not utilize perfmon counter overflow interrupt
+		 * yet.
+		 */
+		val |= IDXD_INTC_PERFMON_OVFL;
+	}
+
+	val ^= cause;
+	if (val)
+		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+			      val);
+
+	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	if (!err)
+		return IRQ_HANDLED;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+		spin_lock_bh(&idxd->dev_lock);
+		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+			rc = idxd_restart(idxd);
+			if (rc < 0)
+				dev_err(&idxd->pdev->dev,
+					"idxd restart failed, device halt.\n");
+		} else {
+			idxd_device_wqs_clear_state(idxd);
+			idxd->state = IDXD_DEV_HALTED;
+			dev_err(&idxd->pdev->dev,
+				"idxd halted, need %s.\n",
+				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+				"FLR" : "system reset");
+		}
+		spin_unlock_bh(&idxd->dev_lock);
+	}
+
+	idxd_unmask_msix_vector(idxd, irq_entry->id);
+	return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+				     int *processed)
+{
+	struct idxd_desc *desc, *t;
+	struct llist_node *head;
+	int queued = 0;
+
+	head = llist_del_all(&irq_entry->pending_llist);
+	if (!head)
+		return 0;
+
+	llist_for_each_entry_safe(desc, t, head, llnode) {
+		if (desc->completion->status) {
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+			idxd_free_desc(desc->wq, desc);
+			(*processed)++;
+		} else {
+			list_add_tail(&desc->list, &irq_entry->work_list);
+			queued++;
+		}
+	}
+
+	return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+				 int *processed)
+{
+	struct list_head *node, *next;
+	int queued = 0;
+
+	if (list_empty(&irq_entry->work_list))
+		return 0;
+
+	list_for_each_safe(node, next, &irq_entry->work_list) {
+		struct idxd_desc *desc =
+			container_of(node, struct idxd_desc, list);
+
+		if (desc->completion->status) {
+			list_del(&desc->list);
+			/* process and callback */
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+			idxd_free_desc(desc->wq, desc);
+			(*processed)++;
+		} else {
+			queued++;
+		}
+	}
+
+	return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+	struct idxd_irq_entry *irq_entry = data;
+	int rc, processed = 0, retry = 0;
+
+	/*
+	 * There are two lists we are processing. The pending_llist is where
+	 * the submitter adds each descriptor after sending it to the work
+	 * queue. It's a lockless singly-linked list. The work_list is the
+	 * common Linux doubly-linked list. We are in a scenario of multiple
+	 * producers and a single consumer. The producers are all the kernel
+	 * submitters of descriptors, and the consumer is the kernel irq
+	 * handler thread for the MSI-X vector when using a threaded irq. To
+	 * work within the restrictions of llist and remain lockless, we are
+	 * doing the following steps:
+	 * 1. Iterate through the work_list and process any completed
+	 *    descriptors. Delete the completed entries during iteration.
+	 * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list + * and process the completed entries. + * 4. If the entry is still waiting on hardware, list_add_tail() to + * the work_list. + * 5. Repeat until no more descriptors. + */ + do { + rc = irq_process_work_list(irq_entry, &processed); + if (rc != 0) { + retry++; + continue; + } + + rc = irq_process_pending_llist(irq_entry, &processed); + } while (rc != 0 && retry != 10); + + idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); + + if (processed == 0) + return IRQ_NONE; + + return IRQ_HANDLED; +} diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h new file mode 100644 index 000000000000..a39e7ae6b3d9 --- /dev/null +++ b/drivers/dma/idxd/registers.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#ifndef _IDXD_REGISTERS_H_ +#define _IDXD_REGISTERS_H_ + +/* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 + +#define IDXD_MMIO_BAR 0 +#define IDXD_WQ_BAR 2 +#define IDXD_PORTAL_SIZE 0x4000 + +/* MMIO Device BAR0 Registers */ +#define IDXD_VER_OFFSET 0x00 +#define IDXD_VER_MAJOR_MASK 0xf0 +#define IDXD_VER_MINOR_MASK 0x0f +#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4) +#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK) + +union gen_cap_reg { + struct { + u64 block_on_fault:1; + u64 overlap_copy:1; + u64 cache_control_mem:1; + u64 cache_control_cache:1; + u64 rsvd:3; + u64 int_handle_req:1; + u64 dest_readback:1; + u64 drain_readback:1; + u64 rsvd2:6; + u64 max_xfer_shift:5; + u64 max_batch_shift:4; + u64 max_ims_mult:6; + u64 config_en:1; + u64 max_descs_per_engine:8; + u64 rsvd3:24; + }; + u64 bits; +} __packed; +#define IDXD_GENCAP_OFFSET 0x10 + +union wq_cap_reg { + struct { + u64 total_wq_size:16; + u64 num_wqs:8; + u64 rsvd:24; + u64 shared_mode:1; + u64 dedicated_mode:1; + u64 rsvd2:1; + u64 priority:1; + u64 occupancy:1; + u64 occupancy_int:1; + u64 rsvd3:10; + }; + u64 bits; +} __packed; +#define IDXD_WQCAP_OFFSET 0x20 + +union group_cap_reg { + struct { + u64 num_groups:8; + u64 total_tokens:8; + u64 token_en:1; + u64 token_limit:1; + u64 rsvd:46; + }; + u64 bits; +} __packed; +#define IDXD_GRPCAP_OFFSET 0x30 + +union engine_cap_reg { + struct { + u64 num_engines:8; + u64 rsvd:56; + }; + u64 bits; +} __packed; + +#define IDXD_ENGCAP_OFFSET 0x38 + +#define IDXD_OPCAP_NOOP 0x0001 +#define IDXD_OPCAP_BATCH 0x0002 +#define IDXD_OPCAP_MEMMOVE 0x0008 +struct opcap { + u64 bits[4]; +}; + +#define IDXD_OPCAP_OFFSET 0x40 + +#define IDXD_TABLE_OFFSET 0x60 +union offsets_reg { + struct { + u64 grpcfg:16; + u64 wqcfg:16; + u64 msix_perm:16; + u64 ims:16; + u64 perfmon:16; + u64 rsvd:48; + }; + u64 bits[2]; +} __packed; + +#define IDXD_GENCFG_OFFSET 0x80 +union gencfg_reg { + struct { + u32 token_limit:8; + u32 rsvd:4; + u32 user_int_en:1; + u32 rsvd2:19; + }; + u32 bits; +} __packed; + +#define IDXD_GENCTRL_OFFSET 0x88 +union genctrl_reg { + struct { + u32 softerr_int_en:1; + u32 rsvd:31; + }; + u32 bits; +} __packed; + +#define IDXD_GENSTATS_OFFSET 0x90 +union gensts_reg { + struct { + u32 state:2; + u32 reset_type:2; + u32 rsvd:28; + }; + u32 bits; +} __packed; + +enum idxd_device_status_state { + IDXD_DEVICE_STATE_DISABLED = 0, + IDXD_DEVICE_STATE_ENABLED, + IDXD_DEVICE_STATE_DRAIN, + IDXD_DEVICE_STATE_HALT, +}; + +enum idxd_device_reset_type { + IDXD_DEVICE_RESET_SOFTWARE = 0, + IDXD_DEVICE_RESET_FLR, + IDXD_DEVICE_RESET_WARM, + IDXD_DEVICE_RESET_COLD, +}; + +#define IDXD_INTCAUSE_OFFSET 0x98 
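+/*
+ * Individual cause bits of the INTCAUSE register. idxd_misc_thread()
+ * handles whichever bits are set and then acknowledges them by writing
+ * the value read back to the register:
+ *
+ *	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ *	...
+ *	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ */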
+#define IDXD_INTC_ERR 0x01 +#define IDXD_INTC_CMD 0x02 +#define IDXD_INTC_OCCUPY 0x04 +#define IDXD_INTC_PERFMON_OVFL 0x08 + +#define IDXD_CMD_OFFSET 0xa0 +union idxd_command_reg { + struct { + u32 operand:20; + u32 cmd:5; + u32 rsvd:6; + u32 int_req:1; + }; + u32 bits; +} __packed; + +enum idxd_cmd { + IDXD_CMD_ENABLE_DEVICE = 1, + IDXD_CMD_DISABLE_DEVICE, + IDXD_CMD_DRAIN_ALL, + IDXD_CMD_ABORT_ALL, + IDXD_CMD_RESET_DEVICE, + IDXD_CMD_ENABLE_WQ, + IDXD_CMD_DISABLE_WQ, + IDXD_CMD_DRAIN_WQ, + IDXD_CMD_ABORT_WQ, + IDXD_CMD_RESET_WQ, + IDXD_CMD_DRAIN_PASID, + IDXD_CMD_ABORT_PASID, + IDXD_CMD_REQUEST_INT_HANDLE, +}; + +#define IDXD_CMDSTS_OFFSET 0xa8 +union cmdsts_reg { + struct { + u8 err; + u16 result; + u8 rsvd:7; + u8 active:1; + }; + u32 bits; +} __packed; +#define IDXD_CMDSTS_ACTIVE 0x80000000 + +enum idxd_cmdsts_err { + IDXD_CMDSTS_SUCCESS = 0, + IDXD_CMDSTS_INVAL_CMD, + IDXD_CMDSTS_INVAL_WQIDX, + IDXD_CMDSTS_HW_ERR, + /* enable device errors */ + IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10, + IDXD_CMDSTS_ERR_CONFIG, + IDXD_CMDSTS_ERR_BUSMASTER_EN, + IDXD_CMDSTS_ERR_PASID_INVAL, + IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE, + IDXD_CMDSTS_ERR_GRP_CONFIG, + IDXD_CMDSTS_ERR_GRP_CONFIG2, + IDXD_CMDSTS_ERR_GRP_CONFIG3, + IDXD_CMDSTS_ERR_GRP_CONFIG4, + /* enable wq errors */ + IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20, + IDXD_CMDSTS_ERR_WQ_ENABLED, + IDXD_CMDSTS_ERR_WQ_SIZE, + IDXD_CMDSTS_ERR_WQ_PRIOR, + IDXD_CMDSTS_ERR_WQ_MODE, + IDXD_CMDSTS_ERR_BOF_EN, + IDXD_CMDSTS_ERR_PASID_EN, + IDXD_CMDSTS_ERR_MAX_BATCH_SIZE, + IDXD_CMDSTS_ERR_MAX_XFER_SIZE, + /* disable device errors */ + IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31, + /* disable WQ, drain WQ, abort WQ, reset WQ */ + IDXD_CMDSTS_ERR_DEV_NOT_EN, + /* request interrupt handle */ + IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41, + IDXD_CMDSTS_ERR_NO_HANDLE, +}; + +#define IDXD_SWERR_OFFSET 0xc0 +#define IDXD_SWERR_VALID 0x00000001 +#define IDXD_SWERR_OVERFLOW 0x00000002 +#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW) +union sw_err_reg { + struct { + u64 valid:1; + u64 overflow:1; + u64 desc_valid:1; + u64 wq_idx_valid:1; + u64 batch:1; + u64 fault_rw:1; + u64 priv:1; + u64 rsvd:1; + u64 error:8; + u64 wq_idx:8; + u64 rsvd2:8; + u64 operation:8; + u64 pasid:20; + u64 rsvd3:4; + + u64 batch_idx:16; + u64 rsvd4:16; + u64 invalid_flags:32; + + u64 fault_addr; + + u64 rsvd5; + }; + u64 bits[4]; +} __packed; + +union msix_perm { + struct { + u32 rsvd:2; + u32 ignore:1; + u32 pasid_en:1; + u32 rsvd2:8; + u32 pasid:20; + }; + u32 bits; +} __packed; + +union group_flags { + struct { + u32 tc_a:3; + u32 tc_b:3; + u32 rsvd:1; + u32 use_token_limit:1; + u32 tokens_reserved:8; + u32 rsvd2:4; + u32 tokens_allowed:8; + u32 rsvd3:4; + }; + u32 bits; +} __packed; + +struct grpcfg { + u64 wqs[4]; + u64 engines; + union group_flags flags; +} __packed; + +union wqcfg { + struct { + /* bytes 0-3 */ + u16 wq_size; + u16 rsvd; + + /* bytes 4-7 */ + u16 wq_thresh; + u16 rsvd1; + + /* bytes 8-11 */ + u32 mode:1; /* shared or dedicated */ + u32 bof:1; /* block on fault */ + u32 rsvd2:2; + u32 priority:4; + u32 pasid:20; + u32 pasid_en:1; + u32 priv:1; + u32 rsvd3:2; + + /* bytes 12-15 */ + u32 max_xfer_shift:5; + u32 max_batch_shift:4; + u32 rsvd4:23; + + /* bytes 16-19 */ + u16 occupancy_inth; + u16 occupancy_table_sel:1; + u16 rsvd5:15; + + /* bytes 20-23 */ + u16 occupancy_limit; + u16 occupancy_int_en:1; + u16 rsvd6:15; + + /* bytes 24-27 */ + u16 occupancy; + u16 occupancy_int:1; + u16 rsvd7:12; + u16 mode_support:1; + u16 wq_state:2; + + /* bytes 28-31 */ + u32 rsvd8; + }; + u32 bits[8]; +} 
__packed;
+#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644
index 000000000000..45a0c5869a0a
--- /dev/null
+++ b/drivers/dma/idxd/submit.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+	struct idxd_desc *desc;
+	int idx;
+	struct idxd_device *idxd = wq->idxd;
+
+	if (idxd->state != IDXD_DEV_ENABLED)
+		return ERR_PTR(-EIO);
+
+	if (optype == IDXD_OP_BLOCK)
+		percpu_down_read(&wq->submit_lock);
+	else if (!percpu_down_read_trylock(&wq->submit_lock))
+		return ERR_PTR(-EBUSY);
+
+	if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+		int rc;
+
+		if (optype == IDXD_OP_NONBLOCK) {
+			percpu_up_read(&wq->submit_lock);
+			return ERR_PTR(-EAGAIN);
+		}
+
+		percpu_up_read(&wq->submit_lock);
+		percpu_down_write(&wq->submit_lock);
+		rc = wait_event_interruptible(wq->submit_waitq,
+					      atomic_add_unless(&wq->dq_count,
+								1, wq->size) ||
+					      idxd->state != IDXD_DEV_ENABLED);
+		percpu_up_write(&wq->submit_lock);
+		if (rc < 0)
+			return ERR_PTR(-EINTR);
+		if (idxd->state != IDXD_DEV_ENABLED)
+			return ERR_PTR(-EIO);
+	} else {
+		percpu_up_read(&wq->submit_lock);
+	}
+
+	idx = sbitmap_get(&wq->sbmap, 0, false);
+	if (idx < 0) {
+		atomic_dec(&wq->dq_count);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	desc = wq->descs[idx];
+	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+	memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+	return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+	atomic_dec(&wq->dq_count);
+
+	sbitmap_clear_bit(&wq->sbmap, desc->id);
+	wake_up(&wq->submit_waitq);
+}
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+	struct idxd_device *idxd = wq->idxd;
+	int vec = desc->hw->int_handle;
+	void __iomem *portal;
+
+	if (idxd->state != IDXD_DEV_ENABLED)
+		return -EIO;
+
+	portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+	/*
+	 * The wmb() flushes writes to coherent DMA data before possibly
+	 * triggering a DMA read. The wmb() is necessary even on UP because
+	 * the recipient is a device.
+	 */
+	wmb();
+	iosubmit_cmds512(portal, desc->hw, 1);
+
+	/*
+	 * Add the descriptor to the lockless pending list of the irq_entry
+	 * that this descriptor was designated to.
+	 */
+	if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+		llist_add(&desc->llnode,
+			  &idxd->irq_entries[vec].pending_llist);
+
+	return 0;
+}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644
index 000000000000..849c50ab939a
--- /dev/null
+++ b/drivers/dma/idxd/sysfs.c
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +static char *idxd_wq_type_names[] = { + [IDXD_WQT_NONE] = "none", + [IDXD_WQT_KERNEL] = "kernel", + [IDXD_WQT_USER] = "user", +}; + +static void idxd_conf_device_release(struct device *dev) +{ + dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); +} + +static struct device_type idxd_group_device_type = { + .name = "group", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_wq_device_type = { + .name = "wq", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_engine_device_type = { + .name = "engine", + .release = idxd_conf_device_release, +}; + +static struct device_type dsa_device_type = { + .name = "dsa", + .release = idxd_conf_device_release, +}; + +static inline bool is_dsa_dev(struct device *dev) +{ + return dev ? dev->type == &dsa_device_type : false; +} + +static inline bool is_idxd_dev(struct device *dev) +{ + return is_dsa_dev(dev); +} + +static inline bool is_idxd_wq_dev(struct device *dev) +{ + return dev ? dev->type == &idxd_wq_device_type : false; +} + +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) +{ + if (wq->type == IDXD_WQT_KERNEL && + strcmp(wq->name, "dmaengine") == 0) + return true; + return false; +} + +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) +{ + return wq->type == IDXD_WQT_USER ? true : false; +} + +static int idxd_config_bus_match(struct device *dev, + struct device_driver *drv) +{ + int matched = 0; + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) + return 0; + matched = 1; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + if (idxd->state < IDXD_DEV_CONF_READY) + return 0; + + if (wq->state != IDXD_WQ_DISABLED) { + dev_dbg(dev, "%s not disabled\n", dev_name(dev)); + return 0; + } + matched = 1; + } + + if (matched) + dev_dbg(dev, "%s matched\n", dev_name(dev)); + + return matched; +} + +static int idxd_config_bus_probe(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called\n", __func__); + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) { + dev_warn(dev, "Device not ready for config\n"); + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) + return -ENXIO; + + spin_lock_irqsave(&idxd->dev_lock, flags); + + /* Perform IDXD configuration and enabling */ + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device config failed: %d\n", rc); + return rc; + } + + /* start device */ + rc = idxd_device_enable(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device enable failed: %d\n", rc); + return rc; + } + + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_info(dev, "Device %s enabled\n", dev_name(dev)); + + rc = idxd_register_dma_device(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_dbg(dev, "Failed to register dmaengine device\n"); + return rc; + } + return 0; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + mutex_lock(&wq->wq_lock); + + if (idxd->state != IDXD_DEV_ENABLED) { + 
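/* a wq can only be enabled after its device has been enabled */
+			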
mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "Enabling while device not enabled.\n");
+			return -EPERM;
+		}
+
+		if (wq->state != IDXD_WQ_DISABLED) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+			return -EBUSY;
+		}
+
+		if (!wq->group) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ not attached to group.\n");
+			return -EINVAL;
+		}
+
+		if (strlen(wq->name) == 0) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ name not set.\n");
+			return -EINVAL;
+		}
+
+		rc = idxd_wq_alloc_resources(wq);
+		if (rc < 0) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ resource alloc failed\n");
+			return rc;
+		}
+
+		spin_lock_irqsave(&idxd->dev_lock, flags);
+		rc = idxd_device_config(idxd);
+		if (rc < 0) {
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "Writing WQ %d config failed: %d\n",
+				 wq->id, rc);
+			return rc;
+		}
+
+		rc = idxd_wq_enable(wq);
+		if (rc < 0) {
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ %d enabling failed: %d\n",
+				 wq->id, rc);
+			return rc;
+		}
+		spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+		rc = idxd_wq_map_portal(wq);
+		if (rc < 0) {
+			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+			rc = idxd_wq_disable(wq);
+			if (rc < 0)
+				dev_warn(dev, "IDXD wq disable failed\n");
+			mutex_unlock(&wq->wq_lock);
+			return rc;
+		}
+
+		wq->client_count = 0;
+
+		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+		if (is_idxd_wq_dmaengine(wq)) {
+			rc = idxd_register_dma_channel(wq);
+			if (rc < 0) {
+				dev_dbg(dev, "DMA channel register failed\n");
+				mutex_unlock(&wq->wq_lock);
+				return rc;
+			}
+		} else if (is_idxd_wq_cdev(wq)) {
+			rc = idxd_wq_add_cdev(wq);
+			if (rc < 0) {
+				dev_dbg(dev, "Cdev creation failed\n");
+				mutex_unlock(&wq->wq_lock);
+				return rc;
+			}
+		}
+
+		mutex_unlock(&wq->wq_lock);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	unsigned long flags;
+	int rc;
+
+	mutex_lock(&wq->wq_lock);
+	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+	if (wq->state == IDXD_WQ_DISABLED) {
+		mutex_unlock(&wq->wq_lock);
+		return;
+	}
+
+	if (is_idxd_wq_dmaengine(wq))
+		idxd_unregister_dma_channel(wq);
+	else if (is_idxd_wq_cdev(wq))
+		idxd_wq_del_cdev(wq);
+
+	if (idxd_wq_refcount(wq))
+		dev_warn(dev, "Clients still have claims on wq %d: %d\n",
+			 wq->id, idxd_wq_refcount(wq));
+
+	idxd_wq_unmap_portal(wq);
+
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	rc = idxd_wq_disable(wq);
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+	idxd_wq_free_resources(wq);
+	wq->client_count = 0;
+	mutex_unlock(&wq->wq_lock);
+
+	if (rc < 0)
+		dev_warn(dev, "Failed to disable %s: %d\n",
+			 dev_name(&wq->conf_dev), rc);
+	else
+		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+	int rc;
+	unsigned long flags;
+
+	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+	/* disable workqueue here */
+	if (is_idxd_wq_dev(dev)) {
+		struct idxd_wq *wq = confdev_to_wq(dev);
+
+		disable_wq(wq);
+	} else if (is_idxd_dev(dev)) {
+		struct idxd_device *idxd = confdev_to_idxd(dev);
+		int i;
+
+		dev_dbg(dev, "%s removing dev %s\n", __func__,
+			dev_name(&idxd->conf_dev));
+		for (i = 0; i < idxd->max_wqs; i++) {
+			struct idxd_wq *wq = &idxd->wqs[i];
+
+			if (wq->state == IDXD_WQ_DISABLED)
+				continue;
+
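+			/* force-release any wq still bound so it gets disabled first */
+			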
dev_warn(dev, "Active wq %d on disable %s.\n", i, + dev_name(&idxd->conf_dev)); + device_release_driver(&wq->conf_dev); + } + + idxd_unregister_dma_device(idxd); + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); + if (rc < 0) + dev_warn(dev, "Device disable failed\n"); + else + dev_info(dev, "Device %s disabled\n", dev_name(dev)); + + } + + return 0; +} + +static void idxd_config_bus_shutdown(struct device *dev) +{ + dev_dbg(dev, "%s called\n", __func__); +} + +struct bus_type dsa_bus_type = { + .name = "dsa", + .match = idxd_config_bus_match, + .probe = idxd_config_bus_probe, + .remove = idxd_config_bus_remove, + .shutdown = idxd_config_bus_shutdown, +}; + +static struct bus_type *idxd_bus_types[] = { + &dsa_bus_type +}; + +static struct idxd_device_driver dsa_drv = { + .drv = { + .name = "dsa", + .bus = &dsa_bus_type, + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, +}; + +static struct idxd_device_driver *idxd_drvs[] = { + &dsa_drv +}; + +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) +{ + return idxd_bus_types[idxd->type]; +} + +static struct device_type *idxd_get_device_type(struct idxd_device *idxd) +{ + if (idxd->type == IDXD_TYPE_DSA) + return &dsa_device_type; + else + return NULL; +} + +/* IDXD generic driver setup */ +int idxd_register_driver(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = driver_register(&idxd_drvs[i]->drv); + if (rc < 0) + goto drv_fail; + } + + return 0; + +drv_fail: + for (; i > 0; i--) + driver_unregister(&idxd_drvs[i]->drv); + return rc; +} + +void idxd_unregister_driver(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + driver_unregister(&idxd_drvs[i]->drv); +} + +/* IDXD engine attributes */ +static ssize_t engine_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + + if (engine->group) + return sprintf(buf, "%d\n", engine->group->id); + else + return sprintf(buf, "%d\n", -1); +} + +static ssize_t engine_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + struct idxd_device *idxd = engine->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (engine->group) { + engine->group->num_engines--; + engine->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = engine->group; + + if (prevg) + prevg->num_engines--; + engine->group = &idxd->groups[id]; + engine->group->num_engines++; + + return count; +} + +static struct device_attribute dev_attr_engine_group = + __ATTR(group_id, 0644, engine_group_id_show, + engine_group_id_store); + +static struct attribute *idxd_engine_attributes[] = { + &dev_attr_engine_group.attr, + NULL, +}; + +static const struct attribute_group idxd_engine_attribute_group = { + .attrs = idxd_engine_attributes, +}; + +static const struct attribute_group *idxd_engine_attribute_groups[] = { + &idxd_engine_attribute_group, + NULL, +}; + +/* Group attributes */ + +static void idxd_set_free_tokens(struct idxd_device *idxd) +{ + int i, tokens; + + for (i = 0, tokens = 0; i < 
idxd->max_groups; i++) { + struct idxd_group *g = &idxd->groups[i]; + + tokens += g->tokens_reserved; + } + + idxd->nr_tokens = idxd->max_tokens - tokens; +} + +static ssize_t group_tokens_reserved_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_reserved); +} + +static ssize_t group_tokens_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + if (val > idxd->max_tokens) + return -EINVAL; + + if (val > idxd->nr_tokens) + return -EINVAL; + + group->tokens_reserved = val; + idxd_set_free_tokens(idxd); + return count; +} + +static struct device_attribute dev_attr_group_tokens_reserved = + __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, + group_tokens_reserved_store); + +static ssize_t group_tokens_allowed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_allowed); +} + +static ssize_t group_tokens_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + if (val < 4 * group->num_engines || + val > group->tokens_reserved + idxd->nr_tokens) + return -EINVAL; + + group->tokens_allowed = val; + return count; +} + +static struct device_attribute dev_attr_group_tokens_allowed = + __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, + group_tokens_allowed_store); + +static ssize_t group_use_token_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->use_token_limit); +} + +static ssize_t group_use_token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + group->use_token_limit = !!val; + return count; +} + +static struct device_attribute dev_attr_group_use_token_limit = + __ATTR(use_token_limit, 0644, group_use_token_limit_show, + group_use_token_limit_store); + +static ssize_t group_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct 
idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + if (!engine->group) + continue; + + if (engine->group->id == group->id) + rc += sprintf(tmp + rc, "engine%d.%d ", + idxd->id, engine->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_engines = + __ATTR(engines, 0444, group_engines_show, NULL); + +static ssize_t group_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (!wq->group) + continue; + + if (wq->group->id == group->id) + rc += sprintf(tmp + rc, "wq%d.%d ", + idxd->id, wq->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_work_queues = + __ATTR(work_queues, 0444, group_work_queues_show, NULL); + +static ssize_t group_traffic_class_a_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_a); +} + +static ssize_t group_traffic_class_a_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_a = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_a = + __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, + group_traffic_class_a_store); + +static ssize_t group_traffic_class_b_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_b); +} + +static ssize_t group_traffic_class_b_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_b = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_b = + __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, + group_traffic_class_b_store); + +static struct attribute *idxd_group_attributes[] = { + &dev_attr_group_work_queues.attr, + &dev_attr_group_engines.attr, + &dev_attr_group_use_token_limit.attr, + &dev_attr_group_tokens_allowed.attr, + &dev_attr_group_tokens_reserved.attr, + &dev_attr_group_traffic_class_a.attr, + &dev_attr_group_traffic_class_b.attr, + NULL, +}; + +static const struct attribute_group idxd_group_attribute_group = { + .attrs = idxd_group_attributes, +}; + +static 
const struct attribute_group *idxd_group_attribute_groups[] = { + &idxd_group_attribute_group, + NULL, +}; + +/* IDXD work queue attribs */ +static ssize_t wq_clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->client_count); +} + +static struct device_attribute dev_attr_wq_clients = + __ATTR(clients, 0444, wq_clients_show, NULL); + +static ssize_t wq_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->state) { + case IDXD_WQ_DISABLED: + return sprintf(buf, "disabled\n"); + case IDXD_WQ_ENABLED: + return sprintf(buf, "enabled\n"); + } + + return sprintf(buf, "unknown\n"); +} + +static struct device_attribute dev_attr_wq_state = + __ATTR(state, 0444, wq_state_show, NULL); + +static ssize_t wq_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->group) + return sprintf(buf, "%u\n", wq->group->id); + else + return sprintf(buf, "-1\n"); +} + +static ssize_t wq_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (wq->group) { + wq->group->num_wqs--; + wq->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = wq->group; + + if (prevg) + prevg->num_wqs--; + wq->group = group; + group->num_wqs++; + return count; +} + +static struct device_attribute dev_attr_wq_group_id = + __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); + +static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", + wq_dedicated(wq) ? 
"dedicated" : "shared"); +} + +static ssize_t wq_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (sysfs_streq(buf, "dedicated")) { + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + } else { + return -EINVAL; + } + + return count; +} + +static struct device_attribute dev_attr_wq_mode = + __ATTR(mode, 0644, wq_mode_show, wq_mode_store); + +static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->size); +} + +static ssize_t wq_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long size; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &size); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (size > idxd->max_wq_size) + return -EINVAL; + + wq->size = size; + return count; +} + +static struct device_attribute dev_attr_wq_size = + __ATTR(size, 0644, wq_size_show, wq_size_store); + +static ssize_t wq_priority_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->priority); +} + +static ssize_t wq_priority_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long prio; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &prio); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (prio > IDXD_MAX_PRIORITY) + return -EINVAL; + + wq->priority = prio; + return count; +} + +static struct device_attribute dev_attr_wq_priority = + __ATTR(priority, 0644, wq_priority_show, wq_priority_store); + +static ssize_t wq_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->type) { + case IDXD_WQT_KERNEL: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_KERNEL]); + case IDXD_WQT_USER: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_USER]); + case IDXD_WQT_NONE: + default: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_NONE]); + } + + return -EINVAL; +} + +static ssize_t wq_type_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + enum idxd_wq_type old_type; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + old_type = wq->type; + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + wq->type = IDXD_WQT_KERNEL; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) + wq->type = IDXD_WQT_USER; + else + wq->type = IDXD_WQT_NONE; + + /* If we are changing queue type, clear the name */ + if (wq->type != old_type) + memset(wq->name, 0, WQ_NAME_SIZE + 1); 
+ + return count; +} + +static struct device_attribute dev_attr_wq_type = + __ATTR(type, 0644, wq_type_show, wq_type_store); + +static ssize_t wq_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", wq->name); +} + +static ssize_t wq_name_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strncpy(wq->name, buf, WQ_NAME_SIZE); + strreplace(wq->name, '\n', '\0'); + return count; +} + +static struct device_attribute dev_attr_wq_name = + __ATTR(name, 0644, wq_name_show, wq_name_store); + +static ssize_t wq_cdev_minor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->idxd_cdev.minor); +} + +static struct device_attribute dev_attr_wq_cdev_minor = + __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); + +static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_clients.attr, + &dev_attr_wq_state.attr, + &dev_attr_wq_group_id.attr, + &dev_attr_wq_mode.attr, + &dev_attr_wq_size.attr, + &dev_attr_wq_priority.attr, + &dev_attr_wq_type.attr, + &dev_attr_wq_name.attr, + &dev_attr_wq_cdev_minor.attr, + NULL, +}; + +static const struct attribute_group idxd_wq_attribute_group = { + .attrs = idxd_wq_attributes, +}; + +static const struct attribute_group *idxd_wq_attribute_groups[] = { + &idxd_wq_attribute_group, + NULL, +}; + +/* IDXD device attribs */ +static ssize_t max_work_queues_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wq_size); +} +static DEVICE_ATTR_RO(max_work_queues_size); + +static ssize_t max_groups_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_groups); +} +static DEVICE_ATTR_RO(max_groups); + +static ssize_t max_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wqs); +} +static DEVICE_ATTR_RO(max_work_queues); + +static ssize_t max_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_engines); +} +static DEVICE_ATTR_RO(max_engines); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static ssize_t max_batch_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_batch_size); +} +static DEVICE_ATTR_RO(max_batch_size); + +static ssize_t max_transfer_size_show(struct device *dev, + struct 
device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); +} +static DEVICE_ATTR_RO(max_transfer_size); + +static ssize_t op_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); +} +static DEVICE_ATTR_RO(op_cap); + +static ssize_t configurable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", + test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); +} +static DEVICE_ATTR_RO(configurable); + +static ssize_t clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long flags; + int count = 0, i; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + count += wq->client_count; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return sprintf(buf, "%d\n", count); +} +static DEVICE_ATTR_RO(clients); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + switch (idxd->state) { + case IDXD_DEV_DISABLED: + case IDXD_DEV_CONF_READY: + return sprintf(buf, "disabled\n"); + case IDXD_DEV_ENABLED: + return sprintf(buf, "enabled\n"); + case IDXD_DEV_HALTED: + return sprintf(buf, "halted\n"); + } + + return sprintf(buf, "unknown\n"); +} +static DEVICE_ATTR_RO(state); + +static ssize_t errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + int i, out = 0; + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < 4; i++) + out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + out--; + out += sprintf(buf + out, "\n"); + return out; +} +static DEVICE_ATTR_RO(errors); + +static ssize_t max_tokens_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_tokens); +} +static DEVICE_ATTR_RO(max_tokens); + +static ssize_t token_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->token_limit); +} + +static ssize_t token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (!idxd->hw.group_cap.token_limit) + return -EPERM; + + if (val > idxd->hw.group_cap.total_tokens) + return -EINVAL; + + idxd->token_limit = val; + return count; +} +static DEVICE_ATTR_RW(token_limit); + +static ssize_t cdev_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
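+	/*
+	 * Char device major for this idxd device; together with each wq's
+	 * "cdev_minor" attribute it lets userspace locate the device node
+	 * backing a work queue.
+	 */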
struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->major); +} +static DEVICE_ATTR_RO(cdev_major); + +static struct attribute *idxd_device_attributes[] = { + &dev_attr_max_groups.attr, + &dev_attr_max_work_queues.attr, + &dev_attr_max_work_queues_size.attr, + &dev_attr_max_engines.attr, + &dev_attr_numa_node.attr, + &dev_attr_max_batch_size.attr, + &dev_attr_max_transfer_size.attr, + &dev_attr_op_cap.attr, + &dev_attr_configurable.attr, + &dev_attr_clients.attr, + &dev_attr_state.attr, + &dev_attr_errors.attr, + &dev_attr_max_tokens.attr, + &dev_attr_token_limit.attr, + &dev_attr_cdev_major.attr, + NULL, +}; + +static const struct attribute_group idxd_device_attribute_group = { + .attrs = idxd_device_attributes, +}; + +static const struct attribute_group *idxd_attribute_groups[] = { + &idxd_device_attribute_group, + NULL, +}; + +static int idxd_setup_engine_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + engine->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&engine->conf_dev, "engine%d.%d", + idxd->id, engine->id); + engine->conf_dev.bus = idxd_get_bus_type(idxd); + engine->conf_dev.groups = idxd_engine_attribute_groups; + engine->conf_dev.type = &idxd_engine_device_type; + dev_dbg(dev, "Engine device register: %s\n", + dev_name(&engine->conf_dev)); + rc = device_register(&engine->conf_dev); + if (rc < 0) { + put_device(&engine->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + return rc; +} + +static int idxd_setup_group_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + group->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&group->conf_dev, "group%d.%d", + idxd->id, group->id); + group->conf_dev.bus = idxd_get_bus_type(idxd); + group->conf_dev.groups = idxd_group_attribute_groups; + group->conf_dev.type = &idxd_group_device_type; + dev_dbg(dev, "Group device register: %s\n", + dev_name(&group->conf_dev)); + rc = device_register(&group->conf_dev); + if (rc < 0) { + put_device(&group->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + return rc; +} + +static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); + wq->conf_dev.bus = idxd_get_bus_type(idxd); + wq->conf_dev.groups = idxd_wq_attribute_groups; + wq->conf_dev.type = &idxd_wq_device_type; + dev_dbg(dev, "WQ device register: %s\n", + dev_name(&wq->conf_dev)); + rc = device_register(&wq->conf_dev); + if (rc < 0) { + put_device(&wq->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + return rc; +} + +static int idxd_setup_device_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + char devname[IDXD_NAME_SIZE]; + + sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); + 
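+	/* Device name: the device type name plus instance id, e.g. "dsa0". */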
idxd->conf_dev.parent = dev;
+	dev_set_name(&idxd->conf_dev, "%s", devname);
+	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+	idxd->conf_dev.groups = idxd_attribute_groups;
+	idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+	rc = device_register(&idxd->conf_dev);
+	if (rc < 0) {
+		put_device(&idxd->conf_dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+	struct device *dev = &idxd->pdev->dev;
+	int rc;
+
+	rc = idxd_setup_device_sysfs(idxd);
+	if (rc < 0) {
+		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = idxd_setup_wq_sysfs(idxd);
+	if (rc < 0) {
+		/* unregister conf dev */
+		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = idxd_setup_group_sysfs(idxd);
+	if (rc < 0) {
+		/* unregister conf dev */
+		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = idxd_setup_engine_sysfs(idxd);
+	if (rc < 0) {
+		/* unregister conf dev */
+		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+	int i;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = &idxd->wqs[i];
+
+		device_unregister(&wq->conf_dev);
+	}
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *engine = &idxd->engines[i];
+
+		device_unregister(&engine->conf_dev);
+	}
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *group = &idxd->groups[i];
+
+		device_unregister(&group->conf_dev);
+	}
+
+	device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+	int i, rc;
+
+	for (i = 0; i < IDXD_TYPE_MAX; i++) {
+		rc = bus_register(idxd_bus_types[i]);
+		if (rc < 0)
+			goto bus_err;
+	}
+
+	return 0;
+
+bus_err:
+	/* Unwind only the bus types that actually registered. */
+	while (--i >= 0)
+		bus_unregister(idxd_bus_types[i]);
+	return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+	int i;
+
+	for (i = 0; i < IDXD_TYPE_MAX; i++)
+		bus_unregister(idxd_bus_types[i]);
+}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c27e206a764c..066b21a32232 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
 		return;
 	}
 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
-	/*
-	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
-	 * the desc allocated will never be freed in vchan_dma_desc_free_list
-	 */
-	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-		list_del(&vd->node);
+
+	list_del(&vd->node);
 
 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 
 	spin_lock_irqsave(&sdmac->vc.lock, flags);
 	vchan_get_all_descriptors(&sdmac->vc, &head);
-	sdmac->desc = NULL;
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 	vchan_dma_desc_free_list(&sdmac->vc, &head);
 	sdmac->context_loaded = false;
 }
 
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
 
 	sdma_disable_channel(chan);
 
-	if (sdmac->desc)
+	if (sdmac->desc) {
+		vchan_terminate_vdesc(&sdmac->desc->vd);
+		sdmac->desc = NULL;
 		schedule_work(&sdmac->terminate_worker);
+	}
+
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	return 0;
 }
@@ -1324,7 +1327,7 @@
static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_async(chan); + sdma_terminate_all(chan); sdma_channel_synchronize(chan); @@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_desc *desc; + struct sdma_desc *desc = NULL; u32 residue; struct virt_dma_desc *vd; enum dma_status ret; @@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, return ret; spin_lock_irqsave(&sdmac->vc.lock, flags); + vd = vchan_find_desc(&sdmac->vc, cookie); - if (vd) { + if (vd) desc = to_sdma_desc(&vd->tx); + else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) + desc = sdmac->desc; + + if (desc) { if (sdmac->flags & IMX_DMA_SG_LOOP) residue = (desc->num_bd - desc->buf_ptail) * desc->period_len - desc->chn_real_count; else residue = desc->chn_count - desc->chn_real_count; - } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { - residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; } else { residue = 0; } + spin_unlock_irqrestore(&sdmac->vc.lock, flags); dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, @@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_terminate_all = sdma_terminate_all; sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index a6a6dc432db8..60e9afbb896c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ioat_kobject_del(ioat_dma); dma_async_device_unregister(dma); - - dma_pool_destroy(ioat_dma->completion_pool); - - INIT_LIST_HEAD(&dma->channels); } /** @@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) break; @@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c) return; ioat_stop(ioat_chan); - ioat_reset_hw(ioat_chan); - /* Put LTR to idle */ - if (ioat_dma->version >= IOAT_VER_3_4) - writeb(IOAT_CHAN_LTR_SWSEL_IDLE, - ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) { + ioat_reset_hw(ioat_chan); + + /* Put LTR to idle */ + if (ioat_dma->version >= IOAT_VER_3_4) + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, + ioat_chan->reg_base + + IOAT_CHAN_LTR_SWSEL_OFFSET); + } spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); @@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = { .err_handler = &ioat_err_handler, }; +static void release_ioatdma(struct dma_device *device) +{ + struct ioatdma_device *d = to_ioatdma_device(device); + int i; + + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(d->idx[i]); + + dma_pool_destroy(d->completion_pool); + kfree(d); +} + 
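+/*
+ * Channel and device memory is now freed from the dma_device release
+ * callback above instead of by devres at PCI unbind, so the structures
+ * stay valid until the last dmaengine reference is dropped.
+ */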
static struct ioatdma_device * alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) { - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return NULL; d->pdev = pdev; d->reg_base = iobase; + d->dma_dev.device_release = release_ioatdma; return d; } @@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + ioat_shutdown(pdev); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index c20e6bd4e298..29f1223b285a 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); vchan_get_all_descriptors(&c->vc, &head); - vchan_dma_desc_free_list(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + return 0; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..b2c2b5e8093c 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/of_dma.h> +#include "dmaengine.h" + static LIST_HEAD(of_dma_list); static DEFINE_MUTEX(of_dma_lock); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 023f951189a7..c683051257fd 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan) } vchan_get_all_descriptors(&vchan->vc, &head); - vchan_dma_desc_free_list(&vchan->vc, &head); spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..88b884cbb7c1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); - pm_runtime_disable(dev); - - if (!pm_runtime_status_suspended(dev)) { - /* amba did not disable the clock */ - amba_pclk_disable(pcdev); - } + pm_runtime_force_suspend(dev); amba_pclk_unprepare(pcdev); return 0; @@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev) if (ret) return ret; - if (!pm_runtime_status_suspended(dev)) - ret = amba_pclk_enable(pcdev); - - pm_runtime_enable(dev); + pm_runtime_force_resume(dev); return ret; } -static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); +static const struct dev_pm_ops pl330_pm = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume) +}; static int pl330_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c new file mode 100644 index 000000000000..db4c5fd453a9 --- /dev/null +++ b/drivers/dma/plx_dma.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com> + * Copyright (c) 2019, GigaIO Networks, Inc + */ + +#include "dmaengine.h" + +#include <linux/circ_buf.h> +#include <linux/dmaengine.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> + +MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine"); +MODULE_VERSION("0.1"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Logan Gunthorpe"); + +#define PLX_REG_DESC_RING_ADDR 0x214 +#define PLX_REG_DESC_RING_ADDR_HI 0x218 +#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C +#define PLX_REG_DESC_RING_COUNT 0x220 +#define PLX_REG_DESC_RING_LAST_ADDR 0x224 +#define PLX_REG_DESC_RING_LAST_SIZE 0x228 +#define PLX_REG_PREF_LIMIT 0x234 +#define PLX_REG_CTRL 0x238 +#define PLX_REG_CTRL2 0x23A +#define PLX_REG_INTR_CTRL 0x23C +#define PLX_REG_INTR_STATUS 0x23E + +#define PLX_REG_PREF_LIMIT_PREF_FOUR 8 + +#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0) +#define PLX_REG_CTRL_ABORT BIT(1) +#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2) +#define PLX_REG_CTRL_START BIT(3) +#define PLX_REG_CTRL_RING_STOP_MODE BIT(4) +#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5) +#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5) +#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5) +#define PLX_REG_CTRL_DESC_INVALID BIT(8) +#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9) +#define PLX_REG_CTRL_ABORT_DONE BIT(10) +#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12) +#define PLX_REG_CTRL_IN_PROGRESS BIT(30) + +#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \ + PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \ + PLX_REG_CTRL_ABORT_DONE | \ + PLX_REG_CTRL_IMM_PAUSE_DONE) + +#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \ + PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \ + PLX_REG_CTRL_START | \ + PLX_REG_CTRL_RESET_VAL) + +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7 + +#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0) +#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1) +#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3) +#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4) +#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5) + +#define PLX_REG_INTR_STATUS_ERROR BIT(0) +#define PLX_REG_INTR_STATUS_INV_DESC BIT(1) +#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2) +#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3) + +struct plx_dma_hw_std_desc { + __le32 flags_and_size; + __le16 dst_addr_hi; + __le16 src_addr_hi; + __le32 dst_addr_lo; + __le32 src_addr_lo; +}; + +#define PLX_DESC_SIZE_MASK 0x7ffffff +#define PLX_DESC_FLAG_VALID BIT(31) +#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30) + +#define PLX_DESC_WB_SUCCESS BIT(30) +#define PLX_DESC_WB_RD_FAIL BIT(29) +#define PLX_DESC_WB_WR_FAIL BIT(28) + +#define PLX_DMA_RING_COUNT 2048 + +struct plx_dma_desc { + struct dma_async_tx_descriptor txd; + struct plx_dma_hw_std_desc *hw; + u32 orig_size; +}; + +struct plx_dma_dev { + struct dma_device dma_dev; + struct dma_chan dma_chan; + struct pci_dev __rcu *pdev; + void __iomem *bar; + struct tasklet_struct desc_task; + + spinlock_t ring_lock; + bool ring_active; + int head; + int tail; + struct plx_dma_hw_std_desc *hw_ring; + dma_addr_t hw_ring_dma; + struct plx_dma_desc **desc_ring; +}; + +static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c) +{ + return container_of(c, struct plx_dma_dev, dma_chan); +} + +static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct plx_dma_desc, txd); +} + +static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i) +{ + return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)]; +} + +static void plx_dma_process_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + u32 
flags; + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size)); + + if (flags & PLX_DESC_FLAG_VALID) + break; + + res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK); + + if (flags & PLX_DESC_WB_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (flags & PLX_DESC_WB_WR_FAIL) + res.result = DMA_TRANS_WRITE_FAILED; + else + res.result = DMA_TRANS_READ_FAILED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + + plx_dma_process_desc(plxdev); + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + res.residue = desc->orig_size; + res.result = DMA_TRANS_ABORTED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void __plx_dma_stop(struct plx_dma_dev *plxdev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u32 val; + + val = readl(plxdev->bar + PLX_REG_CTRL); + if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE)) + return; + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + while (!time_after(jiffies, timeout)) { + val = readl(plxdev->bar + PLX_REG_CTRL); + if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE) + break; + + cpu_relax(); + } + + if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)) + dev_err(plxdev->dma_dev.dev, + "Timeout waiting for graceful pause!\n"); + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); +} + +static void plx_dma_stop(struct plx_dma_dev *plxdev) +{ + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + __plx_dma_stop(plxdev); + + rcu_read_unlock(); +} + +static void plx_dma_desc_task(unsigned long data) +{ + struct plx_dma_dev *plxdev = (void *)data; + + plx_dma_process_desc(plxdev); +} + +static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c, + dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, + unsigned long flags) + __acquires(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c); + struct plx_dma_desc *plxdesc; + + spin_lock_bh(&plxdev->ring_lock); + if (!plxdev->ring_active) + goto err_unlock; + + if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT)) + goto err_unlock; + + if (len > PLX_DESC_SIZE_MASK) + goto err_unlock; + + plxdesc = plx_dma_get_desc(plxdev, plxdev->head); + plxdev->head++; + + plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst)); + plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst)); + plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src)); + plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src)); + + plxdesc->orig_size = len; + + if (flags & 
DMA_PREP_INTERRUPT) + len |= PLX_DESC_FLAG_INT_WHEN_DONE; + + plxdesc->hw->flags_and_size = cpu_to_le32(len); + plxdesc->txd.flags = flags; + + /* return with the lock held, it will be released in tx_submit */ + + return &plxdesc->txd; + +err_unlock: + /* + * Keep sparse happy by restoring an even lock count on + * this lock. + */ + __acquire(plxdev->ring_lock); + + spin_unlock_bh(&plxdev->ring_lock); + return NULL; +} + +static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc) + __releases(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan); + struct plx_dma_desc *plxdesc = to_plx_desc(desc); + dma_cookie_t cookie; + + cookie = dma_cookie_assign(desc); + + /* + * Ensure the descriptor updates are visible to the dma device + * before setting the valid bit. + */ + wmb(); + + plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID); + + spin_unlock_bh(&plxdev->ring_lock); + + return cookie; +} + +static enum dma_status plx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + plx_dma_process_desc(plxdev); + + return dma_cookie_status(chan, cookie, txstate); +} + +static void plx_dma_issue_pending(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + /* + * Ensure the valid bits are visible before starting the + * DMA engine. + */ + wmb(); + + writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL); + + rcu_read_unlock(); +} + +static irqreturn_t plx_dma_isr(int irq, void *devid) +{ + struct plx_dma_dev *plxdev = devid; + u32 status; + + status = readw(plxdev->bar + PLX_REG_INTR_STATUS); + + if (!status) + return IRQ_NONE; + + if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active) + tasklet_schedule(&plxdev->desc_task); + + writew(status, plxdev->bar + PLX_REG_INTR_STATUS); + + return IRQ_HANDLED; +} + +static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev) +{ + struct plx_dma_desc *desc; + int i; + + plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT, + sizeof(*plxdev->desc_ring), GFP_KERNEL); + if (!plxdev->desc_ring) + return -ENOMEM; + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) { + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + goto free_and_exit; + + dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan); + desc->txd.tx_submit = plx_dma_tx_submit; + desc->hw = &plxdev->hw_ring[i]; + + plxdev->desc_ring[i] = desc; + } + + return 0; + +free_and_exit: + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + kfree(plxdev->desc_ring); + return -ENOMEM; +} + +static int plx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + int rc; + + plxdev->head = plxdev->tail = 0; + plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz, + &plxdev->hw_ring_dma, GFP_KERNEL); + if (!plxdev->hw_ring) + return -ENOMEM; + + rc = plx_dma_alloc_desc(plxdev); + if (rc) + goto out_free_hw_ring; + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + rc = -ENODEV; + goto out_free_hw_ring; + } + + writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL); + writel(lower_32_bits(plxdev->hw_ring_dma), + 
plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(upper_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(lower_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); + writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT); + + plxdev->ring_active = true; + + rcu_read_unlock(); + + return PLX_DMA_RING_COUNT; + +out_free_hw_ring: + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + return rc; +} + +static void plx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + struct pci_dev *pdev; + int irq = -1; + int i; + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + plx_dma_stop(plxdev); + + rcu_read_lock(); + pdev = rcu_dereference(plxdev->pdev); + if (pdev) + irq = pci_irq_vector(pdev, 0); + rcu_read_unlock(); + + if (irq > 0) + synchronize_irq(irq); + + tasklet_kill(&plxdev->desc_task); + + plx_dma_abort_desc(plxdev); + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + + kfree(plxdev->desc_ring); + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + +} + +static void plx_dma_release(struct dma_device *dma_dev) +{ + struct plx_dma_dev *plxdev = + container_of(dma_dev, struct plx_dma_dev, dma_dev); + + put_device(dma_dev->dev); + kfree(plxdev); +} + +static int plx_dma_create(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev; + struct dma_device *dma; + struct dma_chan *chan; + int rc; + + plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL); + if (!plxdev) + return -ENOMEM; + + rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, + KBUILD_MODNAME, plxdev); + if (rc) { + kfree(plxdev); + return rc; + } + + spin_lock_init(&plxdev->ring_lock); + tasklet_init(&plxdev->desc_task, plx_dma_desc_task, + (unsigned long)plxdev); + + RCU_INIT_POINTER(plxdev->pdev, pdev); + plxdev->bar = pcim_iomap_table(pdev)[0]; + + dma = &plxdev->dma_dev; + dma->chancnt = 1; + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->copy_align = DMAENGINE_ALIGN_1_BYTE; + dma->dev = get_device(&pdev->dev); + + dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources; + dma->device_free_chan_resources = plx_dma_free_chan_resources; + dma->device_prep_dma_memcpy = plx_dma_prep_memcpy; + dma->device_issue_pending = plx_dma_issue_pending; + dma->device_tx_status = plx_dma_tx_status; + dma->device_release = plx_dma_release; + + chan = &plxdev->dma_chan; + chan->device = dma; + dma_cookie_init(chan); + list_add_tail(&chan->device_node, &dma->channels); + + rc = dma_async_device_register(dma); + if (rc) { + pci_err(pdev, "Failed to register dma device: %d\n", rc); + free_irq(pci_irq_vector(pdev, 0), plxdev); + kfree(plxdev); + return rc; + } + + pci_set_drvdata(pdev, plxdev); + + return 0; +} + +static int plx_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, 1, 
KBUILD_MODNAME); + if (rc) + return rc; + + rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc <= 0) + return rc; + + pci_set_master(pdev); + + rc = plx_dma_create(pdev); + if (rc) + goto err_free_irq_vectors; + + pci_info(pdev, "PLX DMA Channel Registered\n"); + + return 0; + +err_free_irq_vectors: + pci_free_irq_vectors(pdev); + return rc; +} + +static void plx_dma_remove(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev = pci_get_drvdata(pdev); + + free_irq(pci_irq_vector(pdev, 0), plxdev); + + rcu_assign_pointer(plxdev->pdev, NULL); + synchronize_rcu(); + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + __plx_dma_stop(plxdev); + plx_dma_abort_desc(plxdev); + + plxdev->bar = NULL; + dma_async_device_unregister(&plxdev->dma_dev); + + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id plx_dma_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_PLX, + .device = 0x87D0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = PCI_CLASS_SYSTEM_OTHER << 8, + .class_mask = 0xFFFFFFFF, + }, + {0} +}; +MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl); + +static struct pci_driver plx_dma_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = plx_dma_pci_tbl, + .probe = plx_dma_probe, + .remove = plx_dma_remove, +}; +module_pci_driver(plx_dma_pci_driver); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 43da8eeb18ef..8e14c72d03f0 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) s3c24xx_dma_start_next_sg(s3cchan, txd); } -static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, - struct s3c24xx_dma_chan *s3cchan) -{ - LIST_HEAD(head); - - vchan_get_all_descriptors(&s3cchan->vc, &head); - vchan_dma_desc_free_list(&s3cchan->vc, &head); -} - /* * Try to allocate a physical channel. When successful, assign it to * this virtual channel, and initiate the next descriptor. 
The @@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; + LIST_HEAD(head); unsigned long flags; - int ret = 0; + int ret; spin_lock_irqsave(&s3cchan->vc.lock, flags); @@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) } /* Dequeue jobs not yet fired as well */ - s3c24xx_dma_free_txd_list(s3cdma, s3cchan); + + vchan_get_all_descriptors(&s3cchan->vc, &head); + + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); + + vchan_dma_desc_free_list(&s3cchan->vc, &head); + + return 0; + unlock: spin_unlock_irqrestore(&s3cchan->vc.lock, flags); @@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) /* Basic sanity check */ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { - dev_err(&pdev->dev, "to many dma channels %d, max %d\n", + dev_err(&pdev->dev, "too many dma channels %d, max %d\n", pdata->num_phy_channels, MAX_DMA_CHANNELS); return -EINVAL; } diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 465256fe8b1f..6d0bec947636 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan) kfree(chan->desc); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); sf_pdma_disclaim_chan(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); } static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, @@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan) chan->desc = NULL; chan->xfer_err = false; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index e397a50058c8..bbc2bda3b902 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dma_addr_t src, dest; u32 endpoints; int nr_periods, offset, plength, i; + u8 ram_type, io_mode, linear_mode; if (!is_slave_direction(dir)) { dev_err(chan2dev(chan), "Invalid DMA direction\n"); return NULL; } - if (vchan->is_dedicated) { - /* - * As we are using this just for audio data, we need to use - * normal DMA. There is nothing stopping us from supporting - * dedicated DMA here as well, so if a client comes up and - * requires it, it will be simple to implement it. 
- */ - dev_err(chan2dev(chan), - "Cyclic transfers are only supported on Normal DMA\n"); - return NULL; - } - contract = generate_dma_contract(); if (!contract) return NULL; contract->is_cyclic = 1; - /* Figure out the endpoints and the address we need */ + if (vchan->is_dedicated) { + io_mode = SUN4I_DDMA_ADDR_MODE_IO; + linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM; + } else { + io_mode = SUN4I_NDMA_ADDR_MODE_IO; + linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM; + } + if (dir == DMA_MEM_TO_DEV) { src = buf; dest = sconfig->dst_addr; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type); } else { src = sconfig->src_addr; dest = buf; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode); } /* @@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dest = buf + offset; /* Make the promise */ - promise = generate_ndma_promise(chan, src, dest, - plength, sconfig, dir); + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, src, dest, + plength, sconfig); + else + promise = generate_ndma_promise(chan, src, dest, + plength, sconfig, dir); + if (!promise) { /* TODO: should we free everything? */ return NULL; @@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan) } spin_lock_irqsave(&vchan->vc.lock, flags); - vchan_dma_desc_free_list(&vchan->vc, &head); /* Clear these so the vchan is usable again */ vchan->processing = NULL; vchan->pchan = NULL; spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index d507c24fbf31..f76e06651f80 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -34,5 +34,29 @@ config DMA_OMAP Enable support for the TI sDMA (System DMA or DMA4) controller. This DMA engine is found on OMAP and DRA7xx parts. +config TI_K3_UDMA + bool "Texas Instruments UDMA support" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL + depends on TI_SCI_INTA_IRQCHIP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select TI_K3_RINGACC + select TI_K3_PSIL + help + Enable support for the TI UDMA (Unified DMA) controller. This + DMA engine is used in AM65x and j721e. + +config TI_K3_UDMA_GLUE_LAYER + bool "Texas Instruments UDMA Glue layer for non DMAengine users" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_K3_UDMA + help + Say y here to support the K3 NAVSS DMA glue interface + If unsure, say N. 
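As a usage reference for the glue interface this help text describes: clients request channels directly through the k3_udma_glue_* API added below in k3-udma-glue.c, rather than through the DMAengine framework. A minimal TX-channel setup sketch from a consumer driver's probe (where dev is its struct device); the "tx0" dma-names entry and the ring sizes are illustrative assumptions, not part of this patch:

	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;

	cfg.swdata_size = 16;		/* per-descriptor software data (assumed) */
	cfg.tx_cfg.size = 128;		/* submit ring elements (assumed) */
	cfg.txcq_cfg.size = 128;	/* completion ring elements (assumed) */

	/* "tx0" must match an entry in the consumer's dma-names property */
	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);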
+ +config TI_K3_PSIL + bool + config TI_DMA_CROSSBAR bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index 113e59ec9c32..9a29a107e374 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o +obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 756a3c951dc7..03a7f647f7b2 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev) if (!info) return -ENODEV; - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - return ret; - } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ecc); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + pm_runtime_disable(dev); + return ret; + } + /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) - return ret; + goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); - if (!ecc->slave_chans) - return -ENOMEM; ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->slot_inuse) - return -ENOMEM; ecc->channels_mask = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->channels_mask) - return -ENOMEM; + if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { + ret = -ENOMEM; + goto err_disable_pm; + } /* Mark all channels available initially */ bitmap_fill(ecc->channels_mask, ecc->num_channels); @@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccint = irq; } @@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccerrint = irq; } @@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); - return ecc->dummy_slot; + ret = ecc->dummy_slot; + goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; @@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev) err_reg1: edma_free_slot(ecc, ecc->dummy_slot); +err_disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev) if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return 0; } diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c new file mode 100644 index 000000000000..a896a15908cf --- /dev/null +++ b/drivers/dma/ti/k3-psil-am654.c @@ 
-0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am654_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0x4300), + PSIL_ETHERNET(0x4301), + PSIL_ETHERNET(0x4302), + PSIL_ETHERNET(0x4303), + /* PDMA0 - McASPs */ + PSIL_PDMA_XY_TR(0x4400), + PSIL_PDMA_XY_TR(0x4401), + PSIL_PDMA_XY_TR(0x4402), + /* PDMA1 - SPI0-4 */ + PSIL_PDMA_XY_PKT(0x4500), + PSIL_PDMA_XY_PKT(0x4501), + PSIL_PDMA_XY_PKT(0x4502), + PSIL_PDMA_XY_PKT(0x4503), + PSIL_PDMA_XY_PKT(0x4504), + PSIL_PDMA_XY_PKT(0x4505), + PSIL_PDMA_XY_PKT(0x4506), + PSIL_PDMA_XY_PKT(0x4507), + PSIL_PDMA_XY_PKT(0x4508), + PSIL_PDMA_XY_PKT(0x4509), + PSIL_PDMA_XY_PKT(0x450a), + PSIL_PDMA_XY_PKT(0x450b), + PSIL_PDMA_XY_PKT(0x450c), + PSIL_PDMA_XY_PKT(0x450d), + PSIL_PDMA_XY_PKT(0x450e), + PSIL_PDMA_XY_PKT(0x450f), + PSIL_PDMA_XY_PKT(0x4510), + PSIL_PDMA_XY_PKT(0x4511), + PSIL_PDMA_XY_PKT(0x4512), + PSIL_PDMA_XY_PKT(0x4513), + /* PDMA1 - USART0-2 */ + PSIL_PDMA_XY_PKT(0x4514), + PSIL_PDMA_XY_PKT(0x4515), + PSIL_PDMA_XY_PKT(0x4516), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 - ADCs */ + PSIL_PDMA_XY_TR(0x7100), + PSIL_PDMA_XY_TR(0x7101), + PSIL_PDMA_XY_TR(0x7102), + PSIL_PDMA_XY_TR(0x7103), + /* MCU_PDMA1 - MCU_SPI0-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + PSIL_PDMA_XY_PKT(0x7208), + PSIL_PDMA_XY_PKT(0x7209), + PSIL_PDMA_XY_PKT(0x720a), + PSIL_PDMA_XY_PKT(0x720b), + /* MCU_PDMA1 - MCU_USART0 */ + PSIL_PDMA_XY_PKT(0x7212), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am654_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0xc300), + PSIL_ETHERNET(0xc301), + 
PSIL_ETHERNET(0xc302), + PSIL_ETHERNET(0xc303), + PSIL_ETHERNET(0xc304), + PSIL_ETHERNET(0xc305), + PSIL_ETHERNET(0xc306), + PSIL_ETHERNET(0xc307), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), +}; + +struct psil_ep_map am654_ep_map = { + .name = "am654", + .src = am654_src_ep_map, + .src_count = ARRAY_SIZE(am654_src_ep_map), + .dst = am654_dst_ep_map, + .dst_count = ARRAY_SIZE(am654_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c new file mode 100644 index 000000000000..e3cfd5f66842 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721e.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721e_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + PSIL_PDMA_MCASP(0x4503), + PSIL_PDMA_MCASP(0x4504), + PSIL_PDMA_MCASP(0x4505), + PSIL_PDMA_MCASP(0x4506), + PSIL_PDMA_MCASP(0x4507), + PSIL_PDMA_MCASP(0x4508), + /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA11 (PDMA_MISC_G3) */ + PSIL_PDMA_XY_PKT(0x4624), + PSIL_PDMA_XY_PKT(0x4625), + 
PSIL_PDMA_XY_PKT(0x4626), + PSIL_PDMA_XY_PKT(0x4627), + PSIL_PDMA_XY_PKT(0x4628), + PSIL_PDMA_XY_PKT(0x4629), + PSIL_PDMA_XY_PKT(0x4630), + PSIL_PDMA_XY_PKT(0x463a), + /* PDMA13 (PDMA_USART_G0) - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA14 (PDMA_USART_G1) - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA15 (PDMA_USART_G2) - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW9 */ + PSIL_ETHERNET(0x4a00), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721e_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* CPSW9 */ + PSIL_ETHERNET(0xca00), + PSIL_ETHERNET(0xca01), + PSIL_ETHERNET(0xca02), + PSIL_ETHERNET(0xca03), + PSIL_ETHERNET(0xca04), + PSIL_ETHERNET(0xca05), + PSIL_ETHERNET(0xca06), + PSIL_ETHERNET(0xca07), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), +}; + +struct psil_ep_map j721e_ep_map = { + .name = "j721e", + .src = j721e_src_ep_map, + .src_count = ARRAY_SIZE(j721e_src_ep_map), + .dst = j721e_dst_ep_map, + .dst_count = ARRAY_SIZE(j721e_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h new file mode 100644 index 000000000000..a1f389ca371e --- /dev/null +++ b/drivers/dma/ti/k3-psil-priv.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_PSIL_PRIV_H_ +#define K3_PSIL_PRIV_H_ + +#include <linux/dma/k3-psil.h> + +struct psil_ep { + u32 thread_id; + struct psil_endpoint_config ep_config; +}; + +/** + * struct psil_ep_map - PSI-L thread ID configuration maps + * @name: Name of the map, set it to the name of the SoC + * @src: Array of source PSI-L thread configurations + * @src_count: Number of entries in the src array + * @dst: Array of destination PSI-L thread configurations + * @dst_count: Number of entries in the dst array + * + * In case of symmetric configuration for a matching src/dst thread (for 
example
+ * 0x4400 and 0xc400) only the src configuration can be present. If no dst
+ * configuration is found, the code will look for (dst_thread_id & ~0x8000)
+ * to find the symmetric match.
+ */
+struct psil_ep_map {
+	char *name;
+	struct psil_ep *src;
+	int src_count;
+	struct psil_ep *dst;
+	int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 000000000000..d7b965049ccb
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+	int i;
+
+	mutex_lock(&ep_map_mutex);
+	if (!soc_ep_map) {
+		if (of_machine_is_compatible("ti,am654")) {
+			soc_ep_map = &am654_ep_map;
+		} else if (of_machine_is_compatible("ti,j721e")) {
+			soc_ep_map = &j721e_ep_map;
+		} else {
+			pr_err("PSIL: No compatible machine found for map\n");
+			/* do not return with the mutex held */
+			mutex_unlock(&ep_map_mutex);
+			return ERR_PTR(-ENOTSUPP);
+		}
+		pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+	}
+	mutex_unlock(&ep_map_mutex);
+
+	if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+		/* check in destination thread map */
+		for (i = 0; i < soc_ep_map->dst_count; i++) {
+			if (soc_ep_map->dst[i].thread_id == thread_id)
+				return &soc_ep_map->dst[i].ep_config;
+		}
+	}
+
+	thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+	if (soc_ep_map->src) {
+		for (i = 0; i < soc_ep_map->src_count; i++) {
+			if (soc_ep_map->src[i].thread_id == thread_id)
+				return &soc_ep_map->src[i].ep_config;
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+			   struct psil_endpoint_config *ep_config)
+{
+	struct psil_endpoint_config *dst_ep_config;
+	struct of_phandle_args dma_spec;
+	u32 thread_id;
+	int index;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	index = of_property_match_string(dev->of_node, "dma-names", name);
+	if (index < 0)
+		return index;
+
+	if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+				       index, &dma_spec))
+		return -ENOENT;
+
+	thread_id = dma_spec.args[0];
+
+	dst_ep_config = psil_get_ep_config(thread_id);
+	if (IS_ERR(dst_ep_config)) {
+		pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+		       thread_id);
+		of_node_put(dma_spec.np);
+		return PTR_ERR(dst_ep_config);
+	}
+
+	memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+	of_node_put(dma_spec.np);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644
index 000000000000..c1511298ece2
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include
<linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/dma/k3-udma-glue.h> + +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct k3_udma_glue_common { + struct device *dev; + struct udma_dev *udmax; + const struct udma_tisci_rm *tisci_rm; + struct k3_ringacc *ringacc; + u32 src_thread; + u32 dst_thread; + + u32 hdesc_size; + bool epib; + u32 psdata_size; + u32 swdata_size; +}; + +struct k3_udma_glue_tx_channel { + struct k3_udma_glue_common common; + + struct udma_tchan *udma_tchanx; + int udma_tchan_id; + + struct k3_ring *ringtx; + struct k3_ring *ringtxcq; + + bool psil_paired; + + int virq; + + atomic_t free_pkts; + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; +}; + +struct k3_udma_glue_rx_flow { + struct udma_rflow *udma_rflow; + int udma_rflow_id; + struct k3_ring *ringrx; + struct k3_ring *ringrxfdq; + + int virq; +}; + +struct k3_udma_glue_rx_channel { + struct k3_udma_glue_common common; + + struct udma_rchan *udma_rchanx; + int udma_rchan_id; + bool remote; + + bool psil_paired; + + u32 swdata_size; + int flow_id_base; + + struct k3_udma_glue_rx_flow *flows; + u32 flow_num; + u32 flows_ready; +}; + +#define K3_UDMAX_TDOWN_TIMEOUT_US 1000 + +static int of_k3_udma_glue_parse(struct device_node *udmax_np, + struct k3_udma_glue_common *common) +{ + common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, + "ti,ringacc"); + if (IS_ERR(common->ringacc)) + return PTR_ERR(common->ringacc); + + common->udmax = of_xudma_dev_get(udmax_np, NULL); + if (IS_ERR(common->udmax)) + return PTR_ERR(common->udmax); + + common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); + + return 0; +} + +static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, + const char *name, struct k3_udma_glue_common *common, + bool tx_chn) +{ + struct psil_endpoint_config *ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int ret = 0; + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(chn_np, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, + &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + /* get psil endpoint config */ + ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(ep_config)) { + dev_err(common->dev, + "No configuration for psi-l thread 0x%04x\n", + thread_id); + ret = PTR_ERR(ep_config); + goto out_put_spec; + } + + common->epib = ep_config->needs_epib; + common->psdata_size = ep_config->psd_size; + + if (tx_chn) + common->dst_thread = thread_id; + else + common->src_thread = thread_id; + + ret = of_k3_udma_glue_parse(dma_spec.np, common); + +out_put_spec: + of_node_put(dma_spec.np); + return ret; +}; + +static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + struct device *dev = tx_chn->common.dev; + + dev_dbg(dev, "dump_tx_chn:\n" + "udma_tchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n", + tx_chn->udma_tchan_id, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); +} + +static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + 
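/* Registers dumped below: RT_CTL, PEER_RT_EN and the PCNT/BCNT/SBCNT counters. */ + 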
dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG, + xudma_tchanrt_read(chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG)); +} + +static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.index = tx_chn->udma_tchan_id; + if (tx_chn->tx_pause_on_err) + req.tx_pause_on_err = 1; + if (tx_chn->tx_filt_einfo) + req.tx_filt_einfo = 1; + if (tx_chn->tx_filt_pswords) + req.tx_filt_pswords = 1; + req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + if (tx_chn->tx_supr_tdpkt) + req.tx_supr_tdpkt = 1; + req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; + req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + + return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); +} + +struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, + const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) +{ + struct k3_udma_glue_tx_channel *tx_chn; + int ret; + + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); + if (!tx_chn) + return ERR_PTR(-ENOMEM); + + tx_chn->common.dev = dev; + tx_chn->common.swdata_size = cfg->swdata_size; + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &tx_chn->common, true); + if (ret) + goto err; + + tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, + tx_chn->common.psdata_size, + tx_chn->common.swdata_size); + + /* request and cfg UDMAP TX channel */ + tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); + if (IS_ERR(tx_chn->udma_tchanx)) { + ret = PTR_ERR(tx_chn->udma_tchanx); + dev_err(dev, "UDMAX tchanx get err %d\n", ret); + goto err; + } + tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); + + atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); + + /* request and cfg rings */ + tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc, + tx_chn->udma_tchan_id, 0); + if (!tx_chn->ringtx) { + ret = -ENODEV; + dev_err(dev, "Failed to get TX ring %u\n", + tx_chn->udma_tchan_id); + goto err; + } + + tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc, + -1, 0); + if (!tx_chn->ringtxcq) { + ret = -ENODEV; + dev_err(dev, "Failed to get TXCQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx 
%d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx %d\n", ret); + goto err; + } + + /* request and cfg psi-l */ + tx_chn->common.src_thread = + xudma_dev_get_psil_base(tx_chn->common.udmax) + + tx_chn->udma_tchan_id; + + ret = k3_udma_glue_cfg_tx_chn(tx_chn); + if (ret) { + dev_err(dev, "Failed to cfg tchan %d\n", ret); + goto err; + } + + ret = xudma_navss_psil_pair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + tx_chn->psil_paired = true; + + /* reset TX RT registers */ + k3_udma_glue_disable_tx_chn(tx_chn); + + k3_udma_glue_dump_tx_chn(tx_chn); + + return tx_chn; + +err: + k3_udma_glue_release_tx_chn(tx_chn); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); + +void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + if (tx_chn->psil_paired) { + xudma_navss_psil_unpair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + tx_chn->psil_paired = false; + } + + if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) + xudma_tchan_put(tx_chn->common.udmax, + tx_chn->udma_tchanx); + + if (tx_chn->ringtxcq) + k3_ringacc_ring_free(tx_chn->ringtxcq); + + if (tx_chn->ringtx) + k3_ringacc_ring_free(tx_chn->ringtx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); + +int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma) +{ + u32 ringtxcq_id; + + if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) + return -ENOMEM; + + ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); + + return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); + +int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *desc_dma) +{ + int ret; + + ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); + if (!ret) + atomic_inc(&tx_chn->free_pkts); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); + +int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + u32 txrt_ctl; + + txrt_ctl = UDMA_PEER_RT_EN_ENABLE; + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + txrt_ctl); + + txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + txrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + txrt_ctl); + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); + +void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0); + + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); + +void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + bool sync) +{ + int i = 0; + u32 val; + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = 
xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(tx_chn->common.dev, "TX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); + +void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma)) +{ + dma_addr_t desc_dma; + int occ_tx, i, ret; + + /* reset TXCQ as it is not input for udma - expected to be empty */ + if (tx_chn->ringtxcq) + k3_ringacc_ring_reset(tx_chn->ringtxcq); + + /* + * TXQ reset needs to be done in a special way, as it is input for udma + * and its state is cached by udma, so: + * 1) save TXQ occ + * 2) clean up TXQ and call callback .cleanup() for each desc + * 3) reset TXQ in a special way + */ + occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); + dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); + + for (i = 0; i < occ_tx; i++) { + ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); + if (ret) { + dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); + +u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) +{ + return tx_chn->common.hdesc_size; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); + +u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) +{ + return k3_ringacc_get_ring_id(tx_chn->ringtxcq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); + +int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) +{ + tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); + + return tx_chn->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); + +static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req; + int ret; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + + req.nav_id = tisci_rm->tisci_dev_id; + req.index = rx_chn->udma_rchan_id; + req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; + /* + * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw + * and udmax impl, so just configure it to an invalid value. 
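+ * The commented-out assignment below shows the intended configuration once + * that support is available: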
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); + */ + req.rxcq_qnum = 0xFFFF; + if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { + /* Default flow + extra ones */ + req.flowid_start = rx_chn->flow_id_base; + req.flowid_cnt = rx_chn->flow_num; + } + req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + + ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); + if (ret) + dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", + rx_chn->udma_rchan_id, ret); + + return ret; +} + +static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + if (IS_ERR_OR_NULL(flow->udma_rflow)) + return; + + if (flow->ringrxfdq) + k3_ringacc_ring_free(flow->ringrxfdq); + + if (flow->ringrx) + k3_ringacc_ring_free(flow->ringrx); + + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + rx_chn->flows_ready--; +} + +static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, + flow->udma_rflow_id); + if (IS_ERR(flow->udma_rflow)) { + ret = PTR_ERR(flow->udma_rflow); + dev_err(dev, "UDMAX rflow get err %d\n", ret); + goto err; + } + + if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + return -ENODEV; + } + + /* request and cfg rings */ + flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxq_id, 0); + if (!flow->ringrx) { + ret = -ENODEV; + dev_err(dev, "Failed to get RX ring\n"); + goto err; + } + + flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxfdq0_id, 0); + if (!flow->ringrxfdq) { + ret = -ENODEV; + dev_err(dev, "Failed to get RXFDQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrx %d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); + goto err; + } + + if (rx_chn->remote) { + rx_ring_id = TI_SCI_RESOURCE_NULL; + rx_ringfdq_id = TI_SCI_RESOURCE_NULL; + } else { + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + } + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + 
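/* One shared FDQ: the four size-based free descriptor queue selectors below are all set to the same ring. */ + 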
req.flow_index = flow->udma_rflow_id; + if (rx_chn->common.epib) + req.rx_einfo_present = 1; + if (rx_chn->common.psdata_size) + req.rx_psinfo_present = 1; + if (flow_cfg->rx_error_handling) + req.rx_error_handling = 1; + req.rx_desc_type = 0; + req.rx_dest_qnum = rx_ring_id; + req.rx_src_tag_hi_sel = 0; + req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; + req.rx_dest_tag_hi_sel = 0; + req.rx_dest_tag_lo_sel = 0; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, + ret); + goto err; + } + + rx_chn->flows_ready++; + dev_dbg(dev, "flow%d config done. ready:%d\n", + flow->udma_rflow_id, rx_chn->flows_ready); + + return 0; +err: + k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + return ret; +} + +static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "dump_rx_chn:\n" + "udma_rchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n" + "epib: %d\n" + "hdesc_size: %u\n" + "psdata_size: %u\n" + "swdata_size: %u\n" + "flow_id_base: %d\n" + "flow_num: %d\n", + chn->udma_rchan_id, + chn->common.src_thread, + chn->common.dst_thread, + chn->common.epib, + chn->common.hdesc_size, + chn->common.psdata_size, + chn->common.swdata_size, + chn->flow_id_base, + chn->flow_num); +} + +static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG, + xudma_rchanrt_read(chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG)); +} + +static int +k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + int ret; + + /* default rflow */ + if (cfg->flow_id_use_rxchan_id) + return 0; + + /* not a GP rflows */ + if (rx_chn->flow_id_base != -1 && + !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + return 0; + + /* Allocate range of GP rflows */ + ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + if (ret < 0) { + dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", + rx_chn->flow_id_base, rx_chn->flow_num, ret); + return ret; + } + rx_chn->flow_id_base = ret; + + return 0; +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0) + return ERR_PTR(-EINVAL); + + if (cfg->flow_id_num != 1 && + (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) + return ERR_PTR(-EINVAL); + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + 
rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + /* request and cfg UDMAP RX channel */ + rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); + if (IS_ERR(rx_chn->udma_rchanx)) { + ret = PTR_ERR(rx_chn->udma_rchanx); + dev_err(dev, "UDMAX rchanx get err %d\n", ret); + goto err; + } + rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); + + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + + /* Use RX channel id as flow id: target dev can't generate flow_id */ + if (cfg->flow_id_use_rxchan_id) + rx_chn->flow_id_base = rx_chn->udma_rchan_id; + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + /* request and cfg psi-l */ + rx_chn->common.dst_thread = + xudma_dev_get_psil_base(rx_chn->common.udmax) + + rx_chn->udma_rchan_id; + + ret = k3_udma_glue_cfg_rx_chn(rx_chn); + if (ret) { + dev_err(dev, "Failed to cfg rchan %d\n", ret); + goto err; + } + + /* init default RX flow only if flow_num = 1 */ + if (cfg->def_flow_cfg) { + ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); + if (ret) + goto err; + } + + ret = xudma_navss_psil_pair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + rx_chn->psil_paired = true; + + /* reset RX RT registers */ + k3_udma_glue_disable_rx_chn(rx_chn); + + k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0 || + cfg->flow_id_use_rxchan_id || + cfg->def_flow_cfg || + cfg->flow_id_base < 0) + return ERR_PTR(-EINVAL); + + /* + * Remote RX channel is under control of a remote CPU core, so + * Linux can only request and manipulate it via dedicated RX flows + */ + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = true; + rx_chn->udma_rchan_id = -1; + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + rx_chn->psil_paired = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + 
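/* No rchan configuration or PSI-L pairing for remote channels: the remote core owns the channel, Linux only manages the flows and rings set up above. */ + 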
k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + if (cfg->remote) + return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); + else + return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); + +void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + int i; + + if (IS_ERR_OR_NULL(rx_chn->common.udmax)) + return; + + if (rx_chn->psil_paired) { + xudma_navss_psil_unpair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + rx_chn->psil_paired = false; + } + + for (i = 0; i < rx_chn->flow_num; i++) + k3_udma_glue_release_rx_flow(rx_chn, i); + + if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + xudma_free_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + + if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) + xudma_rchan_put(rx_chn->common.udmax, + rx_chn->udma_rchanx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); + +int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); + +u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow; + + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + flow = &rx_chn->flows[flow_idx]; + + return k3_ringacc_get_ring_id(flow->ringrxfdq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); + +u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) +{ + return rx_chn->flow_id_base; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); + +int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = rx_ring_id; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); + +int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = 
rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); + +int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + u32 rxrt_ctl; + + if (rx_chn->remote) + return -EINVAL; + + if (rx_chn->flows_ready < rx_chn->flow_num) + return -EINVAL; + + rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + rxrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, + rxrt_ctl); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn); + +void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + 0); + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn); + +void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + bool sync) +{ + int i = 0; + u32 val; + + if (rx_chn->remote) + return; + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(rx_chn->common.dev, "RX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n"); + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn); + +void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + struct device *dev = rx_chn->common.dev; + dma_addr_t desc_dma; + int occ_rx, i, ret; + + /* reset RXCQ as it is not input for udma - expected to be empty */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); + dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx); + if (flow->ringrx) + k3_ringacc_ring_reset(flow->ringrx); + + /* 
Skip RX FDQ in case one FDQ is used for the set of flows */ + if (skip_fdq) + return; + + /* + * RX FDQ reset needs to be done in a special way, as it is input for + * udma and its state is cached by udma, so: + * 1) save RX FDQ occ + * 2) clean up RX FDQ and call callback .cleanup() for each desc + * 3) reset RX FDQ in a special way + */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); + dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx); + + for (i = 0; i < occ_rx; i++) { + ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); + if (ret) { + dev_err(dev, "RX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn); + +int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, struct cppi5_host_desc_t *desc_rx, + dma_addr_t desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn); + +int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, dma_addr_t *desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_pop(flow->ringrx, desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn); + +int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow; + + flow = &rx_chn->flows[flow_num]; + + flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); + + return flow->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c new file mode 100644 index 000000000000..0b8f3dd6b146 --- /dev/null +++ b/drivers/dma/ti/k3-udma-private.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_pair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_pair); + +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_unpair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_unpair); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) +{ + struct device_node *udma_node = np; + struct platform_device *pdev; + struct udma_dev *ud; + + if (property) { + udma_node = of_parse_phandle(np, property, 0); + if (!udma_node) { + pr_err("UDMA node is not found\n"); + return ERR_PTR(-ENODEV); + } + } + + pdev = of_find_device_by_node(udma_node); + if (!pdev) { + pr_debug("UDMA device not found\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + if (np != udma_node) + of_node_put(udma_node); + + ud = platform_get_drvdata(pdev); + if (!ud) { + pr_debug("UDMA has not been probed\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + return ud; +} +EXPORT_SYMBOL(of_xudma_dev_get); + +u32 xudma_dev_get_psil_base(struct udma_dev *ud) +{ + return ud->psil_base; +} +EXPORT_SYMBOL(xudma_dev_get_psil_base); + +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud) +{ + return &ud->tisci_rm; +} +EXPORT_SYMBOL(xudma_dev_get_tisci_rm); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_alloc_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_alloc_gp_rflow_range); + +int 
xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_free_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_free_gp_rflow_range); + +bool xudma_rflow_is_gp(struct udma_dev *ud, int id) +{ + return !test_bit(id, ud->rflow_gp_map); +} +EXPORT_SYMBOL(xudma_rflow_is_gp); + +#define XUDMA_GET_PUT_RESOURCE(res) \ +struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \ +{ \ + return __udma_reserve_##res(ud, false, id); \ +} \ +EXPORT_SYMBOL(xudma_##res##_get); \ + \ +void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \ +{ \ + clear_bit(p->id, ud->res##_map); \ +} \ +EXPORT_SYMBOL(xudma_##res##_put) +XUDMA_GET_PUT_RESOURCE(tchan); +XUDMA_GET_PUT_RESOURCE(rchan); + +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id) +{ + return __udma_get_rflow(ud, id); +} +EXPORT_SYMBOL(xudma_rflow_get); + +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p) +{ + __udma_put_rflow(ud, p); +} +EXPORT_SYMBOL(xudma_rflow_put); + +#define XUDMA_GET_RESOURCE_ID(res) \ +int xudma_##res##_get_id(struct udma_##res *p) \ +{ \ + return p->id; \ +} \ +EXPORT_SYMBOL(xudma_##res##_get_id) +XUDMA_GET_RESOURCE_ID(tchan); +XUDMA_GET_RESOURCE_ID(rchan); +XUDMA_GET_RESOURCE_ID(rflow); + +/* Exported register access functions */ +#define XUDMA_RT_IO_FUNCTIONS(res) \ +u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \ +{ \ + return udma_##res##rt_read(p, reg); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_read); \ + \ +void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \ +{ \ + udma_##res##rt_write(p, reg, val); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_write) +XUDMA_RT_IO_FUNCTIONS(tchan); +XUDMA_RT_IO_FUNCTIONS(rchan); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c new file mode 100644 index 000000000000..ea79c2df28e0 --- /dev/null +++ b/drivers/dma/ti/k3-udma.c @@ -0,0 +1,3432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/dma/ti-cppi5.h> + +#include "../virt-dma.h" +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct udma_static_tr { + u8 elsize; /* RPSTR0 */ + u16 elcnt; /* RPSTR0 */ + u16 bstcnt; /* RPSTR1 */ +}; + +#define K3_UDMA_MAX_RFLOWS 1024 +#define K3_UDMA_DEFAULT_RING_SIZE 16 + +/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ +#define UDMA_RFLOW_SRCTAG_NONE 0 +#define UDMA_RFLOW_SRCTAG_CFG_TAG 1 +#define UDMA_RFLOW_SRCTAG_FLOW_ID 2 +#define UDMA_RFLOW_SRCTAG_SRC_TAG 4 + +#define UDMA_RFLOW_DSTTAG_NONE 0 +#define UDMA_RFLOW_DSTTAG_CFG_TAG 1 +#define UDMA_RFLOW_DSTTAG_FLOW_ID 2 +#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 +#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 + +struct udma_chan; + +enum udma_mmr { + MMR_GCFG = 0, + MMR_RCHANRT, + MMR_TCHANRT, + MMR_LAST, +}; + +static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" }; + 
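+/* + * Resource bookkeeping: a tchan carries the transmit (t_ring) and transmit + * completion (tc_ring) rings, an rflow carries the free descriptor (fd_ring) + * and receive (r_ring) rings, and an rchan only carries its realtime register + * mapping. + */ + 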
+struct udma_tchan { + void __iomem *reg_rt; + + int id; + struct k3_ring *t_ring; /* Transmit ring */ + struct k3_ring *tc_ring; /* Transmit Completion ring */ +}; + +struct udma_rflow { + int id; + struct k3_ring *fd_ring; /* Free Descriptor ring */ + struct k3_ring *r_ring; /* Receive ring */ +}; + +struct udma_rchan { + void __iomem *reg_rt; + + int id; +}; + +#define UDMA_FLAG_PDMA_ACC32 BIT(0) +#define UDMA_FLAG_PDMA_BURST BIT(1) + +struct udma_match_data { + u32 psil_base; + bool enable_memcpy_support; + u32 flags; + u32 statictr_z_mask; + u32 rchan_oes_offset; + + u8 tpl_levels; + u32 level_start_idx[]; +}; + +struct udma_dev { + struct dma_device ddev; + struct device *dev; + void __iomem *mmrs[MMR_LAST]; + const struct udma_match_data *match_data; + + size_t desc_align; /* alignment to use for descriptors */ + + struct udma_tisci_rm tisci_rm; + + struct k3_ringacc *ringacc; + + struct work_struct purge_work; + struct list_head desc_to_purge; + spinlock_t lock; + + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_gp_map; + unsigned long *rflow_gp_map_allocated; + unsigned long *rflow_in_use; + + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_chan *channels; + u32 psil_base; +}; + +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_desc { + struct virt_dma_desc vd; + + bool terminated; + + enum dma_transfer_direction dir; + + struct udma_static_tr static_tr; + u32 residue; + + unsigned int sglen; + unsigned int desc_idx; /* Only used for cyclic in packet mode */ + unsigned int tr_idx; + + u32 metadata_size; + void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ + + unsigned int hwdesc_count; + struct udma_hwdesc hwdesc[0]; +}; + +enum udma_chan_state { + UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ + UDMA_CHAN_IS_ACTIVE, /* Normal operation */ + UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ +}; + +struct udma_tx_drain { + struct delayed_work work; + unsigned long jiffie; + u32 residue; +}; + +struct udma_chan_config { + bool pkt_mode; /* TR or packet */ + bool needs_epib; /* EPIB is needed for the communication or not */ + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ + u32 hdesc_size; /* Size of a packet descriptor in packet mode */ + bool notdpkt; /* Suppress sending TDC packet */ + int remote_thread_id; + u32 src_thread; + u32 dst_thread; + enum psil_endpoint_type ep_type; + bool enable_acc32; + bool enable_burst; + enum udma_tp_level channel_tpl; /* Channel Throughput Level */ + + enum dma_transfer_direction dir; +}; + +struct udma_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct udma_dev *ud; + struct udma_desc *desc; + struct udma_desc *terminated_desc; + struct udma_static_tr static_tr; + char *name; + + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + bool psil_paired; + + int irq_num_ring; + int irq_num_udma; + + bool cyclic; + bool paused; + + enum udma_chan_state state; + struct completion teardown_completed; + + struct udma_tx_drain tx_drain; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + u32 in_ring_cnt; /* number of descriptors in flight */ + + /* Channel configuration parameters */ + struct udma_chan_config config; + + /* dmapool for packet mode descriptors */ + bool use_dma_pool; + struct dma_pool *hdesc_pool; + + u32 id; +}; + +static inline struct udma_dev *to_udma_dev(struct dma_device *d) +{ + return container_of(d, struct udma_dev, ddev); +} + +static inline struct udma_chan *to_udma_chan(struct dma_chan *c) +{ + return container_of(c, struct udma_chan, vc.chan); +} + +static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct udma_desc, vd.tx); +} + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + return readl(base + reg); +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = readl(base + reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + writel(tmp, base + reg); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg, + u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg, + u32 mask, u32 val) +{ + if (!tchan) + return; + udma_update_bits(tchan->reg_rt, reg, mask, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg, + u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg, + u32 mask, u32 val) +{ + if (!rchan) + return; + udma_update_bits(rchan->reg_rt, reg, mask, val); +} + +static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= 
K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static void udma_reset_uchan(struct udma_chan *uc) +{ + memset(&uc->config, 0, sizeof(uc->config)); + uc->config.remote_thread_id = -1; + uc->state = UDMA_CHAN_IS_IDLE; +} + +static void udma_dump_chan_stdata(struct udma_chan *uc) +{ + struct device *dev = uc->ud->dev; + u32 offset; + int i; + + if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "TCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_TCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, + udma_tchanrt_read(uc->tchan, offset)); + } + } + + if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "RCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_RCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, + udma_rchanrt_read(uc->rchan, offset)); + } + } +} + +static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, + int idx) +{ + return d->hwdesc[idx].cppi5_desc_paddr; +} + +static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) +{ + return d->hwdesc[idx].cppi5_desc_vaddr; +} + +static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, + dma_addr_t paddr) +{ + struct udma_desc *d = uc->terminated_desc; + + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + + if (!d) { + d = uc->desc; + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + } + + return d; +} + +static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) +{ + if (uc->use_dma_pool) { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_pool_free(uc->hdesc_pool, + d->hwdesc[i].cppi5_desc_vaddr, + d->hwdesc[i].cppi5_desc_paddr); + + d->hwdesc[i].cppi5_desc_vaddr = NULL; + } + } else if (d->hwdesc[0].cppi5_desc_vaddr) { + struct udma_dev *ud = uc->ud; + + dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, + d->hwdesc[0].cppi5_desc_vaddr, + d->hwdesc[0].cppi5_desc_paddr); + + d->hwdesc[0].cppi5_desc_vaddr = NULL; + } +} + +static void udma_purge_desc_work(struct work_struct *work) +{ + struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); + struct virt_dma_desc *vd, *_vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&ud->lock, flags); + list_splice_tail_init(&ud->desc_to_purge, &head); + spin_unlock_irqrestore(&ud->lock, flags); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + + udma_free_hwdesc(uc, d); + list_del(&vd->node); + kfree(d); + } + + /* If more to purge, schedule the work again */ + if (!list_empty(&ud->desc_to_purge)) + schedule_work(&ud->purge_work); +} + +static void udma_desc_free(struct virt_dma_desc *vd) +{ + struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + unsigned long flags; + + if (uc->terminated_desc == d) + uc->terminated_desc = NULL; + + if (uc->use_dma_pool) { + udma_free_hwdesc(uc, d); + kfree(d); + return; + } + + spin_lock_irqsave(&ud->lock, flags); + list_add_tail(&vd->node, &ud->desc_to_purge); + 
spin_unlock_irqrestore(&ud->lock, flags); + + schedule_work(&ud->purge_work); +} + +static bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + if (uc->tchan) + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + if (uc->rchan) + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static bool udma_is_chan_paused(struct udma_chan *uc) +{ + u32 val, pause_mask; + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + val = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_DEV: + val = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_MEM: + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pause_mask = UDMA_CHAN_RT_CTL_PAUSE; + break; + default: + return false; + } + + if (val & pause_mask) + return true; + + return false; +} + +static void udma_sync_for_device(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + if (uc->cyclic && uc->config.pkt_mode) { + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[idx].cppi5_desc_paddr, + d->hwdesc[idx].cppi5_desc_size, + DMA_TO_DEVICE); + } else { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[i].cppi5_desc_paddr, + d->hwdesc[i].cppi5_desc_size, + DMA_TO_DEVICE); + } + } +} + +static int udma_push_to_ring(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + struct k3_ring *ring = NULL; + int ret = -EINVAL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->fd_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->t_ring; + break; + default: + break; + } + + if (ring) { + dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + + wmb(); /* Ensure that writes are not moved over this point */ + udma_sync_for_device(uc, idx); + ret = k3_ringacc_ring_push(ring, &desc_addr); + uc->in_ring_cnt++; + } + + return ret; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_ringacc_ring_get_occ(ring)) { + struct udma_desc *d = NULL; + + ret = k3_ringacc_ring_pop(ring, addr); + if (ret) + return ret; + + /* Teardown completion */ + if (cppi5_desc_is_tdcm(*addr)) + return ret; + + d = udma_udma_desc_from_paddr(uc, *addr); + + if (d) + dma_sync_single_for_cpu(uc->ud->dev, *addr, + d->hwdesc[0].cppi5_desc_size, + DMA_FROM_DEVICE); + rmb(); /* Ensure that reads are not moved before this point */ + + if (!ret) + uc->in_ring_cnt--; + } + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_ring *ring1 = NULL; + struct k3_ring *ring2 = NULL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + if (uc->rchan) { + ring1 = uc->rflow->fd_ring; + ring2 = uc->rflow->r_ring; + } + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + if (uc->tchan) { + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + } + break; + default: + break; + } + + if (ring1) + k3_ringacc_ring_reset_dma(ring1, + 
k3_ringacc_ring_get_occ(ring1)); + if (ring2) + k3_ringacc_ring_reset(ring2); + + /* make sure we are not leaking memory by stalled descriptor */ + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + uc->in_ring_cnt = 0; +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static int udma_reset_chan(struct udma_chan *uc, bool hard) +{ + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + /* Reset all counters */ + udma_reset_counters(uc); + + /* Hard reset: re-initialize the channel to reset */ + if (hard) { + struct udma_chan_config ucc_backup; + int ret; + + memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); + uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); + + /* restore the channel configuration */ + memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); + ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); + if (ret) + return ret; + + /* + * Setting forced teardown after forced reset helps recovering + * the rchan. 
+ */ + if (uc->config.dir == DMA_DEV_TO_MEM) + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN | + UDMA_CHAN_RT_CTL_FTDOWN); + } + uc->state = UDMA_CHAN_IS_IDLE; + + return 0; +} + +static void udma_start_desc(struct udma_chan *uc) +{ + struct udma_chan_config *ucc = &uc->config; + + if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { + int i; + + /* Push all descriptors to ring for packet mode cyclic or RX */ + for (i = 0; i < uc->desc->sglen; i++) + udma_push_to_ring(uc, i); + } else { + udma_push_to_ring(uc, 0); + } +} + +static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) +{ + /* Only PDMAs have staticTR */ + if (uc->config.ep_type == PSIL_EP_NATIVE) + return false; + + /* Check if the staticTR configuration has changed for TX */ + if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) + return true; + + return false; +} + +static int udma_start(struct udma_chan *uc) +{ + struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); + + if (!vd) { + uc->desc = NULL; + return -ENOENT; + } + + list_del(&vd->node); + + uc->desc = to_udma_desc(&vd->tx); + + /* Channel is already running and does not need reconfiguration */ + if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { + udma_start_desc(uc); + goto out; + } + + /* Make sure that we clear the teardown bit, if it is set */ + udma_reset_chan(uc, false); + + /* Push descriptors before we start the channel */ + udma_start_desc(uc); + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + const struct udma_match_data *match_data = + uc->ud->match_data; + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG, + PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, + match_data->statictr_z_mask)); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + break; + case DMA_MEM_TO_DEV: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_tchanrt_write(uc->tchan, + UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + uc->state = UDMA_CHAN_IS_ACTIVE; +out: + + return 0; +} + +static int 
udma_stop(struct udma_chan *uc) +{ + enum udma_chan_state old_state = uc->state; + + uc->state = UDMA_CHAN_IS_TERMINATING; + reinit_completion(&uc->teardown_completed); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_TEARDOWN); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_FLUSH); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + default: + uc->state = old_state; + complete_all(&uc->teardown_completed); + return -EINVAL; + } + + return 0; +} + +static void udma_cyclic_packet_elapsed(struct udma_chan *uc) +{ + struct udma_desc *d = uc->desc; + struct cppi5_host_desc_t *h_desc; + + h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; + cppi5_hdesc_reset_to_original(h_desc); + udma_push_to_ring(uc, d->desc_idx); + d->desc_idx = (d->desc_idx + 1) % d->sglen; +} + +static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) +{ + struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + memcpy(d->metadata, h_desc->epib, d->metadata_size); +} + +static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) +{ + u32 peer_bcnt, bcnt; + + /* Only TX towards PDMA is affected */ + if (uc->config.ep_type == PSIL_EP_NATIVE || + uc->config.dir != DMA_MEM_TO_DEV) + return true; + + peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + + if (peer_bcnt < bcnt) { + uc->tx_drain.residue = bcnt - peer_bcnt; + uc->tx_drain.jiffie = jiffies; + return false; + } + + return true; +} + +static void udma_check_tx_completion(struct work_struct *work) +{ + struct udma_chan *uc = container_of(work, typeof(*uc), + tx_drain.work.work); + bool desc_done = true; + u32 residue_diff; + unsigned long jiffie_diff, delay; + + if (uc->desc) { + residue_diff = uc->tx_drain.residue; + jiffie_diff = uc->tx_drain.jiffie; + desc_done = udma_is_desc_really_done(uc, uc->desc); + } + + if (!desc_done) { + jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* Try to guess when we should check next time */ + residue_diff /= jiffie_diff; + delay = uc->tx_drain.residue / residue_diff / 3; + if (jiffies_to_msecs(delay) < 5) + delay = 0; + } else { + /* No progress, check again in 1 second */ + delay = HZ; + } + + schedule_delayed_work(&uc->tx_drain.work, delay); + } else if (uc->desc) { + struct udma_desc *d = uc->desc; + + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + } +} + +static irqreturn_t udma_ring_irq_handler(int irq, void *data) +{ + struct udma_chan *uc = data; + struct udma_desc *d; + unsigned long flags; + dma_addr_t paddr = 0; + + if (udma_pop_from_ring(uc, &paddr) || !paddr) + return IRQ_HANDLED; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* Teardown completion message */ + if (cppi5_desc_is_tdcm(paddr)) { + /* Compensate our internal pop/push counter */ + uc->in_ring_cnt++; + + complete_all(&uc->teardown_completed); + + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + if (!uc->desc) + udma_start(uc); + + goto out; + } + + d = 
udma_udma_desc_from_paddr(uc, paddr);
+
+	if (d) {
+		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+								   d->desc_idx);
+		if (desc_paddr != paddr) {
+			dev_err(uc->ud->dev, "not matching descriptors!\n");
+			goto out;
+		}
+
+		if (uc->cyclic) {
+			/* push the descriptor back to the ring */
+			if (d == uc->desc) {
+				udma_cyclic_packet_elapsed(uc);
+				vchan_cyclic_callback(&d->vd);
+			}
+		} else {
+			bool desc_done = false;
+
+			if (d == uc->desc) {
+				desc_done = udma_is_desc_really_done(uc, d);
+
+				if (desc_done) {
+					uc->bcnt += d->residue;
+					udma_start(uc);
+				} else {
+					schedule_delayed_work(&uc->tx_drain.work,
+							      0);
+				}
+			}
+
+			if (desc_done)
+				vchan_cookie_complete(&d->vd);
+		}
+	}
+out:
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+	struct udma_chan *uc = data;
+	struct udma_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uc->vc.lock, flags);
+	d = uc->desc;
+	if (d) {
+		d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+		if (uc->cyclic) {
+			vchan_cyclic_callback(&d->vd);
+		} else {
+			/* TODO: figure out the real amount of data */
+			uc->bcnt += d->residue;
+			udma_start(uc);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
+
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can be requested
+ * only by explicit flow id number. If @from is set to -1 it will try to find
+ * the first free range. If @from is a positive value it will force allocation
+ * only of the specified range of flows.
+ *
+ * Returns -ENOMEM if no free range can be found.
+ * -EEXIST if the requested range is busy.
+ * -EINVAL if wrong input values are passed.
+ * Returns the flow id on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	int start, tmp_from;
+	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+	tmp_from = from;
+	if (tmp_from < 0)
+		tmp_from = ud->rchan_cnt;
+	/* default flows can't be allocated here and are accessible only by id */
+	if (tmp_from < ud->rchan_cnt)
+		return -EINVAL;
+
+	if (tmp_from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+		  ud->rflow_cnt);
+
+	start = bitmap_find_next_zero_area(tmp,
+					   ud->rflow_cnt,
+					   tmp_from, cnt, 0);
+	if (start >= ud->rflow_cnt)
+		return -ENOMEM;
+
+	if (from >= 0 && start != from)
+		return -EEXIST;
+
+	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+	return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	if (from < ud->rchan_cnt)
+		return -EINVAL;
+	if (from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+	return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+	/*
+	 * An attempt to request an rflow by ID can be made for any rflow
+	 * not in use, on the assumption that the caller knows what it is
+	 * doing. TI-SCI FW will perform an additional permission check
+	 * anyway, so it's safe.
+	 */
+
+	if (id < 0 || id >= ud->rflow_cnt)
+		return ERR_PTR(-ENOENT);
+
+	if (test_bit(id, ud->rflow_in_use))
+		return ERR_PTR(-ENOENT);
+
+	/* GP rflow has to be allocated first */
+	if (!test_bit(id, ud->rflow_gp_map) &&
+	    !test_bit(id, ud->rflow_gp_map_allocated))
+		return ERR_PTR(-EINVAL);
+
+	dev_dbg(ud->dev, "get rflow%d\n", id);
+	set_bit(id, ud->rflow_in_use);
+	return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+	if (!test_bit(rflow->id, ud->rflow_in_use)) {
+		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+		return;
+	}
+
+	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+	clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res)					\
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
+					       enum udma_tp_level tpl,	\
+					       int id)			\
+{									\
+	if (id >= 0) {							\
+		if (test_bit(id, ud->res##_map)) {			\
+			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	} else {							\
+		int start;						\
+									\
+		if (tpl >= ud->match_data->tpl_levels)			\
+			tpl = ud->match_data->tpl_levels - 1;		\
+									\
+		start = ud->match_data->level_start_idx[tpl];		\
+									\
+		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
+					start);				\
+		if (id == ud->res##_cnt) {				\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	}								\
+									\
+	set_bit(id, ud->res##_map);					\
+	return &ud->res##s[id];						\
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->tchan) {
+		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->tchan))
+		return PTR_ERR(uc->tchan);
+
+	return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->rchan) {
+		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return 0;
+	}
+
+	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->rchan))
+		return PTR_ERR(uc->rchan);
+
+	return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	const struct udma_match_data *match_data = ud->match_data;
+	int chan_id, end;
+
+	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+			 uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	if (uc->tchan) {
+		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return -EBUSY;
+	} else if (uc->rchan) {
+		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return -EBUSY;
+	}
+
+	/* Can be optimized, but let's have it like this for now */
+	end = min(ud->tchan_cnt, ud->rchan_cnt);
+	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+	chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+	for (; chan_id < end; chan_id++) {
+		if (!test_bit(chan_id, ud->tchan_map) &&
+		    !test_bit(chan_id, ud->rchan_map))
+			break;
+	}
+
+	if (chan_id == end)
+		return -ENOENT;
+
+	set_bit(chan_id, ud->tchan_map);
+	set_bit(chan_id, ud->rchan_map);
+	uc->tchan = &ud->tchans[chan_id];
+	uc->rchan = &ud->rchans[chan_id];
+
+	return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (!uc->rchan) {
+		
dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); + return -EINVAL; + } + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + uc->rflow = __udma_get_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __udma_put_rflow(ud, uc->rflow); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_ringacc_ring_free(uc->tchan->t_ring); + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int udma_alloc_tx_resources(struct udma_chan *uc) +{ + struct k3_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = udma_get_tchan(uc); + if (ret) + return ret; + + uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc, + uc->tchan->id, 0); + if (!uc->tchan->t_ring) { + ret = -EBUSY; + goto err_tx_ring; + } + + uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!uc->tchan->tc_ring) { + ret = -EBUSY; + goto err_txc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); + ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->tc_ring = NULL; +err_txc_ring: + k3_ringacc_ring_free(uc->tchan->t_ring); + uc->tchan->t_ring = NULL; +err_tx_ring: + udma_put_tchan(uc); + + return ret; +} + +static void udma_free_rx_resources(struct udma_chan *uc) +{ + if (!uc->rchan) + return; + + if (uc->rflow) { + struct udma_rflow *rflow = uc->rflow; + + k3_ringacc_ring_free(rflow->fd_ring); + k3_ringacc_ring_free(rflow->r_ring); + rflow->fd_ring = NULL; + rflow->r_ring = NULL; + + udma_put_rflow(uc); + } + + udma_put_rchan(uc); +} + +static int udma_alloc_rx_resources(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct k3_ring_cfg ring_cfg; + struct udma_rflow *rflow; + int fd_ring_id; + int ret; + + ret = udma_get_rchan(uc); + if (ret) + return ret; + + /* For MEM_TO_MEM we don't need rflow or rings */ + if (uc->config.dir == DMA_MEM_TO_MEM) + return 0; + + ret = udma_get_rflow(uc, uc->rchan->id); + if (ret) { + ret = -EBUSY; + goto err_rflow; + } + + rflow = uc->rflow; + fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; + rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0); + if (!rflow->fd_ring) { + ret = -EBUSY; + goto err_rx_ring; + } + + rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!rflow->r_ring) { + ret = -EBUSY; + goto err_rxc_ring; + } + + memset(&ring_cfg, 0, 
sizeof(ring_cfg)); + + if (uc->config.pkt_mode) + ring_cfg.size = SG_MAX_SEGMENTS; + else + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(rflow->r_ring); + rflow->r_ring = NULL; +err_rxc_ring: + k3_ringacc_ring_free(rflow->fd_ring); + rflow->fd_ring = NULL; +err_rx_ring: + udma_put_rflow(uc); +err_rflow: + udma_put_rchan(uc); + + return ret; +} + +#define TISCI_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID) + +#define TISCI_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID) + +static int udma_tisci_m2m_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + struct udma_rchan *rchan = uc->rchan; + int ret = 0; + + /* Non synchronized - mem to mem type of transfer */ + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) { + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + return ret; + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_rx.rxcq_qnum = tc_ring; + req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) + dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); + + return ret; +} + +static int udma_tisci_tx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 
0); + } else { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; + fetch_size = sizeof(struct cppi5_desc_hdr_t); + } + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = mode; + req_tx.tx_supr_tdpkt = uc->config.notdpkt; + req_tx.tx_fetch_size = fetch_size >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + + return ret; +} + +static int udma_tisci_rx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_rchan *rchan = uc->rchan; + int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); + int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 0); + } else { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; + fetch_size = sizeof(struct cppi5_desc_hdr_t); + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = fetch_size >> 2; + req_rx.rxcq_qnum = rx_ring; + req_rx.rx_chan_type = mode; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) { + dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); + return ret; + } + + flow_req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + + flow_req.nav_id = tisci_rm->tisci_dev_id; + flow_req.flow_index = rchan->id; + + if (uc->config.needs_epib) + flow_req.rx_einfo_present = 1; + else + flow_req.rx_einfo_present = 0; + if (uc->config.psd_size) + flow_req.rx_psinfo_present = 1; + else + flow_req.rx_psinfo_present = 0; + flow_req.rx_error_handling = 1; + flow_req.rx_dest_qnum = rx_ring; + flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; + flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; + flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; + flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; + flow_req.rx_fdq0_sz0_qnum = fd_ring; + flow_req.rx_fdq1_qnum = fd_ring; + flow_req.rx_fdq2_qnum = fd_ring; + flow_req.rx_fdq3_qnum = fd_ring; + + ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); + + if (ret) + dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); + + return 0; +} + +static int udma_alloc_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + const struct udma_match_data *match_data = 
ud->match_data;
+	struct k3_ring *irq_ring;
+	u32 irq_udma_idx;
+	int ret;
+
+	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+		uc->use_dma_pool = true;
+		/* in case of MEM_TO_MEM we have a maximum of two TRs */
+		if (uc->config.dir == DMA_MEM_TO_MEM) {
+			uc->config.hdesc_size = cppi5_trdesc_calc_size(
+					sizeof(struct cppi5_tr_type15_t), 2);
+			uc->config.pkt_mode = false;
+		}
+	}
+
+	if (uc->use_dma_pool) {
+		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+						 uc->config.hdesc_size,
+						 ud->desc_align,
+						 0);
+		if (!uc->hdesc_pool) {
+			dev_err(ud->ddev.dev,
+				"Descriptor pool allocation failed\n");
+			uc->use_dma_pool = false;
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Make sure that the completion is in a known state:
+	 * No teardown, the channel is idle
+	 */
+	reinit_completion(&uc->teardown_completed);
+	complete_all(&uc->teardown_completed);
+	uc->state = UDMA_CHAN_IS_IDLE;
+
+	switch (uc->config.dir) {
+	case DMA_MEM_TO_MEM:
+		/* Non synchronized - mem to mem type of transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_get_chan_pair(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			udma_free_tx_resources(uc);
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_m2m_channel_config(uc);
+		break;
+	case DMA_MEM_TO_DEV:
+		/* Slave transfer synchronized - mem to dev (TX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_tx_channel_config(uc);
+		break;
+	case DMA_DEV_TO_MEM:
+		/* Slave transfer synchronized - dev to mem (RX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->rflow->r_ring;
+		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+		ret = udma_tisci_rx_channel_config(uc);
+		break;
+	default:
+		/* Cannot happen */
+		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+			__func__, uc->id, uc->config.dir);
+		return -EINVAL;
+	}
+
+	/* check if the channel configuration was successful */
+	if (ret)
+		goto err_res_free;
+
+	if (udma_is_chan_running(uc)) {
+		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+		udma_stop(uc);
+		if (udma_is_chan_running(uc)) {
+			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+			goto err_res_free;
+		}
+	}
+
+	/* PSI-L pairing */
+	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+	if (ret) {
+		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+			uc->config.src_thread, uc->config.dst_thread);
+		goto err_res_free;
+	}
+
+	uc->psil_paired = true;
+
+	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+	if (uc->irq_num_ring <= 0) {
+		
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", + k3_ringacc_get_ring_id(irq_ring)); + ret = -EINVAL; + goto err_psi_free; + } + + ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, + IRQF_TRIGGER_HIGH, uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); + goto err_irq_free; + } + + /* Event from UDMA (TR events) only needed for slave TR mode channels */ + if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { + uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, + irq_udma_idx); + if (uc->irq_num_udma <= 0) { + dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", + irq_udma_idx); + free_irq(uc->irq_num_ring, uc); + ret = -EINVAL; + goto err_irq_free; + } + + ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, + uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: UDMA irq request failed\n", + uc->id); + free_irq(uc->irq_num_ring, uc); + goto err_irq_free; + } + } else { + uc->irq_num_udma = 0; + } + + udma_reset_rings(uc); + + INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, + udma_check_tx_completion); + return 0; + +err_irq_free: + uc->irq_num_ring = 0; + uc->irq_num_udma = 0; +err_psi_free: + navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); + uc->psil_paired = false; +err_res_free: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } + + return ret; +} + +static int udma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct udma_chan *uc = to_udma_chan(chan); + + memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); + + return 0; +} + +static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, + size_t tr_size, int tr_count, + enum dma_transfer_direction dir) +{ + struct udma_hwdesc *hwdesc; + struct cppi5_desc_hdr_t *tr_desc; + struct udma_desc *d; + u32 reload_count = 0; + u32 ring_id; + + switch (tr_size) { + case 16: + case 32: + case 64: + case 128: + break; + default: + dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); + return NULL; + } + + /* We have only one descriptor containing multiple TRs */ + d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = tr_count; + + d->hwdesc_count = 1; + hwdesc = &d->hwdesc[0]; + + /* Allocate memory for DMA ring descriptor */ + if (uc->use_dma_pool) { + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + } else { + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, + tr_count); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + uc->ud->desc_align); + hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, + hwdesc->cppi5_desc_size, + &hwdesc->cppi5_desc_paddr, + GFP_NOWAIT); + } + + if (!hwdesc->cppi5_desc_vaddr) { + kfree(d); + return NULL; + } + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; + + tr_desc = hwdesc->cppi5_desc_vaddr; + + if (uc->cyclic) + reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); + cppi5_desc_set_pktids(tr_desc, uc->id, 
+			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+	return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+		      unsigned int sglen, enum dma_transfer_direction dir,
+		      unsigned long tx_flags, void *context)
+{
+	enum dma_slave_buswidth dev_width;
+	struct scatterlist *sgent;
+	struct udma_desc *d;
+	size_t tr_size;
+	struct cppi5_tr_type1_t *tr_req = NULL;
+	unsigned int i;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_width = uc->cfg.src_addr_width;
+		burst = uc->cfg.src_maxburst;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_width = uc->cfg.dst_addr_width;
+		burst = uc->cfg.dst_maxburst;
+	} else {
+		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (!burst)
+		burst = 1;
+
+	/* Now allocate and set up the descriptor. */
+	tr_size = sizeof(struct cppi5_tr_type1_t);
+	d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+	if (!d)
+		return NULL;
+
+	d->sglen = sglen;
+
+	tr_req = d->hwdesc[0].tr_req_base;
+	for_each_sg(sgl, sgent, sglen, i) {
+		d->residue += sg_dma_len(sgent);
+
+		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+		tr_req[i].addr = sg_dma_address(sgent);
+		tr_req[i].icnt0 = burst * dev_width;
+		tr_req[i].dim1 = burst * dev_width;
+		tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+	}
+
+	cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+	return d;
+}
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+				   enum dma_slave_buswidth dev_width,
+				   u16 elcnt)
+{
+	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+		return 0;
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		d->static_tr.elsize = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		d->static_tr.elsize = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_3_BYTES:
+		d->static_tr.elsize = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		d->static_tr.elsize = 3;
+		break;
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		d->static_tr.elsize = 4;
+		break;
+	default: /* not reached */
+		return -EINVAL;
+	}
+
+	d->static_tr.elcnt = elcnt;
+
+	/*
+	 * PDMA must close the packet when the channel is in packet mode.
+	 * For TR mode when the channel is not cyclic we also need PDMA to
+	 * close the packet, otherwise the transfer will stall because PDMA
+	 * holds on to the data it has received from the peripheral.
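+	 *
+	 * A worked example with illustrative numbers: for a 2 byte bus
+	 * width (dev_width = 2) and elcnt = 8, div below is 16; a
+	 * non-cyclic transfer with d->residue = 4096 bytes then gives
+	 * bstcnt = 4096 / 16 = 256 bursts per packet, which must fit
+	 * into the SoC's statictr_z_mask for DEV_TO_MEM.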
+ */ + if (uc->config.pkt_mode || !uc->cyclic) { + unsigned int div = dev_width * elcnt; + + if (uc->cyclic) + d->static_tr.bstcnt = d->residue / d->sglen / div; + else + d->static_tr.bstcnt = d->residue / div; + + if (uc->config.dir == DMA_DEV_TO_MEM && + d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) + return -EINVAL; + } else { + d->static_tr.bstcnt = 0; + } + + return 0; +} + +static struct udma_desc * +udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct scatterlist *sgent; + struct cppi5_host_desc_t *h_desc = NULL; + struct udma_desc *d; + u32 ring_id; + unsigned int i; + + d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = sglen; + d->hwdesc_count = sglen; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for_each_sg(sgl, sgent, sglen, i) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t sg_addr = sg_dma_address(sgent); + struct cppi5_host_desc_t *desc; + size_t sg_len = sg_dma_len(sgent); + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + d->residue += sg_len; + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + desc = hwdesc->cppi5_desc_vaddr; + + if (i == 0) { + cppi5_hdesc_init(desc, 0, 0); + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); + } else { + cppi5_hdesc_reset_hbdesc(desc); + cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); + } + + /* attach the sg buffer to the descriptor */ + cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); + + /* Attach link as host buffer descriptor */ + if (h_desc) + cppi5_hdesc_link_hbdesc(h_desc, + hwdesc->cppi5_desc_paddr); + + if (dir == DMA_MEM_TO_DEV) + h_desc = desc; + } + + if (d->residue >= SZ_4M) { + dev_err(uc->ud->dev, + "%s: Transfer size %u is over the supported 4M range\n", + __func__, d->residue); + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + cppi5_hdesc_set_pktlen(h_desc, d->residue); + + return d; +} + +static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (!data || len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + if (d->dir == DMA_MEM_TO_DEV) + memcpy(h_desc->epib, data, len); + + if (uc->config.needs_epib) + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + + d->metadata = data; + d->metadata_size = len; + if (uc->config.needs_epib) + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + struct 
udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return ERR_PTR(-ENOTSUPP); + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + *max_len = uc->config.metadata_size; + + *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? + CPPI5_INFO0_HDESC_EPIB_SIZE : 0; + *payload_len += cppi5_hdesc_get_psdata_size(h_desc); + + return h_desc->epib; +} + +static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = payload_len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (payload_len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + if (uc->config.needs_epib) { + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + } + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static struct dma_descriptor_metadata_ops metadata_ops = { + .attach = udma_attach_metadata, + .get_ptr = udma_get_metadata_ptr, + .set_len = udma_set_metadata_len, +}; + +static struct dma_async_tx_descriptor * +udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, + context); + else + d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, + context); + + if (!d) + return NULL; + + d->dir = dir; + d->desc_idx = 0; + d->tr_idx = 0; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static struct udma_desc * +udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + size_t tr_size; + struct cppi5_tr_type1_t *tr_req; + unsigned int i; + unsigned int periods = buf_len / period_len; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = 
uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + /* Now allocate and setup the descriptor. */ + tr_size = sizeof(struct cppi5_tr_type1_t); + d = udma_alloc_tr_desc(uc, tr_size, periods, dir); + if (!d) + return NULL; + + tr_req = d->hwdesc[0].tr_req_base; + for (i = 0; i < periods; i++) { + cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[i].addr = buf_addr + period_len * i; + tr_req[i].icnt0 = dev_width; + tr_req[i].icnt1 = period_len / dev_width; + tr_req[i].dim1 = dev_width; + + if (!(flags & DMA_PREP_INTERRUPT)) + cppi5_tr_csf_set(&tr_req[i].flags, + CPPI5_TR_CSF_SUPR_EVT); + } + + return d; +} + +static struct udma_desc * +udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct udma_desc *d; + u32 ring_id; + int i; + int periods = buf_len / period_len; + + if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) + return NULL; + + if (period_len >= SZ_4M) + return NULL; + + d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->hwdesc_count = periods; + + /* TODO: re-check this... */ + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for (i = 0; i < periods; i++) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t period_addr = buf_addr + (period_len * i); + struct cppi5_host_desc_t *h_desc; + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + h_desc = hwdesc->cppi5_desc_vaddr; + + cppi5_hdesc_init(h_desc, 0, 0); + cppi5_hdesc_set_pktlen(h_desc, period_len); + + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&h_desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); + + /* attach each period to a new descriptor */ + cppi5_hdesc_attach_buf(h_desc, + period_addr, period_len, + period_addr, period_len); + } + + return d; +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + uc->cyclic = true; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, + dir, flags); + else + d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, + dir, flags); + + if (!d) + 
return NULL; + + d->sglen = buf_len / period_len; + + d->dir = dir; + d->residue = buf_len; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, flags); +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_desc *d; + struct cppi5_tr_type15_t *tr_req; + int num_tr; + size_t tr_size = sizeof(struct cppi5_tr_type15_t); + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + + if (uc->config.dir != DMA_MEM_TO_MEM) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(DMA_MEM_TO_MEM)); + return NULL; + } + + if (len < SZ_64K) { + num_tr = 1; + tr0_cnt0 = len; + tr0_cnt1 = 1; + } else { + unsigned long align_to = __ffs(src | dest); + + if (align_to > 3) + align_to = 3; + /* + * Keep simple: tr0: SZ_64K-alignment blocks, + * tr1: the remaining + */ + num_tr = 2; + tr0_cnt0 = (SZ_64K - BIT(align_to)); + if (len / tr0_cnt0 >= SZ_64K) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; + } + + tr0_cnt1 = len / tr0_cnt0; + tr1_cnt0 = len % tr0_cnt0; + } + + d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); + if (!d) + return NULL; + + d->dir = DMA_MEM_TO_MEM; + d->desc_idx = 0; + d->tr_idx = 0; + d->residue = len; + + tr_req = d->hwdesc[0].tr_req_base; + + cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static void udma_issue_pending(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* If we have something pending and no active descriptor, then */ + if (vchan_issue_pending(&uc->vc) && !uc->desc) { + /* + * start a descriptor if the channel is NOT [marked as + * terminating _and_ it is still running (teardown has not + * completed yet)]. 
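+		 *
+		 * Put differently: a terminating channel that has already
+		 * stopped may be restarted from here, while a channel that
+		 * is still tearing down is restarted later, from the
+		 * teardown completion message handling in
+		 * udma_ring_irq_handler().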
+ */ + if (!(uc->state == UDMA_CHAN_IS_TERMINATING && + udma_is_chan_running(uc))) + udma_start(uc); + } + + spin_unlock_irqrestore(&uc->vc.lock, flags); +} + +static enum dma_status udma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + ret = dma_cookie_status(chan, cookie, txstate); + + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) + ret = DMA_PAUSED; + + if (ret == DMA_COMPLETE || !txstate) + goto out; + + if (uc->desc && uc->desc->vd.tx.cookie == cookie) { + u32 peer_bcnt = 0; + u32 bcnt = 0; + u32 residue = uc->desc->residue; + u32 delay = 0; + + if (uc->desc->dir == DMA_MEM_TO_DEV) { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_SBCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_BCNT_REG); + + if (bcnt > peer_bcnt) + delay = bcnt - peer_bcnt; + } + } else if (uc->desc->dir == DMA_DEV_TO_MEM) { + bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_BCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_BCNT_REG); + + if (peer_bcnt > bcnt) + delay = peer_bcnt - bcnt; + } + } else { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_BCNT_REG); + } + + bcnt -= uc->bcnt; + if (bcnt && !(bcnt % uc->desc->residue)) + residue = 0; + else + residue -= bcnt % uc->desc->residue; + + if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { + ret = DMA_COMPLETE; + delay = 0; + } + + dma_set_residue(txstate, residue); + dma_set_in_flight_bytes(txstate, delay); + + } else { + ret = DMA_COMPLETE; + } + +out: + spin_unlock_irqrestore(&uc->vc.lock, flags); + return ret; +} + +static int udma_pause(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* pause the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, + UDMA_CHAN_RT_CTL_PAUSE); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_resume(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* resume the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_terminate_all(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&uc->vc.lock, flags); + + if (udma_is_chan_running(uc)) + udma_stop(uc); + + if (uc->desc) { + uc->terminated_desc = uc->desc; + uc->desc = NULL; + uc->terminated_desc->terminated = true; + cancel_delayed_work(&uc->tx_drain.work); + } + 
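+	/* Terminating the channel also implies resuming it: drop pause state */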
+ uc->paused = false; + + vchan_get_all_descriptors(&uc->vc, &head); + spin_unlock_irqrestore(&uc->vc.lock, flags); + vchan_dma_desc_free_list(&uc->vc, &head); + + return 0; +} + +static void udma_synchronize(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long timeout = msecs_to_jiffies(1000); + + vchan_synchronize(&uc->vc); + + if (uc->state == UDMA_CHAN_IS_TERMINATING) { + timeout = wait_for_completion_timeout(&uc->teardown_completed, + timeout); + if (!timeout) { + dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", + uc->id); + udma_dump_chan_stdata(uc); + udma_reset_chan(uc, true); + } + } + + udma_reset_chan(uc, false); + if (udma_is_chan_running(uc)) + dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); + + cancel_delayed_work_sync(&uc->tx_drain.work); + udma_reset_rings(uc); +} + +static void udma_desc_pre_callback(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, + struct dmaengine_result *result) +{ + struct udma_chan *uc = to_udma_chan(&vc->chan); + struct udma_desc *d; + + if (!vd) + return; + + d = to_udma_desc(&vd->tx); + + if (d->metadata_size) + udma_fetch_epib(uc, d); + + /* Provide residue information for the client */ + if (result) { + void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); + + if (cppi5_desc_get_type(desc_vaddr) == + CPPI5_INFO0_DESC_TYPE_VAL_HOST) { + result->residue = d->residue - + cppi5_hdesc_get_pktlen(desc_vaddr); + if (result->residue) + result->result = DMA_TRANS_ABORTED; + else + result->result = DMA_TRANS_NOERROR; + } else { + result->residue = 0; + result->result = DMA_TRANS_NOERROR; + } + } +} + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. + */ +static void udma_vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + udma_desc_pre_callback(vc, vd, NULL); + dmaengine_desc_callback_invoke(&cb, NULL); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct dmaengine_result result; + + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + + udma_desc_pre_callback(vc, vd, &result); + dmaengine_desc_callback_invoke(&cb, &result); + + vchan_vdesc_fini(vd); + } +} + +static void udma_free_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + + udma_terminate_all(chan); + if (uc->terminated_desc) { + udma_reset_chan(uc, false); + udma_reset_rings(uc); + } + + cancel_delayed_work_sync(&uc->tx_drain.work); + destroy_delayed_work_on_stack(&uc->tx_drain.work); + + if (uc->irq_num_ring > 0) { + free_irq(uc->irq_num_ring, uc); + + uc->irq_num_ring = 0; + } + if (uc->irq_num_udma > 0) { + free_irq(uc->irq_num_udma, uc); + + uc->irq_num_udma = 0; + } + + /* Release PSI-L pairing */ + if (uc->psil_paired) { + navss_psil_unpair(ud, uc->config.src_thread, + uc->config.dst_thread); + uc->psil_paired = false; + } + + vchan_free_chan_resources(&uc->vc); + tasklet_kill(&uc->vc.task); + + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } +} 
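+/*
+ * Channel request filtering: the single dma-spec cell is interpreted as
+ * the remote PSI-L thread ID. If the destination-thread bit
+ * (K3_PSIL_DST_THREAD_ID_OFFSET) is set, the remote peer is a destination
+ * thread and the channel transmits (MEM_TO_DEV); otherwise the peer is a
+ * source thread and the channel receives (DEV_TO_MEM). A client typically
+ * reaches this via a DT entry along the lines of (thread ID values
+ * illustrative only):
+ *
+ *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ *	dma-names = "tx", "rx";
+ */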
+ +static struct platform_driver udma_driver; + +static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct udma_chan_config *ucc; + struct psil_endpoint_config *ep_config; + struct udma_chan *uc; + struct udma_dev *ud; + u32 *args; + + if (chan->device->dev->driver != &udma_driver.driver) + return false; + + uc = to_udma_chan(chan); + ucc = &uc->config; + ud = uc->ud; + args = param; + + ucc->remote_thread_id = args[0]; + + if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) + ucc->dir = DMA_MEM_TO_DEV; + else + ucc->dir = DMA_DEV_TO_MEM; + + ep_config = psil_get_ep_config(ucc->remote_thread_id); + if (IS_ERR(ep_config)) { + dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", + ucc->remote_thread_id); + ucc->dir = DMA_MEM_TO_MEM; + ucc->remote_thread_id = -1; + return false; + } + + ucc->pkt_mode = ep_config->pkt_mode; + ucc->channel_tpl = ep_config->channel_tpl; + ucc->notdpkt = ep_config->notdpkt; + ucc->ep_type = ep_config->ep_type; + + if (ucc->ep_type != PSIL_EP_NATIVE) { + const struct udma_match_data *match_data = ud->match_data; + + if (match_data->flags & UDMA_FLAG_PDMA_ACC32) + ucc->enable_acc32 = ep_config->pdma_acc32; + if (match_data->flags & UDMA_FLAG_PDMA_BURST) + ucc->enable_burst = ep_config->pdma_burst; + } + + ucc->needs_epib = ep_config->needs_epib; + ucc->psd_size = ep_config->psd_size; + ucc->metadata_size = + (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + + ucc->psd_size; + + if (ucc->pkt_mode) + ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + ucc->metadata_size, ud->desc_align); + + dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, + ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); + + return true; +} + +static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct udma_dev *ud = ofdma->of_dma_data; + dma_cap_mask_t mask = ud->ddev.cap_mask; + struct dma_chan *chan; + + if (dma_spec->args_count != 1) + return NULL; + + chan = __dma_request_channel(&mask, udma_dma_filter_fn, + &dma_spec->args[0], ofdma->of_node); + if (!chan) { + dev_err(ud->dev, "get channel fail in %s.\n", __func__); + return ERR_PTR(-EINVAL); + } + + return chan; +} + +static struct udma_match_data am654_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 8, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data am654_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = true, /* TEST: DMA domains */ + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 3, + .level_start_idx = { + [0] = 16, /* Normal channels */ + [1] = 4, /* High Throughput channels */ + [2] = 0, /* Ultra High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 2, + 
.level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static const struct of_device_id udma_of_match[] = { + { + .compatible = "ti,am654-navss-main-udmap", + .data = &am654_main_data, + }, + { + .compatible = "ti,am654-navss-mcu-udmap", + .data = &am654_mcu_data, + }, { + .compatible = "ti,j721e-navss-main-udmap", + .data = &j721e_main_data, + }, { + .compatible = "ti,j721e-navss-mcu-udmap", + .data = &j721e_mcu_data, + }, + { /* Sentinel */ }, +}; + +static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) +{ + struct resource *res; + int i; + + for (i = 0; i < MMR_LAST; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mmr_names[i]); + ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ud->mmrs[i])) + return PTR_ERR(ud->mmrs[i]); + } + + return 0; +} + +static int udma_setup_resources(struct udma_dev *ud) +{ + struct device *dev = ud->dev; + int ch_count, ret, i, j; + u32 cap2, cap3; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res, irq_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + static const char * const range_names[] = { "ti,sci-rm-range-tchan", + "ti,sci-rm-range-rchan", + "ti,sci-rm-range-rflow" }; + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ch_count = ud->tchan_cnt + ud->rchan_cnt; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_gp_map_allocated = devm_kcalloc(dev, + BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || + !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->rflow_in_use) + return -ENOMEM; + + /* + * RX flows with the same Ids as RX channels are reserved to be used + * as default flows if remote HW can't generate flow_ids. Those + * RX flows can be requested only explicitly by id. 
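+	 *
+	 * For example (illustrative count): on an instance with 32 rchans,
+	 * rflows 0..31 are the default flows and are marked as allocated
+	 * below; only rflows >= 32 can ever be handed out by
+	 * __udma_alloc_gp_rflow_range().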
+ */ + bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); + + /* by default no GP rflows are assigned to Linux */ + bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + irq_res.sets = rm_res->sets; + + /* rchan and matching default flow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + irq_res.sets += rm_res->sets; + irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + for (i = 0; i < rm_res->sets; i++) { + irq_res.desc[i].start = rm_res->desc[i].start; + irq_res.desc[i].num = rm_res->desc[i].num; + } + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + for (j = 0; j < rm_res->sets; j++, i++) { + irq_res.desc[i].start = rm_res->desc[j].start + + ud->match_data->rchan_oes_offset; + irq_res.desc[i].num = rm_res->desc[j].num; + } + ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); + kfree(irq_res.desc); + if (ret) { + dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); + return ret; + } + + /* GP rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + /* all gp flows are assigned exclusively to Linux */ + bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + } else { + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_gp_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); + ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); + if (!ch_count) + return -ENODEV; + + ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), + GFP_KERNEL); + if (!ud->channels) + return -ENOMEM; + + dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", + ch_count, + ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), + ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), + ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, + ud->rflow_cnt)); + + return ch_count; +} + +#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int udma_probe(struct platform_device *pdev) +{ + struct device_node *navss_node = pdev->dev.parent->of_node; + struct device *dev = &pdev->dev; + struct udma_dev *ud; + const struct of_device_id *match; + int i, ret; + int ch_count; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) + 
dev_err(dev, "failed to set DMA mask\n"); + + ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); + if (!ud) + return -ENOMEM; + + ret = udma_get_mmrs(pdev, ud); + if (ret) + return ret; + + ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); + if (IS_ERR(ud->tisci_rm.tisci)) + return PTR_ERR(ud->tisci_rm.tisci); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + pdev->id = ud->tisci_rm.tisci_dev_id; + + ret = of_property_read_u32(navss_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_navss_dev_id); + if (ret) { + dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; + ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; + + ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + match = of_match_node(udma_of_match, dev->of_node); + if (!match) { + dev_err(dev, "No compatible match found\n"); + return -ENODEV; + } + ud->match_data = match->data; + + dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); + + ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; + ud->ddev.device_config = udma_slave_config; + ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; + ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; + ud->ddev.device_issue_pending = udma_issue_pending; + ud->ddev.device_tx_status = udma_tx_status; + ud->ddev.device_pause = udma_pause; + ud->ddev.device_resume = udma_resume; + ud->ddev.device_terminate_all = udma_terminate_all; + ud->ddev.device_synchronize = udma_synchronize; + + ud->ddev.device_free_chan_resources = udma_free_chan_resources; + ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; + ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | + DESC_METADATA_ENGINE; + if (ud->match_data->enable_memcpy_support) { + dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); + ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; + ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); + } + + ud->ddev.dev = dev; + ud->dev = dev; + ud->psil_base = ud->match_data->psil_base; + + INIT_LIST_HEAD(&ud->ddev.channels); + INIT_LIST_HEAD(&ud->desc_to_purge); + + ch_count = udma_setup_resources(ud); + if (ch_count <= 0) + return ch_count; + + spin_lock_init(&ud->lock); + INIT_WORK(&ud->purge_work, udma_purge_desc_work); + + ud->desc_align = 64; + if (ud->desc_align < dma_get_cache_alignment()) + ud->desc_align = dma_get_cache_alignment(); + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rflow_cnt; i++) { + struct udma_rflow *rflow = &ud->rflows[i]; + + rflow->id = i; + } + + for (i = 0; i < ch_count; i++) { + struct
udma_chan *uc = &ud->channels[i]; + + uc->ud = ud; + uc->vc.desc_free = udma_desc_free; + uc->id = i; + uc->tchan = NULL; + uc->rchan = NULL; + uc->config.remote_thread_id = -1; + uc->config.dir = DMA_MEM_TO_MEM; + uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", + dev_name(dev), i); + + vchan_init(&uc->vc, &ud->ddev); + /* Use custom vchan completion handling */ + tasklet_init(&uc->vc.task, udma_vchan_complete, + (unsigned long)&uc->vc); + init_completion(&uc->teardown_completed); + } + + ret = dma_async_device_register(&ud->ddev); + if (ret) { + dev_err(dev, "failed to register slave DMA engine: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, ud); + + ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); + if (ret) { + dev_err(dev, "failed to register of_dma controller\n"); + dma_async_device_unregister(&ud->ddev); + } + + return ret; +} + +static struct platform_driver udma_driver = { + .driver = { + .name = "ti-udma", + .of_match_table = udma_of_match, + .suppress_bind_attrs = true, + }, + .probe = udma_probe, +}; +builtin_platform_driver(udma_driver); + +/* Private interfaces to UDMA */ +#include "k3-udma-private.c" diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h new file mode 100644 index 000000000000..128d8744a435 --- /dev/null +++ b/drivers/dma/ti/k3-udma.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_UDMA_H_ +#define K3_UDMA_H_ + +#include <linux/soc/ti/ti_sci_protocol.h> + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + ((i) * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ 
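+/* Note: the peer RT_EN enable/teardown/pause/flush bits line up with the UDMA_CHAN_RT_CTL_* bits above (bits 31..28). */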
+#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +#define PDMA_STATIC_TR_XY_ACC32 BIT(30) +#define PDMA_STATIC_TR_XY_BURST BIT(31) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask)) + +struct udma_dev; +struct udma_tchan; +struct udma_rchan; +struct udma_rflow; + +enum udma_rm_range { + RM_RANGE_TCHAN = 0, + RM_RANGE_RCHAN, + RM_RANGE_RFLOW, + RM_RANGE_LAST, +}; + +struct udma_tisci_rm { + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + u32 tisci_dev_id; + + /* tisci information for PSI-L thread pairing/unpairing */ + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_navss_dev_id; + + struct ti_sci_resource *rm_ranges[RM_RANGE_LAST]; +}; + +/* Direct access to UDMA low-level resources for the glue layer */ +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread); +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property); +void xudma_dev_put(struct udma_dev *ud); +u32 xudma_dev_get_psil_base(struct udma_dev *ud); +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt); +int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt); + +struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id); +struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id); +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id); + +void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p); +void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p); +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p); + +int xudma_tchan_get_id(struct udma_tchan *p); +int xudma_rchan_get_id(struct udma_rchan *p); +int xudma_rflow_get_id(struct udma_rflow *p); + +u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg); +void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val); +u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg); +void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val); +bool xudma_rflow_is_gp(struct udma_dev *ud, int id); + +#endif /* K3_UDMA_H_ */ diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 256fc662c500..23e33a85f033 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) struct virt_dma_desc *vd, *_vd; list_for_each_entry_safe(vd, _vd, head, node) { - if (dmaengine_desc_test_reuse(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + vchan_vdesc_fini(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -134,6
+129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index ab158bac03a7..e9f5250fbe4d 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -31,9 +31,9 @@ struct virt_dma_chan { struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; + struct list_head desc_terminated; struct virt_dma_desc *cyclic; - struct virt_dma_desc *vd_terminated; }; static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) @@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - if (dmaengine_desc_test_reuse(&vd->tx)) + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); list_add(&vd->node, &vc->desc_allocated); - else + spin_unlock_irqrestore(&vc->lock, flags); + } else { vc->desc_free(vd); + } } /** @@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - /* free up stuck descriptor */ - if (vc->vd_terminated) - vchan_vdesc_fini(vc->vd_terminated); + list_add_tail(&vd->node, &vc->desc_terminated); - vc->vd_terminated = vd; if (vc->cyclic == vd) vc->cyclic = NULL; } @@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); } static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) @@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) */ static inline void vchan_synchronize(struct virt_dma_chan *vc) { + LIST_HEAD(head); unsigned long flags; tasklet_kill(&vc->task); spin_lock_irqsave(&vc->lock, flags); - if (vc->vd_terminated) { - vchan_vdesc_fini(vc->vd_terminated); - vc->vd_terminated = NULL; - } + + list_splice_tail_init(&vc->desc_terminated, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); } #endif diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107..d47749a35863 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << 
ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 5c8272329a65..b3c99bb5fe77 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -491,8 +491,7 @@ config EDAC_TI tristate "Texas Instruments DDR3 ECC Controller" depends on ARCH_KEYSTONE || SOC_DRA7XX help - Support for error detection and correction on the - TI SoCs. + Support for error detection and correction on the TI SoCs. config EDAC_QCOM tristate "QCOM EDAC Controller" diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 428ce98f6776..9fbad908a854 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17 || pvt->fam == 0x18) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: - case 0x18: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) { + if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) + pvt->dram_type = MEM_LRDDR4; + else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) + pvt->dram_type = MEM_RDDR4; + else + pvt->dram_type = MEM_DDR4; + return; + } + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1100,16 +1103,6 @@ static void determine_memory_type(struct 
amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - case 0x18: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) family_types[F17_CPUS].ctl_name = "F18h"; break; + case 0x19: + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid) struct mem_ctl_info *mci; struct amd64_pvt *pvt; - mci = find_mci_by_dev(&F3->dev); - WARN_ON(!mci); - /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&F3->dev); if (!mci) @@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 9be31688110b..abbf3c274d74 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -122,6 +122,8 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 /* * Function 1 - Address Map @@ -292,6 +294,7 @@ enum amd_families { F17_M10H_CPUS, F17_M30H_CPUS, F17_M70H_CPUS, + F19_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 09a9e3de9595..b194658b8b5c 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (!np) { dev_err(mci->pdev, "dt: missing /memory node\n"); return -ENODEV; - }; + } rc = of_address_to_resource(np, 0, &r); @@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (rc) { dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n"); return rc; - }; + } dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n", r.start, resource_size(&r), PAGE_SHIFT); diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 0ddc41e47a96..191aa7c19ded 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a) return a & ((1 << 16) - 1); } -static inline u32 i5100_redmemb_ecc_locator(u32 a) -{ - return a & ((1 << 18) - 1); -} - static inline u32 i5100_recmema_merr(u32 a) { return i5100_nrecmema_merr(a); @@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 dw; u32 dw2; 
unsigned syndrome = 0; - unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; @@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); - ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea622c6f3a39..ea980c556f2e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,7 @@ #include "mce_amd.h" -static struct amd_decoder_ops *fam_ops; +static struct amd_decoder_ops fam_ops; static u8 xec_mask = 0xf; @@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 Fill Data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "Op Cache Microtag Probe Port Parity Error", "IC Microtag or Full Tag Multi-hit Error", @@ -378,6 +405,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m) : (xec ? 
"multimatch" : "parity"))); return; } - } else if (fam_ops->mc0_mce(ec, xec)) + } else if (fam_ops.mc0_mce(ec, xec)) ; else pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); @@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m) pr_cont("Hardware Assert.\n"); else goto wrong_mc1_mce; - } else if (fam_ops->mc1_mce(ec, xec)) + } else if (fam_ops.mc1_mce(ec, xec)) ; else goto wrong_mc1_mce; @@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m) pr_emerg(HW_ERR "MC2 Error: "); - if (!fam_ops->mc2_mce(ec, xec)) + if (!fam_ops.mc2_mce(ec, xec)) pr_cont(HW_ERR "Corrupted MC2 MCE info?\n"); } @@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (m->tsc) pr_emerg(HW_ERR "TSC: %llu\n", m->tsc); - if (!fam_ops) + /* Doesn't matter which member to test. */ + if (!fam_ops.mc0_mce) goto err_code; switch (m->bank) { @@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void) c->x86_vendor != X86_VENDOR_HYGON) return -ENODEV; - fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); - if (!fam_ops) - return -ENOMEM; + if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } switch (c->x86) { case 0xf: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x10: - fam_ops->mc0_mce = f10h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f10h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x11: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x12: - fam_ops->mc0_mce = f12h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f12h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x14: - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x15: xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f; - fam_ops->mc0_mce = f15h_mc0_mce; - fam_ops->mc1_mce = f15h_mc1_mce; - fam_ops->mc2_mce = f15h_mc2_mce; + fam_ops.mc0_mce = f15h_mc0_mce; + fam_ops.mc1_mce = f15h_mc1_mce; + fam_ops.mc2_mce = f15h_mc2_mce; break; case 0x16: xec_mask = 0x1f; - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = f16h_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = f16h_mc2_mce; break; case 0x17: case 0x18: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } - break; + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + return -EINVAL; default: printk(KERN_WARNING "Huh? 
What family is it: 0x%x?!\n", c->x86); - goto err_out; + return -EINVAL; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); return 0; - -err_out: - kfree(fam_ops); - fam_ops = NULL; - return -EINVAL; } early_initcall(mce_amd_init); @@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init); static void __exit mce_amd_exit(void) { mce_unregister_decode_chain(&amd_mce_dec_nb); - kfree(fam_ops); } MODULE_DESCRIPTION("AMD MCE decoder"); diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 413cdb4a591d..3a3dcb14ed99 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -10,7 +10,7 @@ #include <linux/edac.h> #include <linux/platform_device.h> #include "edac_module.h" -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define DRVNAME "sifive_edac" @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev) p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", 1, 1, NULL, 0, edac_device_alloc_index()); - if (IS_ERR(p->dci)) - return PTR_ERR(p->dci); + if (!p->dci) + return -ENOMEM; p->dci->dev = &pdev->dev; p->dci->mod_name = "Sifive ECC Manager"; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 95662a4ff4c4..99bbaf629b8d 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); if (!pdev) { - skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + edac_dbg(2, "Can't get tolm/tohm\n"); return -ENODEV; } diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index 5b7ef89eb701..ed10da5313e8 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev) fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); if (IS_ERR(fw_shm_pool)) { - tee_client_close_context(pvt_data.ctx); dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index d4077db6dc97..5d4f84781aa0 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -17,7 +17,7 @@ static const struct console *earlycon_console __initdata; static const struct font_desc *font; static u32 efi_x, efi_y; static u64 fb_base; -static pgprot_t fb_prot; +static bool fb_wb; static void *efi_fb; /* @@ -33,10 +33,8 @@ static int __init efi_earlycon_remap_fb(void) if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED)) return 0; - if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL)) - efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB); - else - efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC); + efi_fb = memremap(fb_base, screen_info.lfb_size, + fb_wb ? MEMREMAP_WB : MEMREMAP_WC); return efi_fb ? 0 : -ENOMEM; } @@ -53,9 +51,12 @@ late_initcall(efi_earlycon_unmap_fb); static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) { + pgprot_t fb_prot; + if (efi_fb) return efi_fb + start; + fb_prot = fb_wb ? 
PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); } @@ -215,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - if (opt && !strcmp(opt, "ram")) - fb_prot = PAGE_KERNEL; - else - fb_prot = pgprot_writecombine(PAGE_KERNEL); + fb_wb = opt && !strcmp(opt, "ram"); si = &screen_info; xres = si->lfb_width; diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 35edd7cfb6a1..97378cf96a2e 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -33,7 +33,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng; + struct efi_rng_protocol *rng = NULL; status = efi_call_early(locate_protocol, &rng_proto, NULL, (void **)&rng); @@ -162,8 +162,8 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng; - struct linux_efi_random_seed *seed; + struct efi_rng_protocol *rng = NULL; + struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_call_early(locate_protocol, &rng_proto, NULL, diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 6ab25fe1c423..4b6d2ef15c39 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -573,7 +573,6 @@ config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on PCI_MSI - select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help @@ -1148,6 +1147,7 @@ config GPIO_MADERA config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 + select GPIOLIB_IRQCHIP help GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. MAX77620 PMIC has 8 pins that can be configured as GPIOs. 
The diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index c4fdc192ea4e..94b8d3ae27bc 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, mutex_lock(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && - !test_bit(FLAG_IS_OUT, &desc->flags)) { + !test_bit(FLAG_IS_OUT, &desc->flags)) { curr = __gpio_mockup_get(chip, offset); if (curr == value) goto out; @@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, irq_type = irq_get_trigger_type(irq); if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || - (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) + (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) irq_sim_fire(sim, offset); } diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index d08d86a22b1f..462770479045 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -53,6 +53,7 @@ struct thunderx_line { struct thunderx_gpio { struct gpio_chip chip; u8 __iomem *register_base; + struct irq_domain *irqd; struct msix_entry *msix_entries; /* per line MSI-X */ struct thunderx_line *line_entries; /* per line irq info */ raw_spinlock_t lock; @@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, } } -static void thunderx_gpio_irq_ack(struct irq_data *d) +static void thunderx_gpio_irq_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask(struct irq_data *d) +static void thunderx_gpio_irq_mask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask_ack(struct irq_data *d) +static void thunderx_gpio_irq_mask_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_unmask(struct irq_data *d) +static void thunderx_gpio_irq_unmask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1S, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static int thunderx_gpio_irq_set_type(struct irq_data *d, +static int thunderx_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - struct thunderx_line *txline = - &txgpio->line_entries[irqd_to_hwirq(d)]; + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio 
*txgpio = txline->txgpio; u64 bit_cfg; - irqd_set_trigger_type(d, flow_type); + irqd_set_trigger_type(data, flow_type); bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN; if (flow_type & IRQ_TYPE_EDGE_BOTH) { - irq_set_handler_locked(d, handle_fasteoi_ack_irq); + irq_set_handler_locked(data, handle_fasteoi_ack_irq); bit_cfg |= GPIO_BIT_CFG_INT_TYPE; } else { - irq_set_handler_locked(d, handle_fasteoi_mask_irq); + irq_set_handler_locked(data, handle_fasteoi_mask_irq); } raw_spin_lock(&txgpio->lock); @@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data) irq_chip_disable_parent(data); } +static int thunderx_gpio_irq_request_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + int r; + + r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); + if (r) + return r; + + r = irq_chip_request_resources_parent(data); + if (r) + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); + + return r; +} + +static void thunderx_gpio_irq_release_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + + irq_chip_release_resources_parent(data); + + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); +} + /* * Interrupts are chained from underlying MSI-X vectors. We have * these irq_chip functions to be able to handle level triggering @@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = { .irq_unmask = thunderx_gpio_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_request_resources = thunderx_gpio_irq_request_resources, + .irq_release_resources = thunderx_gpio_irq_release_resources, .irq_set_type = thunderx_gpio_irq_set_type, .flags = IRQCHIP_SET_TYPE_MASKED }; -static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, - unsigned int child, - unsigned int child_type, - unsigned int *parent, - unsigned int *parent_type) +static int thunderx_gpio_irq_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) { - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - - *parent = txgpio->base_msi + (2 * child); - *parent_type = IRQ_TYPE_LEVEL_HIGH; + struct thunderx_gpio *txgpio = d->host_data; + + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (fwspec->param[0] >= txgpio->chip.ngpio) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } +static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct thunderx_line *txline = arg; + + return irq_domain_set_hwirq_and_chip(d, virq, txline->line, + &thunderx_gpio_irq_chip, txline); +} + +static const struct irq_domain_ops thunderx_gpio_irqd_ops = { + .alloc = thunderx_gpio_irq_alloc, + .translate = thunderx_gpio_irq_translate +}; + +static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct thunderx_gpio *txgpio = gpiochip_get_data(chip); + + return irq_find_mapping(txgpio->irqd, offset); +} + static int thunderx_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct thunderx_gpio *txgpio; struct gpio_chip *chip; - struct gpio_irq_chip *girq; int ngpio, i; int err = 0; @@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, 
} txgpio->msix_entries = devm_kcalloc(dev, - ngpio, sizeof(struct msix_entry), - GFP_KERNEL); + ngpio, sizeof(struct msix_entry), + GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; @@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, if (err < 0) goto out; + /* + * Push GPIO specific irqdomain on hierarchy created as a side + * effect of the pci_enable_msix() + */ + txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, + 0, 0, of_node_to_fwnode(dev->of_node), + &thunderx_gpio_irqd_ops, txgpio); + if (!txgpio->irqd) { + err = -ENOMEM; + goto out; + } + + /* Push on irq_data and the domain for each line. */ + for (i = 0; i < ngpio; i++) { + err = irq_domain_push_irq(txgpio->irqd, + txgpio->msix_entries[i].vector, + &txgpio->line_entries[i]); + if (err < 0) + dev_err(dev, "irq_domain_push_irq: %d\n", err); + } + chip->label = KBUILD_MODNAME; chip->parent = dev; chip->owner = THIS_MODULE; @@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set = thunderx_gpio_set; chip->set_multiple = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; - girq = &chip->irq; - girq->chip = &thunderx_gpio_irq_chip; - girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = - irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; - girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; - girq->handler = handle_bad_irq; - girq->default_type = IRQ_TYPE_NONE; - + chip->to_irq = thunderx_gpio_to_irq; err = devm_gpiochip_add_data(dev, chip, txgpio); if (err) goto out; - /* Push on irq_data and the domain for each line. */ - for (i = 0; i < ngpio; i++) { - err = irq_domain_push_irq(chip->irq.domain, - txgpio->msix_entries[i].vector, - chip); - if (err < 0) - dev_err(dev, "irq_domain_push_irq: %d\n", err); - } - dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n", ngpio, chip->base); return 0; @@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev) struct thunderx_gpio *txgpio = pci_get_drvdata(pdev); for (i = 0; i < txgpio->chip.ngpio; i++) - irq_domain_pop_irq(txgpio->chip.irq.domain, + irq_domain_pop_irq(txgpio->irqd, txgpio->msix_entries[i].vector); - irq_domain_remove(txgpio->chip.irq.domain); + irq_domain_remove(txgpio->irqd); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 4c3f6370eab4..05ba16fffdad 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); @@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); - writel_relaxed(gpio->context.int_en[bank_num], - gpio->base_addr + - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + writel_relaxed(~(gpio->context.int_en[bank_num]), + gpio->base_addr + + 
ZYNQ_GPIO_INTEN_OFFSET(bank_num)); + } } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d30e57dc755c..31fee5e918b7 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,11 +21,19 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" +#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l +#define QUIRK_NO_WAKEUP 0x02l + static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); +static int honor_wakeup = -1; +module_param(honor_wakeup, int, 0444); +MODULE_PARM_DESC(honor_wakeup, + "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; @@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void) /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); -static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { +static const struct dmi_system_id gpiolib_acpi_quirks[] = { { /* * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for * @@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, }, { /* @@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + }, + { + /* + * Various HP X2 10 Cherry Trail models use an external + * embedded-controller connected via I2C + an ACPI GPIO + * event handler. The embedded controller generates various + * spurious wakeup events when suspended. So disable wakeup + * for its handler (it uses the only ACPI GPIO event handler). + * This breaks wakeup when opening the lid; the user needs + * to press the power button to wake up the system. The + * alternative is suspend simply not working, which is worse.
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = (void *)QUIRK_NO_WAKEUP, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct dmi_system_id *id; + long quirks = 0; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + if (run_edge_events_on_boot < 0) { - if (dmi_check_system(run_edge_events_on_boot_blacklist)) + if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } + if (honor_wakeup < 0) { + if (quirks & QUIRK_NO_WAKEUP) + honor_wakeup = 0; + else + honor_wakeup = 1; + } + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index b696e4598a24..1b3f217a35e2 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -132,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np, int index) { /* - * Handle MMC "cd-inverted" and "wp-inverted" semantics. - */ - if (IS_ENABLED(CONFIG_MMC)) { - /* - * Active low is the default according to the - * SDHCI specification and the device tree - * bindings. However the code in the current - * kernel was written such that the phandle - * flags were always respected, and "cd-inverted" - * would invert the flag from the device phandle. - */ - if (!strcmp(propname, "cd-gpios")) { - if (of_property_read_bool(np, "cd-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - if (!strcmp(propname, "wp-gpios")) { - if (of_property_read_bool(np, "wp-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - } - /* * Some GPIO fixed regulator quirks. * Note that active low is the default. */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 78a16e42f222..bcfbfded9ba3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_is_active_low); +/** + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not + * @desc: the gpio descriptor to change + */ +void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + VALIDATE_DESC_VOID(desc); + change_bit(FLAG_ACTIVE_LOW, &desc->flags); +} +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); + /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. 
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0ffc9447b573..30a1e3ac21d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1; int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; -int amdgpu_noretry = 1; +int amdgpu_noretry; int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { @@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes, module_param_named(mes, amdgpu_mes, int, 0444); MODULE_PARM_DESC(noretry, - "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); + "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)"); module_param_named(noretry, amdgpu_noretry, int, 0644); /** @@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ - {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, @@ -1359,7 +1359,8 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_ATOMIC | DRIVER_GEM | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4ef4d31f5231..2f52b7f4d25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe) }; static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7aac9568d3be..803e59d97411 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } -static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) -{ - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; - - timing_out->display_color_depth--; -} - -static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, - const struct drm_display_info *info) +static bool adjust_colour_depth_from_display_info( + struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) { + enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; /* Adjust the pixel clock per the HDMI spec, based on colour depth */ - switch (timing_out->display_color_depth) { + switch (depth) { + case COLOR_DEPTH_888: + break; case COLOR_DEPTH_101010: normalized_clk = (normalized_clk * 30) / 24; break; @@ -3387,14 +3381,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ normalized_clk = (normalized_clk * 48) / 24; break; default: - return; + /* The above depths are the only ones valid for HDMI. */ + return false; } - if (normalized_clk <= info->max_tmds_clock) - return; - reduce_mode_colour_depth(timing_out); - - } while (timing_out->display_color_depth > COLOR_DEPTH_888); - + if (normalized_clk <= info->max_tmds_clock) { + timing_out->display_color_depth = depth; + return true; + } + } while (--depth > COLOR_DEPTH_666); + return false; } static void fill_stream_properties_from_drm_display_mode( @@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode( stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - adjust_colour_depth_from_display_info(timing_out, info); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (!adjust_colour_depth_from_display_info(timing_out, info) && + drm_mode_is_420_also(info, mode_in) && + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + adjust_colour_depth_from_display_info(timing_out, info); + } + } } static void fill_audio_info(struct audio_info *audio_info, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 62d8289abb4e..4619f94f0ac7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_EDP: { - read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 273dd80fabf3..e6afe4faeca6 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -393,7 +393,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx += req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } + mgr->is_waiting_for_dwn_reply = false; + } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1199,6 +1201,7 @@ out: } mutex_unlock(&mgr->qlock); + drm_dp_mst_kick_tx(mgr); return ret; } @@ -1913,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) +static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) +{ + switch (pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: +
return true; + case DP_PEER_DEVICE_MST_BRANCHING: + /* For sst branch device */ + if (!mcs) + return true; + + return false; + } + return true; +} + +static int +drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, + bool new_mcs) { struct drm_dp_mst_topology_mgr *mgr = port->mgr; struct drm_dp_mst_branch *mstb; u8 rad[8], lct; int ret = 0; - if (port->pdt == new_pdt) + if (port->pdt == new_pdt && port->mcs == new_mcs) return 0; /* Teardown the old pdt, if there is one */ - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* - * If the new PDT would also have an i2c bus, don't bother - * with reregistering it - */ - if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - new_pdt == DP_PEER_DEVICE_SST_SINK) { - port->pdt = new_pdt; - return 0; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* + * If the new PDT would also have an i2c bus, + * don't bother with reregistering it + */ + if (new_pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { + port->pdt = new_pdt; + port->mcs = new_mcs; + return 0; + } - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mutex_lock(&mgr->lock); - drm_dp_mst_topology_put_mstb(port->mstb); - port->mstb = NULL; - mutex_unlock(&mgr->lock); - break; + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + } else { + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + } } port->pdt = new_pdt; - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* add i2c over sideband */ - ret = drm_dp_mst_register_i2c_bus(&port->aux); - break; + port->mcs = new_mcs; - case DP_PEER_DEVICE_MST_BRANCHING: - lct = drm_dp_calculate_rad(port, rad); - mstb = drm_dp_add_mst_branch_device(lct, rad); - if (!mstb) { - ret = -ENOMEM; - DRM_ERROR("Failed to create MSTB for port %p", port); - goto out; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + } else { + lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", + port); + goto out; + } - mutex_lock(&mgr->lock); - port->mstb = mstb; - mstb->mgr = port->mgr; - mstb->port_parent = port; + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); - mutex_unlock(&mgr->lock); + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); - /* And make sure we send a link address for this */ - ret = 1; - break; + /* And make sure we send a link address for this */ + ret = 1; + } } out: @@ -2132,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, goto error; } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { + if (port->pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { port->cached_edid = drm_get_edid(port->connector, 
&port->aux.ddc); drm_connector_set_tile_property(port->connector); @@ -2198,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port; int old_ddps = 0, ret; u8 new_pdt = DP_PEER_DEVICE_NONE; + bool new_mcs = 0; bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); @@ -2242,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, port->input = port_msg->input_port; if (!port->input) new_pdt = port_msg->peer_device_type; - port->mcs = port_msg->mcs; + new_mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; port->dpcd_rev = port_msg->dpcd_revision; @@ -2269,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, } } - ret = drm_dp_port_set_pdt(port, new_pdt); + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { send_link_addr = true; } else if (ret < 0) { @@ -2283,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, * we're coming out of suspend. In this case, always resend the link * address if there's an MSTB on this port */ - if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && + port->mcs) send_link_addr = true; if (port->connector) @@ -2318,8 +2339,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps, ret; + int old_ddps, old_input, ret, i; u8 new_pdt; + bool new_mcs; bool dowork = false, create_connector = false; port = drm_dp_get_port(mstb, conn_stat->port_number); @@ -2349,8 +2371,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } old_ddps = port->ddps; + old_input = port->input; port->input = conn_stat->input_port; - port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2363,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; - - ret = drm_dp_port_set_pdt(port, new_pdt); + new_mcs = conn_stat->message_capability_status; + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { dowork = true; } else if (ret < 0) { @@ -2373,6 +2395,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, dowork = false; } + if (!old_input && old_ddps != port->ddps && !port->ddps) { + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_mst_port *port_validated; + + if (!vcpi) + continue; + + port_validated = + container_of(vcpi, struct drm_dp_mst_port, vcpi); + port_validated = + drm_dp_mst_topology_get_port_validated(mgr, port_validated); + if (!port_validated) { + mutex_lock(&mgr->payload_lock); + vcpi->num_slots = 0; + mutex_unlock(&mgr->payload_lock); + } else { + drm_dp_mst_topology_put_port(port_validated); + } + } + } + if (port->connector) drm_modeset_unlock(&mgr->base.lock); else if (create_connector) @@ -2718,9 +2762,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent it should be in the slots now */ + mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2760,7 +2806,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq)) + if (list_is_singular(&mgr->tx_msg_downq) && + !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -3678,6 +3725,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; mstb->tx_slots[slot] = NULL; + mgr->is_waiting_for_dwn_reply = false; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); @@ -3687,6 +3735,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) no_msg: drm_dp_mst_topology_put_mstb(mstb); clear_down_rep_recv: + mutex_lock(&mgr->qlock); + mgr->is_waiting_for_dwn_reply = false; + mutex_unlock(&mgr->qlock); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3896,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, switch (port->pdt) { case DP_PEER_DEVICE_NONE: case DP_PEER_DEVICE_MST_BRANCHING: + if (!port->mcs) + ret = connector_status_connected; break; case DP_PEER_DEVICE_SST_SINK: @@ -4497,7 +4550,7 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq)) + if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -4508,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) if (port->connector) port->mgr->cbs->destroy_connector(port->mgr, port->connector); - drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); drm_dp_mst_put_port_malloc(port); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8ebeccdeed23..d8e8f3960f4d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ 
b/drivers/gpu/drm/drm_fb_helper.c @@ -1283,7 +1283,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. */ - if (var->bits_per_pixel != fb->format->cpp[0] * 8 || + if (var->bits_per_pixel > fb->format->cpp[0] * 8 || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " @@ -1309,6 +1309,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, } /* + * Likewise, bits_per_pixel should be rounded up to a supported value. + */ + var->bits_per_pixel = fb->format->cpp[0] * 8; + + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 85e6b2bbb34f..3a5ac13d5801 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev, /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--dev_priv->audio_power_refcount == 0) - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index effc4250b230..301897791627 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4515,8 +4515,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 trans_ddi_func_ctl2_val; if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) return; @@ -4524,10 +4522,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", transcoder_name(old_crtc_state->cpu_transcoder)); - reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); - trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | - PORT_SYNC_MODE_MASTER_SELECT_MASK); - I915_WRITE(reg, trans_ddi_func_ctl2_val); + I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); } static void intel_fdi_normal_train(struct intel_crtc *crtc) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 3d4f5775a4ba..25235ef630c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -9,16 +9,16 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -static __always_inline u32 __busy_read_flag(u8 id) +static __always_inline u32 __busy_read_flag(u16 id) { - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffff0000u; GEM_BUG_ON(id >= 16); 
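	/*
	 * Worked example of the busy encoding implemented here: the low
	 * 16 bits hold the single writer as (uabi_class + 1), the high
	 * 16 bits hold one read flag per class.  For uabi_class == 2 the
	 * read flag is 0x10000u << 2 == 0x00040000 and the write id
	 * becomes (2 + 1) | 0x00040000 == 0x00040003.  An unknown class
	 * from a newer kernel reports 0xffff0000 (all classes busy for
	 * read), so older userspace still errs on the side of "busy".
	 */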
return 0x10000u << id; } -static __always_inline u32 __busy_write_id(u8 id) +static __always_inline u32 __busy_write_id(u16 id) { /* * The uABI guarantees an active writer is also amongst the read @@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id) * last_read - hence we always set both read and write busy for * last_write. */ - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffffffffu; return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) +__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) return 0; /* Beware type-expansion follies! */ - BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); return flag(rq->engine->uabi_class); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 4c72d74d6576..0dbb44d30885 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -402,7 +402,7 @@ struct get_pages_work { static struct sg_table * __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) + struct page **pvec, unsigned long num_pages) { unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; @@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { struct get_pages_work *work = container_of(_work, typeof(*work), work); struct drm_i915_gem_object *obj = work->obj; - const int npages = obj->base.size >> PAGE_SHIFT; + const unsigned long npages = obj->base.size >> PAGE_SHIFT; + unsigned long pinned; struct page **pvec; - int pinned, ret; + int ret; ret = -ENOMEM; pinned = 0; @@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { - const int num_pages = obj->base.size >> PAGE_SHIFT; + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; struct mm_struct *mm = obj->userptr.mm->mm; struct page **pvec; struct sg_table *pages; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index ef7bc41ffffa..5b7ff3ccfa8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma) if (err) return err; + err = i915_active_acquire(&vma->active); + if (err) + goto err_unpin; + /* * And mark it as a globally pinned object to let the shrinker know * it cannot reclaim the object until we release it. 
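 *
 * Note the ordering established above: the vma is pinned first, then
 * i915_active_acquire() is taken on &vma->active, and the error path
 * unwinds with i915_vma_unpin().  A minimal sketch of that unwind idiom
 * (hypothetical pin()/acquire() names, assuming paired release helpers):
 *
 *	err = pin(res);
 *	if (err)
 *		return err;
 *	err = acquire(res);
 *	if (err) {
 *		unpin(res);
 *		return err;
 *	}
 *	return 0;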
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma) vma->obj->mm.dirty = true; return 0; + +err_unpin: + i915_vma_unpin(vma); + return err; } static void __context_unpin_state(struct i915_vma *vma) { i915_vma_make_shrinkable(vma); + i915_active_release(&vma->active); __i915_vma_unpin(vma); } +static int __ring_active(struct intel_ring *ring) +{ + int err; + + err = i915_active_acquire(&ring->vma->active); + if (err) + return err; + + err = intel_ring_pin(ring); + if (err) + goto err_active; + + return 0; + +err_active: + i915_active_release(&ring->vma->active); + return err; +} + +static void __ring_retire(struct intel_ring *ring) +{ + intel_ring_unpin(ring); + i915_active_release(&ring->vma->active); +} + __i915_active_call static void __intel_context_retire(struct i915_active *active) { @@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active) __context_unpin_state(ce->state); intel_timeline_unpin(ce->timeline); - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); intel_context_put(ce); } @@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active) intel_context_get(ce); - err = intel_ring_pin(ce->ring); + err = __ring_active(ce->ring); if (err) goto err_put; @@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active) err_timeline: intel_timeline_unpin(ce->timeline); err_ring: - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); err_put: intel_context_put(ce); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 17f1f1441efc..2b446474e010 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -274,8 +274,8 @@ struct intel_engine_cs { u8 class; u8 instance; - u8 uabi_class; - u8 uabi_instance; + u16 uabi_class; + u16 uabi_instance; u32 uabi_capabilities; u32 context_size; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 75dd0e0367b7..d925a1035c9d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2664,6 +2664,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + LRC_PPHWSP_SCRATCH_ADDR); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaMediaPoolStateCmdInWABB:bxt,glk */ @@ -4416,9 +4424,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; + ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; /* * The decision on whether to submit a request using semaphores diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index a47d5a7c32c9..93026217c121 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1413,14 +1413,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) int len; u32 *cs; - flags |= MI_MM_SPACE_GTT; - if (IS_HASWELL(i915)) - /* These 
flags are for resource streamer on HSW+ */ - flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; - else - /* We need to save the extended state for powersaving modes */ - flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; - len = 4; if (IS_GEN(i915, 7)) len += 2 + (num_engines ? 4 * num_engines + 6 : 0); @@ -1589,22 +1581,21 @@ static int switch_context(struct i915_request *rq) } if (ce->state) { - u32 hw_flags; + u32 flags; GEM_BUG_ON(rq->engine->id != RCS0); - /* - * The kernel context(s) is treated as pure scratch and is not - * expected to retain any state (as we sacrifice it during - * suspend and on resume it may be corrupted). This is ok, - * as nothing actually executes using the kernel context; it - * is purely used for flushing user contexts. - */ - hw_flags = 0; - if (i915_gem_context_is_kernel(rq->gem_context)) - hw_flags = MI_RESTORE_INHIBIT; + /* For resource streamer on HSW+ and power context elsewhere */ + BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); + BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN); + + flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT; + if (!i915_gem_context_is_kernel(rq->gem_context)) + flags |= MI_RESTORE_EXT_STATE_EN; + else + flags |= MI_RESTORE_INHIBIT; - ret = mi_set_context(rq, hw_flags); + ret = mi_set_context(rq, flags); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e29bc137e7ba..21aa08f55811 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1660,8 +1660,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) /* WaRsDisableCoarsePowerGating:skl,cnl */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9)) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_CANNONLAKE(dev_priv) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f24064893b30..d6ce57d30958 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { + GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; @@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { + GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); iter.dma += I915_GTT_PAGE_SIZE; @@ -3304,7 +3306,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) static void ggtt_restore_mappings(struct i915_ggtt *ggtt) { - struct i915_vma *vma, *vn; + struct i915_vma *vma; bool flush = false; int open; @@ -3319,15 +3321,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) open = atomic_xchg(&ggtt->vm.open, 0); /* clflush objects bound into the GGTT and rebind them. 
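 * Since the loop below no longer unbinds nodes or unlinks them while
 * iterating, plain list_for_each_entry() replaces the _safe variant;
 * the latter is only required when the node under the cursor may be
 * freed or removed from the list during the walk.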
*/ - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) continue; - if (!__i915_vma_unbind(vma)) - continue; - clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); WARN_ON(i915_vma_bind(vma, obj ? obj->cache_level : 0, diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 6f09aa0be80a..d6d2e6fb8674 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1074,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; - if (!is_igp(i915)) + if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, - "i915-%s", + "i915_%s", dev_name(i915->drm.dev)); - else + if (pmu->name) { + /* tools/perf reserves colons as special. */ + strreplace((char *)pmu->name, ':', '_'); + } + } else { pmu->name = "i915"; + } if (!pmu->name) goto err; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4fd3d76db346..094011b8f64d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4177,7 +4177,13 @@ enum { #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS (1 << 20) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) #define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) #define CGPSF_CLKGATE_DIS (1 << 3) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 75ae6f495161..86379eddc908 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6565,6 +6565,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaEnable32PlaneMode:icl */ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, + 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, PSDUNIT_CLKGATE_DIS); } static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 35cc69a3a1b9..05364eca20f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -25,6 +25,7 @@ #ifndef __I915_SELFTESTS_RANDOM_H__ #define __I915_SELFTESTS_RANDOM_H__ +#include <linux/math64.h> #include <linux/random.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f61364f7c471..88b431a267af 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) { + struct panfrost_file_priv *priv = file->driver_priv; struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; + struct 
panfrost_gem_mapping *mapping; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); - args->offset = bo->node.start << PAGE_SHIFT; + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + drm_gem_object_put_unlocked(&bo->base.base); + return -EINVAL; + } + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } @@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev, struct drm_panfrost_submit *args, struct panfrost_job *job) { + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo; + unsigned int i; + int ret; + job->bo_count = args->bo_handle_count; if (!job->bo_count) @@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->implicit_fences) return -ENOMEM; - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)args->bo_handles, - job->bo_count, &job->bos); + ret = drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)args->bo_handles, + job->bo_count, &job->bos); + if (ret) + return ret; + + job->mappings = kvmalloc_array(job->bo_count, + sizeof(struct panfrost_gem_mapping *), + GFP_KERNEL | __GFP_ZERO); + if (!job->mappings) + return -ENOMEM; + + for (i = 0; i < job->bo_count; i++) { + struct panfrost_gem_mapping *mapping; + + bo = to_panfrost_bo(job->bos[i]); + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + ret = -EINVAL; + break; + } + + job->mappings[i] = mapping; + } + + return ret; } /** @@ -320,7 +357,9 @@ out: static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_get_bo_offset *args = data; + struct panfrost_gem_mapping *mapping; struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; @@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, } bo = to_panfrost_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; - + mapping = panfrost_gem_mapping_get(bo, priv); drm_gem_object_put_unlocked(gem_obj); + + if (!mapping) + return -EINVAL; + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_madvise *args = data; struct panfrost_device *pfdev = dev->dev_private; struct drm_gem_object *gem_obj; + struct panfrost_gem_object *bo; + int ret = 0; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { @@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, return -ENOENT; } + bo = to_panfrost_bo(gem_obj); + mutex_lock(&pfdev->shrinker_lock); + mutex_lock(&bo->mappings.lock); + if (args->madv == PANFROST_MADV_DONTNEED) { + struct panfrost_gem_mapping *first; + + first = list_first_entry(&bo->mappings.list, + struct panfrost_gem_mapping, + node); + + /* + * If we want to mark the BO purgeable, there must be only one + * user: the caller FD. + * We could do something smarter and mark the BO purgeable only + * when all its users have marked it purgeable, but globally + * visible/shared BOs are likely to never be marked purgeable + * anyway, so let's not bother. 
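 * Concretely, the check below requires bo->mappings.list to hold
 * exactly one entry (list_is_singular(), the "exactly one node" test
 * from <linux/list.h>) and that entry's mmu to belong to the calling
 * FD; otherwise the madvise request fails with -EINVAL.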
+ */ + if (!list_is_singular(&bo->mappings.list) || + WARN_ON_ONCE(first->mmu != &priv->mmu)) { + ret = -EINVAL; + goto out_unlock_mappings; + } + } + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); if (args->retained) { - struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); - if (args->madv == PANFROST_MADV_DONTNEED) list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); } + +out_unlock_mappings: + mutex_unlock(&bo->mappings.lock); mutex_unlock(&pfdev->shrinker_lock); drm_gem_object_put_unlocked(gem_obj); - return 0; + return ret; } int panfrost_unstable_ioctl_check(void) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index fd766b1395fb..17b654e1eb94 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) list_del_init(&bo->base.madv_list); mutex_unlock(&pfdev->shrinker_lock); + /* + * If we still have mappings attached to the BO, there's a problem in + * our refcounting. + */ + WARN_ON_ONCE(!list_empty(&bo->mappings.list)); + if (bo->sgts) { int i; int n_sgt = bo->base.base.size / SZ_2M; @@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) drm_gem_shmem_free_object(obj); } +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv) +{ + struct panfrost_gem_mapping *iter, *mapping = NULL; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + kref_get(&iter->refcount); + mapping = iter; + break; + } + } + mutex_unlock(&bo->mappings.lock); + + return mapping; +} + +static void +panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) +{ + struct panfrost_file_priv *priv; + + if (mapping->active) + panfrost_mmu_unmap(mapping); + + priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); + spin_lock(&priv->mm_lock); + if (drm_mm_node_allocated(&mapping->mmnode)) + drm_mm_remove_node(&mapping->mmnode); + spin_unlock(&priv->mm_lock); +} + +static void panfrost_gem_mapping_release(struct kref *kref) +{ + struct panfrost_gem_mapping *mapping; + + mapping = container_of(kref, struct panfrost_gem_mapping, refcount); + + panfrost_gem_teardown_mapping(mapping); + drm_gem_object_put_unlocked(&mapping->obj->base.base); + kfree(mapping); +} + +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) +{ + if (!mapping) + return; + + kref_put(&mapping->refcount, panfrost_gem_mapping_release); +} + +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +{ + struct panfrost_gem_mapping *mapping; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(mapping, &bo->mappings.list, node) + panfrost_gem_teardown_mapping(mapping); + mutex_unlock(&bo->mappings.lock); +} + int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) { int ret; @@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) struct panfrost_gem_object *bo = to_panfrost_bo(obj); unsigned long color = bo->noexec ? 
PANFROST_BO_NOEXEC : 0; struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; + + INIT_LIST_HEAD(&mapping->node); + kref_init(&mapping->refcount); + drm_gem_object_get(obj); + mapping->obj = bo; /* * Executable buffers cannot cross a 16MB boundary as the program @@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) else align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0; - bo->mmu = &priv->mmu; + mapping->mmu = &priv->mmu; spin_lock(&priv->mm_lock); - ret = drm_mm_insert_node_generic(&priv->mm, &bo->node, + ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, size >> PAGE_SHIFT, align, color, 0); spin_unlock(&priv->mm_lock); if (ret) - return ret; + goto err; if (!bo->is_heap) { - ret = panfrost_mmu_map(bo); - if (ret) { - spin_lock(&priv->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); - } + ret = panfrost_mmu_map(mapping); + if (ret) + goto err; } + + mutex_lock(&bo->mappings.lock); + WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED); + list_add_tail(&mapping->node, &bo->mappings.list); + mutex_unlock(&bo->mappings.lock); + +err: + if (ret) + panfrost_gem_mapping_put(mapping); return ret; } void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + struct panfrost_gem_mapping *mapping = NULL, *iter; - if (bo->is_mapped) - panfrost_mmu_unmap(bo); + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + mapping = iter; + list_del(&iter->node); + break; + } + } + mutex_unlock(&bo->mappings.lock); - spin_lock(&priv->mm_lock); - if (drm_mm_node_allocated(&bo->node)) - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); + panfrost_gem_mapping_put(mapping); } static int panfrost_gem_pin(struct drm_gem_object *obj) @@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t if (!obj) return NULL; + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; return &obj->base.base; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 4b17e7308764..ca1bc9019600 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -13,23 +13,46 @@ struct panfrost_gem_object { struct drm_gem_shmem_object base; struct sg_table *sgts; - struct panfrost_mmu *mmu; - struct drm_mm_node node; - bool is_mapped :1; + /* + * Use a list for now. If searching a mapping ever becomes the + * bottleneck, we should consider using an RB-tree, or even better, + * let the core store drm_gem_object_mapping entries (where we + * could place driver specific data) instead of drm_gem_object ones + * in its drm_file->object_idr table. 
+ * + * struct drm_gem_object_mapping { + * struct drm_gem_object *obj; + * void *driver_priv; + * }; + */ + struct { + struct list_head list; + struct mutex lock; + } mappings; + bool noexec :1; bool is_heap :1; }; +struct panfrost_gem_mapping { + struct list_head node; + struct kref refcount; + struct panfrost_gem_object *obj; + struct drm_mm_node mmnode; + struct panfrost_mmu *mmu; + bool active :1; +}; + static inline struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj) { return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); } -static inline -struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node) +static inline struct panfrost_gem_mapping * +drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) { - return container_of(node, struct panfrost_gem_object, node); + return container_of(node, struct panfrost_gem_mapping, mmnode); } struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); @@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv); +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv); +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); + void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 458f0fa68111..f5dd7b29bc95 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct panfrost_gem_object *bo = to_panfrost_bo(obj); if (!mutex_trylock(&shmem->pages_lock)) return false; - panfrost_mmu_unmap(to_panfrost_bo(obj)); + panfrost_gem_teardown_mappings(bo); drm_gem_shmem_purge_locked(obj); mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index d411eb6c8eb9..e364ee00f3d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref) dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); - if (job->bos) { + if (job->mappings) { for (i = 0; i < job->bo_count; i++) + panfrost_gem_mapping_put(job->mappings[i]); + kvfree(job->mappings); + } + + if (job->bos) { + struct panfrost_gem_object *bo; + + for (i = 0; i < job->bo_count; i++) { + bo = to_panfrost_bo(job->bos[i]); drm_gem_object_put_unlocked(job->bos[i]); + } + kvfree(job->bos); } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 62454128a792..bbd3ba97ff67 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,7 @@ struct panfrost_job { /* Exclusive fences we have taken from the BOs to wait for */ struct dma_fence **implicit_fences; + struct panfrost_gem_mapping **mappings; struct drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c index a3ed64a1f15e..763cfca886a7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, return 0; } -int panfrost_mmu_map(struct panfrost_gem_object *bo) +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; - if (WARN_ON(bo->is_mapped)) + if (WARN_ON(mapping->active)) return 0; if (bo->noexec) @@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); - bo->is_mapped = true; + mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + mapping->active = true; return 0; } -void panfrost_mmu_unmap(struct panfrost_gem_object *bo) +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops; - u64 iova = bo->node.start << PAGE_SHIFT; - size_t len = bo->node.size << PAGE_SHIFT; + struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops; + u64 iova = mapping->mmnode.start << PAGE_SHIFT; + size_t len = mapping->mmnode.size << PAGE_SHIFT; size_t unmapped_len = 0; - if (WARN_ON(!bo->is_mapped)) + if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len); + dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + mapping->mmu->as, iova, len); while (unmapped_len < len) { size_t unmapped_page; @@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len); - bo->is_mapped = false; + panfrost_mmu_flush_range(pfdev, mapping->mmu, + mapping->mmnode.start << PAGE_SHIFT, len); + mapping->active = false; } static void mmu_tlb_inv_context_s1(void *cookie) @@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) free_io_pgtable_ops(mmu->pgtbl_ops); } -static struct panfrost_gem_object * -addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) +static struct panfrost_gem_mapping * +addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) { - struct panfrost_gem_object *bo = NULL; + struct panfrost_gem_mapping *mapping = NULL; struct panfrost_file_priv *priv; struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; @@ -418,8 +423,9 @@ found_mmu: drm_mm_for_each_node(node, &priv->mm) { if (offset >= node->start && offset < (node->start + node->size)) { - bo = drm_mm_node_to_panfrost_bo(node); - drm_gem_object_get(&bo->base.base); + mapping = drm_mm_node_to_panfrost_mapping(node); + + kref_get(&mapping->refcount); break; } } @@ -427,7 +433,7 @@ found_mmu: spin_unlock(&priv->mm_lock); out: spin_unlock(&pfdev->as_lock); - return bo; + return mapping; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) { int ret, i; + struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; 
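	/*
	 * Heap faults below are served at 2MB granularity: the faulting
	 * GPU VA is rounded down (addr &= ~((u64)SZ_2M - 1)) and backed
	 * by NUM_FAULT_PAGES == SZ_2M / PAGE_SIZE pages in one go,
	 * relying on the 2MB alignment and size multiple assumed for
	 * heap BOs.
	 */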
pgoff_t page_offset; struct sg_table *sgt; struct page **pages; - bo = addr_to_drm_mm_node(pfdev, as, addr); - if (!bo) + bomapping = addr_to_mapping(pfdev, as, addr); + if (!bomapping) return -ENOENT; + bo = bomapping->obj; if (!bo->is_heap) { dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", - bo->node.start << PAGE_SHIFT); + bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; } - WARN_ON(bo->mmu->as != as); + WARN_ON(bomapping->mmu->as != as); /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; - page_offset -= bo->node.start; + page_offset -= bomapping->mmnode.start; mutex_lock(&bo->base.pages_lock); @@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_map; } - mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); - bo->is_mapped = true; + bomapping->active = true; dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); - drm_gem_object_put_unlocked(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 7c5b6775ae23..44fc2edf63ce 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,12 +4,12 @@ #ifndef __PANFROST_MMU_H__ #define __PANFROST_MMU_H__ -struct panfrost_gem_object; +struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; -int panfrost_mmu_map(struct panfrost_gem_object *bo); -void panfrost_mmu_unmap(struct panfrost_gem_object *bo); +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping); +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 2c04e858c50a..684820448be3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -25,7 +25,7 @@ #define V4_SHADERS_PER_COREGROUP 4 struct panfrost_perfcnt { - struct panfrost_gem_object *bo; + struct panfrost_gem_mapping *mapping; size_t bosize; void *buf; struct panfrost_file_priv *user; @@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) int ret; reinit_completion(&pfdev->perfcnt->dump_comp); - gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT; + gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva); gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32); gpu_write(pfdev, GPU_INT_CLEAR, @@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (IS_ERR(bo)) return PTR_ERR(bo); - perfcnt->bo = to_panfrost_bo(&bo->base); - /* Map the perfcnt buf in the address space attached to file_priv. 
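 * Once panfrost_gem_open() has created the per-FD mapping, the code
 * takes its own reference via panfrost_gem_mapping_get() and drops the
 * raw BO reference; the mapping's kref is then what keeps the buffer's
 * GPU VA alive until panfrost_perfcnt_disable_locked() puts it.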
*/ - ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv); + ret = panfrost_gem_open(&bo->base, file_priv); if (ret) goto err_put_bo; + perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), + user); + if (!perfcnt->mapping) { + ret = -EINVAL; + goto err_close_bo; + } + perfcnt->buf = drm_gem_shmem_vmap(&bo->base); if (IS_ERR(perfcnt->buf)) { ret = PTR_ERR(perfcnt->buf); - goto err_close_bo; + goto err_put_mapping; } /* @@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186)) gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff); + /* The BO ref is retained by the mapping. */ + drm_gem_object_put_unlocked(&bo->base); + return 0; err_vunmap: - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); +err_put_mapping: + panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: - panfrost_gem_close(&perfcnt->bo->base.base, file_priv); + panfrost_gem_close(&bo->base, file_priv); err_put_bo: drm_gem_object_put_unlocked(&bo->base); return ret; @@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); perfcnt->buf = NULL; - panfrost_gem_close(&perfcnt->bo->base.base, file_priv); - drm_gem_object_put_unlocked(&perfcnt->bo->base.base); - perfcnt->bo = NULL; + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); + panfrost_gem_mapping_put(perfcnt->mapping); + perfcnt->mapping = NULL; pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 83c4586665b4..81ac9b658a70 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -95,7 +95,7 @@ struct cdn_dp_device { struct cdn_dp_port *port[MAX_PHY]; u8 ports; u8 max_lanes; - u8 max_rate; + unsigned int max_rate; u8 lanes; int active_port; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 42651d737c55..c81cdce6ed55 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 1; + tcon->dclk_min_div = tcon->quirks->dclk_min_div; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); @@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, static const struct sun4i_tcon_quirks sun4i_a10_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun4i_a10_tcon_set_mux, }; static const struct sun4i_tcon_quirks sun5i_a13_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun5i_a13_tcon_set_mux, }; @@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = { .has_channel_1 = true, .has_lvds_alt = true, .needs_de_be_mux = true, + .dclk_min_div = 1, .set_mux = sun6i_tcon_set_mux, }; @@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { .has_channel_0 = true, .has_channel_1 = true, .needs_de_be_mux = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun7i_a20_quirks = { .has_channel_0 = true, .has_channel_1 = 
true, + .dclk_min_div = 4, /* Same display pipeline structure as A10 */ .set_mux = sun4i_a10_tcon_set_mux, }; @@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = { static const struct sun4i_tcon_quirks sun8i_a33_quirks = { .has_channel_0 = true, .has_lvds_alt = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { .supports_lvds = true, .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { @@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = { - .has_channel_0 = true, - .needs_edp_reset = true, + .has_channel_0 = true, + .needs_edp_reset = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index f9f1fe80b206..a62ec826ae71 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -224,6 +224,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 390524143139..1635a9ff4794 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (!objs) return; virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); virtio_gpu_cmd_transfer_to_host_2d (vgdev, 0, plane->state->crtc_w, diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 8063b1d567b1..e6e4c841fb06 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && - (usage->hid & HID_USAGE) != 0x00 && !usage->type) { + (usage->hid & HID_USAGE) != 0x00 && + (usage->hid & HID_USAGE) != 0xff && !usage->type) { hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", usage->hid & HID_USAGE); } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e0b241bd3070..851fe54ea59e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign offset = report->size; report->size += parser->global.report_size * parser->global.report_count; + /* Total size check: Allow for possible report index byte */ + if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { + hid_err(parser->device, "report is too long\n"); + return -1; + } + if (!parser->local.usage_index) /* Ignore padding fields */ return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7e1689ef35f5..3a400ce603c4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -631,6 +631,7 @@ #define USB_VENDOR_ID_ITE 0x048d #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define 
USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 +#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396 #define USB_DEVICE_ID_ITE8595 0x8595 @@ -730,6 +731,7 @@ #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 #define I2C_DEVICE_ID_LG_8001 0x8001 +#define I2C_DEVICE_ID_LG_7010 0x7010 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -1102,6 +1104,7 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 63855f275a38..dea9cc65bf80 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: - if (device->driver->input_mapped && device->driver->input_mapped(device, - hidinput, field, usage, &bit, &max) < 0) - goto ignore; + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { + /* + * The driver indicated that no further generic handling + * of the usage is desired. + */ + return; + } set_bit(usage->type, input->evbit); @@ -1215,9 +1221,11 @@ mapped: set_bit(MSC_SCAN, input->mscbit); } -ignore: return; +ignore: + usage->type = 0; + usage->code = 0; } static void hidinput_handle_scroll(struct hid_usage *usage, diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index a45f2352618d..c436e12feb23 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. 
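 * The attached touchpad makes the combined device enumerate with the
 * Synaptics vendor ID, so the entry below matches
 * USB_VENDOR_ID_SYNAPTICS with the _ACER_SWITCH5_012 product ID so
 * that hid-ite also binds this keyboard controller.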
*/ + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..70e1cb928bf0 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 +#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) +#define HIDPP_REPORT_LONG_SUPPORTED BIT(1) +#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) + #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 @@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) +#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) /* * There are two hidpp protocols in use, the first version hidpp10 is known @@ -135,12 +140,15 @@ struct hidpp_report { struct hidpp_battery { u8 feature_index; u8 solar_feature_index; + u8 voltage_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; + int voltage; + int charge_type; bool online; }; @@ -183,9 +191,12 @@ struct hidpp_device { unsigned long quirks; unsigned long capabilities; + u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; + + u8 wireless_feature_index; }; /* HID++ 1.0 error codes */ @@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, struct hidpp_report *message; int ret, max_count; + /* Send as long report if short reports are not supported. 
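 * Which lengths a device accepts is tracked in the supported_reports
 * bitmask (HIDPP_REPORT_SHORT/LONG/VERY_LONG_SUPPORTED above); when
 * BIT(0) is absent the request is simply re-issued as a long report,
 * whose 20-byte format carries the same header with a larger parameter
 * payload.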
*/ + if (report_id == REPORT_ID_HIDPP_SHORT && + !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) + report_id = REPORT_ID_HIDPP_LONG; + switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; @@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question, (answer->fap.params[0] == question->fap.funcindex_clientid); } -static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) +static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, + struct hidpp_report *report) { - return (report->report_id == REPORT_ID_HIDPP_SHORT) && - (report->rap.sub_id == 0x41); + return (hidpp->wireless_feature_index && + (report->fap.feature_index == hidpp->wireless_feature_index)) || + ((report->report_id == REPORT_ID_HIDPP_SHORT) && + (report->rap.sub_id == 0x41)); } /** @@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp, return 0; } +/* -------------------------------------------------------------------------- */ +/* 0x1001: Battery voltage */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 + +#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 + +#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 + +static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, + int *level, int *charge_type) +{ + int status; + + long charge_sts = (long)data[2]; + + *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + switch (data[2] & 0xe0) { + case 0x00: + status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 0x20: + status = POWER_SUPPLY_STATUS_FULL; + *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + break; + case 0x40: + status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case 0xe0: + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + default: + status = POWER_SUPPLY_STATUS_UNKNOWN; + } + + *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + if (test_bit(3, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; + } + if (test_bit(4, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + } + + if (test_bit(5, &charge_sts)) { + *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + } + + *voltage = get_unaligned_be16(data); + + return status; +} + +static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, + u8 feature_index, + int *status, int *voltage, + int *level, int *charge_type) +{ + struct hidpp_report response; + int ret; + u8 *params = (u8 *)response.fap.params; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, + NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; + + *status = hidpp20_battery_map_status_voltage(params, voltage, + level, charge_type); + + return 0; +} + +static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + int status, voltage, level, charge_type; + + if (hidpp->battery.voltage_feature_index == 0xff) { + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, + &hidpp->battery.voltage_feature_index, + &feature_type); + if (ret) + return ret; + } + + ret = hidpp20_battery_get_battery_voltage(hidpp, + hidpp->battery.voltage_feature_index, + &status, &voltage, &level, &charge_type); + + if (ret) + return ret; + + hidpp->battery.status = status; + 
hidpp->battery.voltage = voltage; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + return 0; +} + +static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, + u8 *data, int size) +{ + struct hidpp_report *report = (struct hidpp_report *)data; + int status, voltage, level, charge_type; + + if (report->fap.feature_index != hidpp->battery.voltage_feature_index || + report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) + return 0; + + status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, + &level, &charge_type); + + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { + hidpp->battery.voltage = voltage; + hidpp->battery.status = status; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + if (hidpp->battery.ps) + power_supply_changed(hidpp->battery.ps); + } + return 0; +} + static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, @@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ + 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, @@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + /* hardware reports voltage in mV. sysfs expects uV */ + val->intval = hidpp->battery.voltage * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = hidpp->battery.charge_type; + break; default: ret = -EINVAL; break; @@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy, } /* -------------------------------------------------------------------------- */ +/* 0x1d4b: Wireless device status */ +/* -------------------------------------------------------------------------- */ +#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b + +static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + + ret = hidpp_root_get_feature(hidpp, + HIDPP_PAGE_WIRELESS_DEVICE_STATUS, + &hidpp->wireless_feature_index, + &feature_type); + + return ret; +} + +/* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ @@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, } } - if (unlikely(hidpp_report_is_connect_event(report))) { + if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { atomic_set(&hidpp->connected, !(report->rap.params[0] & (1 << 6))); if (schedule_work(&hidpp->work) == 0) @@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; + ret = hidpp20_battery_voltage_event(hidpp, data, size); + if (ret != 0) + return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { @@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) hidpp->battery.feature_index = 0xff;
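The 0x1001 payload decoded by hidpp20_battery_map_status_voltage() above is three bytes: a big-endian voltage in mV followed by a status byte whose top three bits select the charging state, with bits 3 and 4 flagging fast and trickle charging and bit 5 a critical level. A minimal stand-alone sketch of the same decoding; the enum and function names are local stand-ins, not the kernel's constants:

#include <stdint.h>
#include <stdio.h>

enum charge_state { CHARGING, FULL, DISCHARGING, NOT_CHARGING, UNKNOWN };

static enum charge_state decode_voltage(const uint8_t data[3], unsigned int *mv)
{
	/* Bytes 0-1: big-endian voltage in millivolts. */
	*mv = ((unsigned int)data[0] << 8) | data[1];

	/* Byte 2, bits 7-5: charging state. */
	switch (data[2] & 0xe0) {
	case 0x00: return CHARGING;
	case 0x20: return FULL;
	case 0x40: return DISCHARGING;
	case 0xe0: return NOT_CHARGING;
	default:   return UNKNOWN;
	}
}

int main(void)
{
	const uint8_t sample[3] = { 0x0f, 0xa0, 0x20 }; /* 4000 mV, full */
	unsigned int mv;
	enum charge_state st = decode_voltage(sample, &mv);

	printf("%u mV, state %d\n", mv, st); /* prints "4000 mV, state 1" */
	return 0;
}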
hidpp->battery.solar_feature_index = 0xff; + hidpp->battery.voltage_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); - else - ret = hidpp20_query_battery_info(hidpp); + else { + ret = hidpp20_query_battery_voltage_info(hidpp); + if (ret) + ret = hidpp20_query_battery_info(hidpp); + } if (ret) return ret; @@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) if (!battery_props) return -ENOMEM; - num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2; + num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) battery_props[num_battery_props++] = @@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + battery_props[num_battery_props++] = + POWER_SUPPLY_PROP_VOLTAGE_NOW; + battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; @@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { - hidpp20_query_battery_info(hidpp); + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + hidpp20_query_battery_voltage_info(hidpp); + else + hidpp20_query_battery_info(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); @@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) return report->field[0]->report_count + 1; } -static bool hidpp_validate_device(struct hid_device *hdev) +static u8 hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); - int id, report_length, supported_reports = 0; + int id, report_length; + u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); @@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; @@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; @@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } @@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ - if (!hidpp_validate_device(hdev)) { + hidpp->supported_reports = hidpp_validate_device(hdev); + + if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); @@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); - hid_hw_stop(hdev); goto hid_hw_open_fail; } @@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, 
const struct hid_device_id *id) hidpp_overwrite_name(hdev); } + if (connected && hidpp->protocol_major >= 2) { + ret = hidpp_set_wireless_feature_index(hidpp); + if (ret == -ENOENT) + hidpp->wireless_feature_index = 0; + else if (ret) + goto hid_hw_init_fail; + } + if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { ret = wtp_get_config(hidpp); if (ret) @@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = { { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech MX Master 2S */ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master 3 */ + LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ @@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* MX Master mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* MX Master 3 mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, {} }; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3cfeb1629f79..362805ddf377 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, tool = MT_TOOL_DIAL; else if (unlikely(!confidence_state)) { tool = MT_TOOL_PALM; - if (!active && + if (!active && mt && input_mt_is_active(&mt->slots[slotnum])) { /* * The non-confidence was reported for @@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_LG, HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, + { .driver_data = MT_CLS_LG, + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index d1b39c29e353..0e7b2d998395 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, { 0 } }; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8dae0f9b819e..6286204d4c56 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + 
steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c3fc0ceb8096..2eee5e31c2b7 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -249,13 +249,14 @@ out: static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) - return EPOLLIN | EPOLLRDNORM | EPOLLOUT; + mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist) - return EPOLLERR | EPOLLHUP; - return 0; + mask |= EPOLLERR | EPOLLHUP; + return mask; } static int hidraw_open(struct inode *inode, struct file *file) @@ -450,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; } + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { + int len = strlen(hid->uniq) + 1; + if (len > _IOC_SIZE(cmd)) + len = _IOC_SIZE(cmd); + ret = copy_to_user(user_arg, hid->uniq, len) ? + -EFAULT : len; + break; + } } ret = -ENOTTY; diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index a358e61fbc82..009000c5d55c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -49,6 +49,8 @@ #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) +#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) + /* flags */ #define I2C_HID_STARTED 0 @@ -175,6 +177,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_BOGUS_IRQ }, { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, I2C_HID_QUIRK_RESET_ON_RESUME }, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } }; @@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) } if ((ret_size > size) || (ret_size < 2)) { - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", - __func__, size, ret_size); - return; + if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) { + ihid->inbuf[0] = size & 0xff; + ihid->inbuf[1] = size >> 8; + ret_size = size; + } else { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; + } } i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf); diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 6c1e6110867f..1fb294ca463e 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,7 +24,9 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define CMP_H_DEVICE_ID 0x06FC #define EHL_Ax_DEVICE_ID 0x4BB3 +#define TGL_LP_DEVICE_ID 0xA0FC #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 784dcc8c7022..f491d8b4e24c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git 
a/drivers/hid/uhid.c b/drivers/hid/uhid.c index fa0cc0899827..8fe3efcb8327 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -766,13 +766,14 @@ unlock: static __poll_t uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return 0; + return mask; } static const struct file_operations uhid_fops = { diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e421cdf2d1a4..a970b809d778 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file) return 0; } +static int __hiddev_open(struct hiddev *hiddev, struct file *file) +{ + struct hiddev_list *list; + int error; + + lockdep_assert_held(&hiddev->existancelock); + + list = vzalloc(sizeof(*list)); + if (!list) + return -ENOMEM; + + mutex_init(&list->thread_lock); + list->hiddev = hiddev; + + if (!hiddev->open++) { + error = hid_hw_power(hiddev->hid, PM_HINT_FULLON); + if (error < 0) + goto err_drop_count; + + error = hid_hw_open(hiddev->hid); + if (error < 0) + goto err_normal_power; + } + + spin_lock_irq(&hiddev->list_lock); + list_add_tail(&list->node, &hiddev->list); + spin_unlock_irq(&hiddev->list_lock); + + file->private_data = list; + + return 0; + +err_normal_power: + hid_hw_power(hiddev->hid, PM_HINT_NORMAL); +err_drop_count: + hiddev->open--; + vfree(list); + return error; +} + /* * open file op */ static int hiddev_open(struct inode *inode, struct file *file) { - struct hiddev_list *list; struct usb_interface *intf; struct hid_device *hid; struct hiddev *hiddev; @@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file) intf = usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; + hid = usb_get_intfdata(intf); hiddev = hid->hiddev; - if (!(list = vzalloc(sizeof(struct hiddev_list)))) - return -ENOMEM; - mutex_init(&list->thread_lock); - list->hiddev = hiddev; - file->private_data = list; - - /* - * no need for locking because the USB major number - * is shared which usbcore guards against disconnect - */ - if (list->hiddev->exist) { - if (!list->hiddev->open++) { - res = hid_hw_open(hiddev->hid); - if (res < 0) - goto bail; - } - } else { - res = -ENODEV; - goto bail; - } - - spin_lock_irq(&list->hiddev->list_lock); - list_add_tail(&list->node, &hiddev->list); - spin_unlock_irq(&list->hiddev->list_lock); - mutex_lock(&hiddev->existancelock); - /* - * recheck exist with existance lock held to - * avoid opening a disconnected device - */ - if (!list->hiddev->exist) { - res = -ENODEV; - goto bail_unlock; - } - if (!list->hiddev->open++) - if (list->hiddev->exist) { - struct hid_device *hid = hiddev->hid; - res = hid_hw_power(hid, PM_HINT_FULLON); - if (res < 0) - goto bail_unlock; - res = hid_hw_open(hid); - if (res < 0) - goto bail_normal_power; - } - mutex_unlock(&hiddev->existancelock); - return 0; -bail_normal_power: - hid_hw_power(hid, PM_HINT_NORMAL); -bail_unlock: + res = hiddev->exist ? 
__hiddev_open(hiddev, file) : -ENODEV; mutex_unlock(&hiddev->existancelock); - spin_lock_irq(&list->hiddev->list_lock); - list_del(&list->node); - spin_unlock_irq(&list->hiddev->list_lock); -bail: - file->private_data = NULL; - vfree(list); return res; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index ccb74529bc78..d99a9d407671 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ hdev->product == 0x392 || /* Intuos Pro 2 */ - hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */ + hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ + hdev->product == 0x3AA)) { /* MobileStudio Pro */ value = (field->logical_maximum - value); if (hdev->product == 0x357 || hdev->product == 0x358 || hdev->product == 0x392) value = wacom_offset_rotation(input, usage, value, 3, 16); else if (hdev->product == 0x34d || hdev->product == 0x34e || - hdev->product == 0x398 || hdev->product == 0x399) + hdev->product == 0x398 || hdev->product == 0x399 || + hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } else { diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 23dfe848979a..47ac20aee06f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -164,6 +164,16 @@ config SENSORS_ADM1031 This driver can also be built as a module. If so, the module will be called adm1031. +config SENSORS_ADM1177 + tristate "Analog Devices ADM1177 and compatibles" + depends on I2C + help + If you say yes here you get support for Analog Devices ADM1177 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called adm1177. + config SENSORS_ADM9240 tristate "Analog Devices ADM9240 and compatibles" depends on I2C @@ -385,6 +395,16 @@ config SENSORS_ATXP1 This driver can also be built as a module. If so, the module will be called atxp1. +config SENSORS_DRIVETEMP + tristate "Hard disk drives with temperature sensors" + depends on SCSI && ATA + help + If you say yes here you get support for the temperature sensor on + hard disk drives. + + This driver can also be built as a module. If so, the module + will be called drivetemp. + config SENSORS_DS620 tristate "Dallas Semiconductor DS620" depends on I2C @@ -889,7 +909,7 @@ config SENSORS_MAX197 will be called max197. config SENSORS_MAX31722 -tristate "MAX31722 temperature sensor" + tristate "MAX31722 temperature sensor" depends on SPI help Support for the Maxim Integrated MAX31722/MAX31723 digital @@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor" This driver can also be built as a module. If so, the module will be called max31722. +config SENSORS_MAX31730 + tristate "MAX31730 temperature sensor" + depends on I2C + help + Support for the Maxim Integrated MAX31730 3-Channel Remote + Temperature Sensor. + + This driver can also be built as a module. If so, the module + will be called max31730. + config SENSORS_MAX6621 tristate "Maxim MAX6621 sensor chip" depends on I2C @@ -1905,7 +1935,7 @@ config SENSORS_W83627HF will be called w83627hf.
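Once one of these drivers binds, its readings surface through the standard hwmon sysfs attributes, which report plain integers in milli-units (millidegrees Celsius for tempX_input). A small user-space sketch, assuming a hypothetical /sys/class/hwmon/hwmon0 instance and a non-negative reading:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the hwmon index is assigned at registration time. */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long mdeg;

	if (!f) {
		perror("temp1_input");
		return 1;
	}
	if (fscanf(f, "%ld", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* 35000 -> "35.000 degrees C" */
	printf("%ld.%03ld degrees C\n", mdeg / 1000, mdeg % 1000);
	return 0;
}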
config SENSORS_W83627EHF - tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F" + tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG" depends on !PPC select HWMON_VID help @@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF the Core 2 Duo. And also the W83627UHG, which is a stripped down version of the W83627DHG (as far as hardware monitoring goes.) - This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F - (also known as W83667HG-I), and NCT6776F. + This driver also supports Nuvoton W83667HG and W83667HG-B. This driver can also be built as a module. If so, the module will be called w83627ehf. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 6db5db9cdc29..613f50987965 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o +obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o @@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o +obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o obj-$(CONFIG_SENSORS_DS620) += ds620.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o @@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o obj-$(CONFIG_SENSORS_MAX1668) += max1668.o obj-$(CONFIG_SENSORS_MAX197) += max197.o obj-$(CONFIG_SENSORS_MAX31722) += max31722.o +obj-$(CONFIG_SENSORS_MAX31730) += max31730.o obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6642) += max6642.o diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c new file mode 100644 index 000000000000..d314223a404a --- /dev/null +++ b/drivers/hwmon/adm1177.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin + * + * Copyright 2015-2019 Analog Devices Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +/* Command Byte Operations */ +#define ADM1177_CMD_V_CONT BIT(0) +#define ADM1177_CMD_I_CONT BIT(2) +#define ADM1177_CMD_VRANGE BIT(4) + +/* Extended Register */ +#define ADM1177_REG_ALERT_TH 2 + +#define ADM1177_BITS 12 + +/** + * struct adm1177_state - driver instance specific data + * @client: pointer to i2c client + * @reg: regulator info for the power supply of the device + * @r_sense_uohm: current sense resistor value + * @alert_threshold_ua: current limit for shutdown + * @vrange_high: internal voltage divider + */ +struct adm1177_state { + struct i2c_client *client; + struct regulator *reg; + u32 r_sense_uohm; + u32 alert_threshold_ua; + bool vrange_high; +}; + +static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data) +{ + return i2c_master_recv(st->client, data, num); +} + +static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd) +{ + return i2c_smbus_write_byte(st->client, cmd); +} + +static int adm1177_write_alert_thr(struct adm1177_state *st, + u32 alert_threshold_ua) +{ + u64 val; + int ret; + + val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm; + val = div_u64(val, 105840000U); + val = div_u64(val, 1000U); + if (val > 0xFF) + val = 0xFF; + + ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH, + val); + if (ret) + return ret; + + st->alert_threshold_ua = alert_threshold_ua; + return 0; +} + +static int adm1177_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + u8 data[3]; + long dummy; + int ret; + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[1] << 4) | (data[2] & 0xF); + /* + * convert to milliamperes + * ((105.84mV / 4096) x raw) / senseResistor(ohm) + */ + *val = div_u64((105840000ull * dummy), + 4096 * st->r_sense_uohm); + return 0; + case hwmon_curr_max_alarm: + *val = st->alert_threshold_ua; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_in: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[0] << 4) | (data[2] >> 4); + /* + * convert to millivolts based on resistor division + * (V_fullscale / 4096) * raw + */ + if (st->vrange_high) + dummy *= 26350; + else + dummy *= 6650; + + *val = DIV_ROUND_CLOSEST(dummy, 4096); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int adm1177_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_max_alarm: + adm1177_write_alert_thr(st, val); + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static umode_t adm1177_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct adm1177_state *st = data; + + switch (type) { + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return 0444; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + if (st->r_sense_uohm) + return 0444; + return 0; + case hwmon_curr_max_alarm: + if (st->r_sense_uohm) + return 0644; + return 0; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info
*adm1177_info[] = { + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_MAX_ALARM), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_ops adm1177_hwmon_ops = { + .is_visible = adm1177_is_visible, + .read = adm1177_read, + .write = adm1177_write, +}; + +static const struct hwmon_chip_info adm1177_chip_info = { + .ops = &adm1177_hwmon_ops, + .info = adm1177_info, +}; + +static void adm1177_remove(void *data) +{ + struct adm1177_state *st = data; + + regulator_disable(st->reg); +} + +static int adm1177_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct adm1177_state *st; + u32 alert_threshold_ua; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->client = client; + + st->reg = devm_regulator_get_optional(&client->dev, "vref"); + if (IS_ERR(st->reg)) { + if (PTR_ERR(st->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + st->reg = NULL; + } else { + ret = regulator_enable(st->reg); + if (ret) + return ret; + ret = devm_add_action_or_reset(&client->dev, adm1177_remove, + st); + if (ret) + return ret; + } + + if (device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &st->r_sense_uohm)) + st->r_sense_uohm = 0; + if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp", + &alert_threshold_ua)) { + if (st->r_sense_uohm) + /* + * set maximum default value from datasheet based on + * shunt-resistor + */ + alert_threshold_ua = div_u64(105840000000, + st->r_sense_uohm); + else + alert_threshold_ua = 0; + } + st->vrange_high = device_property_read_bool(dev, + "adi,vrange-high-enable"); + if (alert_threshold_ua && st->r_sense_uohm) + adm1177_write_alert_thr(st, alert_threshold_ua); + + ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT | + ADM1177_CMD_I_CONT | + (st->vrange_high ? 
0 : ADM1177_CMD_VRANGE)); + if (ret) + return ret; + + hwmon_dev = + devm_hwmon_device_register_with_info(dev, client->name, st, + &adm1177_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id adm1177_id[] = { + {"adm1177", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, adm1177_id); + +static const struct of_device_id adm1177_dt_ids[] = { + { .compatible = "adi,adm1177" }, + {}, +}; +MODULE_DEVICE_TABLE(of, adm1177_dt_ids); + +static struct i2c_driver adm1177_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "adm1177", + .of_match_table = adm1177_dt_ids, + }, + .probe = adm1177_probe, + .id_table = adm1177_id, +}; +module_i2c_driver(adm1177_driver); + +MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>"); +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 6c64d50c9aae..01c2eeb02aa9 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) long reg; if (bypass_attn & (1 << channel)) - reg = (volt * 1024) / 2250; + reg = DIV_ROUND_CLOSEST(volt * 1024, 2250); else - reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250); + reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024, + (r[0] + r[1]) * 2250); return clamp_val(reg, 0, 1023) & (0xff << 2); } diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c new file mode 100644 index 000000000000..370d0c74eb01 --- /dev/null +++ b/drivers/hwmon/drivetemp.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon client for disk and solid state drives with temperature sensors + * Copyright (C) 2019 Zodiac Inflight Innovations + * + * With input from: + * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors. + * (C) 2018 Linus Walleij + * + * hwmon: Driver for SCSI/ATA temperature sensors + * by Constantin Baranov <const@mimas.ru>, submitted September 2009 + * + * This driver supports reporting the temperature of SATA drives. It can be + * easily extended to report the temperature of SCSI drives. + * + * The primary means to read drive temperatures and temperature limits + * for ATA drives is the SCT Command Transport feature set as specified in + * ATA8-ACS. + * It can be used to read the current drive temperature, temperature limits, + * and historic minimum and maximum temperatures. The SCT Command Transport + * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set + * (ATA8-ACS)". + * + * If the SCT Command Transport feature set is not available, drive temperatures + * may be readable through SMART attributes. Since SMART attributes are not well + * defined, this method is only used as a fallback mechanism. + * + * There are three SMART attributes which may report drive temperatures. + * Those are defined as follows (from + * http://www.cropel.com/library/smart-attribute-list.aspx). + * + * 190 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 194 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 231 Temperature Temperature, monitored by a sensor somewhere inside + * the drive.
Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * Wikipedia defines attributes a bit differently. + * + * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer + * Difference or to set a minimum threshold which corresponds to a + * Airflow maximum temperature. This also follows the convention of + * Temperature 100 being a best-case value and lower values being + * undesirable. However, some older drives may instead + * report raw Temperature (identical to 0xC2) or + * Temperature minus 50 here. + * 194 Temperature or Indicates the device temperature, if the appropriate + * Temperature sensor is fitted. Lowest byte of the raw value contains + * Celsius the exact temperature value (Celsius degrees). + * 231 Life Left Indicates the approximate SSD life left, in terms of + * (SSDs) or program/erase cycles or available reserved blocks. + * Temperature A normalized value of 100 represents a new drive, with + * a threshold value at 10 indicating a need for + * replacement. A value of 0 may mean that the drive is + * operating in read-only mode to allow data recovery. + * Previously (pre-2010) occasionally used for Drive + * Temperature (more typically reported at 0xC2). + * + * The common denominator is that the first raw byte reports the temperature + * in degrees C on almost all drives. Some drives may report a fractional + * temperature in the second raw byte. + * + * Known exceptions (from libatasmart): + * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths + * of a degree C in the first two raw bytes. + * - A few Maxtor drives report an unknown or bad value in attribute 194. + * - Certain Apple SSD drives report an unknown value in attribute 190. + * Only certain firmware versions are affected. + * + * Those exceptions affect older ATA drives and are currently ignored. + * Also, the second raw byte (possibly reporting the fractional temperature) + * is currently ignored. + * + * Many drives also report temperature limits in additional SMART data raw + * bytes. The format of those is not well defined and varies widely. + * The driver does not currently attempt to report those limits. + * + * According to data in smartmontools, attribute 231 is rarely used to report + * drive temperatures. At the same time, several drives report SSD life left + * in attribute 231, but do not support temperature sensors. For this reason, + * attribute 231 is currently ignored. + * + * Following the above definitions, temperatures are reported as follows. + * - If SCT Command Transport is supported, it is used to read the + * temperature and, if available, temperature limits. + * - Otherwise, if SMART attribute 194 is supported, it is used to read + * the temperature. + * - Otherwise, if SMART attribute 190 is supported, it is used to read + * the temperature.
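+ * For scale: an SCT status byte of 0x23 (35 degrees C) ends up as
+ * 35000 in hwmon's millidegree units, and the byte value 0x80 marks
+ * an invalid reading.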
+ */ + +#include <linux/ata.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_driver.h> +#include <scsi/scsi_proto.h> + +struct drivetemp_data { + struct list_head list; /* list of instantiated devices */ + struct mutex lock; /* protect data buffer accesses */ + struct scsi_device *sdev; /* SCSI device */ + struct device *dev; /* instantiating device */ + struct device *hwdev; /* hardware monitoring device */ + u8 smartdata[ATA_SECT_SIZE]; /* local buffer */ + int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val); + bool have_temp_lowest; /* lowest temp in SCT status */ + bool have_temp_highest; /* highest temp in SCT status */ + bool have_temp_min; /* have min temp */ + bool have_temp_max; /* have max temp */ + bool have_temp_lcrit; /* have lower critical limit */ + bool have_temp_crit; /* have critical limit */ + int temp_min; /* min temp */ + int temp_max; /* max temp */ + int temp_lcrit; /* lower critical limit */ + int temp_crit; /* critical limit */ +}; + +static LIST_HEAD(drivetemp_devlist); + +#define ATA_MAX_SMART_ATTRS 30 +#define SMART_TEMP_PROP_190 190 +#define SMART_TEMP_PROP_194 194 + +#define SCT_STATUS_REQ_ADDR 0xe0 +#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */ +#define SCT_STATUS_VERSION_HIGH 1 +#define SCT_STATUS_TEMP 200 +#define SCT_STATUS_TEMP_LOWEST 201 +#define SCT_STATUS_TEMP_HIGHEST 202 +#define SCT_READ_LOG_ADDR 0xe1 +#define SMART_READ_LOG 0xd5 +#define SMART_WRITE_LOG 0xd6 + +#define INVALID_TEMP 0x80 + +#define temp_is_valid(temp) ((temp) != INVALID_TEMP) +#define temp_from_sct(temp) (((s8)(temp)) * 1000) + +static inline bool ata_id_smart_supported(u16 *id) +{ + return id[ATA_ID_COMMAND_SET_1] & BIT(0); +} + +static inline bool ata_id_smart_enabled(u16 *id) +{ + return id[ATA_ID_CFS_ENABLE_1] & BIT(0); +} + +static int drivetemp_scsi_command(struct drivetemp_data *st, + u8 ata_command, u8 feature, + u8 lba_low, u8 lba_mid, u8 lba_high) +{ + u8 scsi_cmd[MAX_COMMAND_SIZE]; + int data_dir; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = ATA_16; + if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) { + scsi_cmd[1] = (5 << 1); /* PIO Data-out */ + /* + * No off.line or cc, write to dev, block count in sector count + * field. + */ + scsi_cmd[2] = 0x06; + data_dir = DMA_TO_DEVICE; + } else { + scsi_cmd[1] = (4 << 1); /* PIO Data-in */ + /* + * No off.line or cc, read from dev, block count in sector count + * field. 
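+ * (Each transfer is a single 512-byte sector: scsi_cmd[6] is set to 1
+ * below and the data buffer is ATA_SECT_SIZE bytes.)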
+ */ + scsi_cmd[2] = 0x0e; + data_dir = DMA_FROM_DEVICE; + } + scsi_cmd[4] = feature; + scsi_cmd[6] = 1; /* 1 sector */ + scsi_cmd[8] = lba_low; + scsi_cmd[10] = lba_mid; + scsi_cmd[12] = lba_high; + scsi_cmd[14] = ata_command; + + return scsi_execute_req(st->sdev, scsi_cmd, data_dir, + st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5, + NULL); +} + +static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature, + u8 select) +{ + return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select, + ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS); +} + +static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr, + long *temp) +{ + u8 *buf = st->smartdata; + bool have_temp = false; + u8 temp_raw; + u8 csum; + int err; + int i; + + err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0); + if (err) + return err; + + /* Checksum the read value table */ + csum = 0; + for (i = 0; i < ATA_SECT_SIZE; i++) + csum += buf[i]; + if (csum) { + dev_dbg(&st->sdev->sdev_gendev, + "checksum error reading SMART values\n"); + return -EIO; + } + + for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) { + u8 *attr = buf + i * 12; + int id = attr[2]; + + if (!id) + continue; + + if (id == SMART_TEMP_PROP_190) { + temp_raw = attr[7]; + have_temp = true; + } + if (id == SMART_TEMP_PROP_194) { + temp_raw = attr[7]; + have_temp = true; + break; + } + } + + if (have_temp) { + *temp = temp_raw * 1000; + return 0; + } + + return -ENXIO; +} + +static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) +{ + u8 *buf = st->smartdata; + int err; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + return err; + switch (attr) { + case hwmon_temp_input: + *val = temp_from_sct(buf[SCT_STATUS_TEMP]); + break; + case hwmon_temp_lowest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]); + break; + case hwmon_temp_highest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int drivetemp_identify_sata(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + u8 *buf = st->smartdata; + struct scsi_vpd *vpd; + bool is_ata, is_sata; + bool have_sct_data_table; + bool have_sct_temp; + bool have_smart; + bool have_sct; + u16 *ata_id; + u16 version; + long temp; + int err; + + /* SCSI-ATA Translation present? */ + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + + /* + * Verify that ATA IDENTIFY DEVICE data is included in ATA Information + * VPD and that the drive implements the SATA protocol. 
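+ * (Concretely, the checks below require the VPD command code byte to
+ * be the IDENTIFY DEVICE opcode and the transport signature to
+ * indicate SATA.)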
+ */ + if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA || + vpd->data[36] != 0x34) { + rcu_read_unlock(); + return -ENODEV; + } + ata_id = (u16 *)&vpd->data[60]; + is_ata = ata_id_is_ata(ata_id); + is_sata = ata_id_is_sata(ata_id); + have_sct = ata_id_sct_supported(ata_id); + have_sct_data_table = ata_id_sct_data_tables(ata_id); + have_smart = ata_id_smart_supported(ata_id) && + ata_id_smart_enabled(ata_id); + + rcu_read_unlock(); + + /* bail out if this is not a SATA device */ + if (!is_ata || !is_sata) + return -ENODEV; + if (!have_sct) + goto skip_sct; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct; + + version = (buf[SCT_STATUS_VERSION_HIGH] << 8) | + buf[SCT_STATUS_VERSION_LOW]; + if (version != 2 && version != 3) + goto skip_sct; + + have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]); + if (!have_sct_temp) + goto skip_sct; + + st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]); + st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]); + + if (!have_sct_data_table) + goto skip_sct; + + /* Request and read temperature history table */ + memset(buf, '\0', sizeof(st->smartdata)); + buf[0] = 5; /* data table command */ + buf[2] = 1; /* read table */ + buf[4] = 2; /* temperature history table */ + + err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct_data; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR); + if (err) + goto skip_sct_data; + + /* + * Temperature limits per AT Attachment 8 - + * ATA/ATAPI Command Set (ATA8-ACS) + */ + st->have_temp_max = temp_is_valid(buf[6]); + st->have_temp_crit = temp_is_valid(buf[7]); + st->have_temp_min = temp_is_valid(buf[8]); + st->have_temp_lcrit = temp_is_valid(buf[9]); + + st->temp_max = temp_from_sct(buf[6]); + st->temp_crit = temp_from_sct(buf[7]); + st->temp_min = temp_from_sct(buf[8]); + st->temp_lcrit = temp_from_sct(buf[9]); + +skip_sct_data: + if (have_sct_temp) { + st->get_temp = drivetemp_get_scttemp; + return 0; + } +skip_sct: + if (!have_smart) + return -ENODEV; + st->get_temp = drivetemp_get_smarttemp; + return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp); +} + +static int drivetemp_identify(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + + /* Bail out immediately if there is no inquiry data */ + if (!sdev->inquiry || sdev->inquiry_len < 16) + return -ENODEV; + + /* Disk device? 
*/ + if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC) + return -ENODEV; + + return drivetemp_identify_sata(st); +} + +static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct drivetemp_data *st = dev_get_drvdata(dev); + int err = 0; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_lowest: + case hwmon_temp_highest: + mutex_lock(&st->lock); + err = st->get_temp(st, attr, val); + mutex_unlock(&st->lock); + break; + case hwmon_temp_lcrit: + *val = st->temp_lcrit; + break; + case hwmon_temp_min: + *val = st->temp_min; + break; + case hwmon_temp_max: + *val = st->temp_max; + break; + case hwmon_temp_crit: + *val = st->temp_crit; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static umode_t drivetemp_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct drivetemp_data *st = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_lowest: + if (st->have_temp_lowest) + return 0444; + break; + case hwmon_temp_highest: + if (st->have_temp_highest) + return 0444; + break; + case hwmon_temp_min: + if (st->have_temp_min) + return 0444; + break; + case hwmon_temp_max: + if (st->have_temp_max) + return 0444; + break; + case hwmon_temp_lcrit: + if (st->have_temp_lcrit) + return 0444; + break; + case hwmon_temp_crit: + if (st->have_temp_crit) + return 0444; + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *drivetemp_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | + HWMON_T_LOWEST | HWMON_T_HIGHEST | + HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_LCRIT | HWMON_T_CRIT), + NULL +}; + +static const struct hwmon_ops drivetemp_ops = { + .is_visible = drivetemp_is_visible, + .read = drivetemp_read, +}; + +static const struct hwmon_chip_info drivetemp_chip_info = { + .ops = &drivetemp_ops, + .info = drivetemp_info, +}; + +/* + * The device argument points to sdev->sdev_dev. Its parent is + * sdev->sdev_gendev, which we can use to get the scsi_device pointer. 
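+ * (Hence the to_scsi_device(dev->parent) lookup at the top of
+ * drivetemp_add() below.)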
+ */ +static int drivetemp_add(struct device *dev, struct class_interface *intf) +{ + struct scsi_device *sdev = to_scsi_device(dev->parent); + struct drivetemp_data *st; + int err; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->sdev = sdev; + st->dev = dev; + mutex_init(&st->lock); + + if (drivetemp_identify(st)) { + err = -ENODEV; + goto abort; + } + + st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp", + st, &drivetemp_chip_info, + NULL); + if (IS_ERR(st->hwdev)) { + err = PTR_ERR(st->hwdev); + goto abort; + } + + list_add(&st->list, &drivetemp_devlist); + return 0; + +abort: + kfree(st); + return err; +} + +static void drivetemp_remove(struct device *dev, struct class_interface *intf) +{ + struct drivetemp_data *st, *tmp; + + list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) { + if (st->dev == dev) { + list_del(&st->list); + hwmon_device_unregister(st->hwdev); + kfree(st); + break; + } + } +} + +static struct class_interface drivetemp_interface = { + .add_dev = drivetemp_add, + .remove_dev = drivetemp_remove, +}; + +static int __init drivetemp_init(void) +{ + return scsi_register_interface(&drivetemp_interface); +} + +static void __exit drivetemp_exit(void) +{ + scsi_unregister_interface(&drivetemp_interface); +} + +module_init(drivetemp_init); +module_exit(drivetemp_exit); + +MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>"); +MODULE_DESCRIPTION("Hard drive temperature monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 1f3b30b085b9..6a30fb453f7a 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -51,6 +51,7 @@ struct hwmon_device_attribute { #define to_hwmon_attr(d) \ container_of(d, struct hwmon_device_attribute, dev_attr) +#define to_dev_attr(a) container_of(a, struct device_attribute, attr) /* * Thermal zone information @@ -58,7 +59,7 @@ struct hwmon_device_attribute { * also provides the sensor index. 
*/ struct hwmon_thermal_data { - struct hwmon_device *hwdev; /* Reference to hwmon device */ + struct device *dev; /* Reference to hwmon device */ int index; /* sensor index */ }; @@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = { NULL }; +static void hwmon_free_attrs(struct attribute **attrs) +{ + int i; + + for (i = 0; attrs[i]; i++) { + struct device_attribute *dattr = to_dev_attr(attrs[i]); + struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr); + + kfree(hattr); + } + kfree(attrs); +} + static void hwmon_dev_release(struct device *dev) { - kfree(to_hwmon_device(dev)); + struct hwmon_device *hwdev = to_hwmon_device(dev); + + if (hwdev->group.attrs) + hwmon_free_attrs(hwdev->group.attrs); + kfree(hwdev->groups); + kfree(hwdev); } static struct class hwmon_class = { @@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida); static int hwmon_thermal_get_temp(void *data, int *temp) { struct hwmon_thermal_data *tdata = data; - struct hwmon_device *hwdev = tdata->hwdev; + struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); int ret; long t; - ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input, + ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input, tdata->index, &t); if (ret < 0) return ret; @@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, }; -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { struct hwmon_thermal_data *tdata; struct thermal_zone_device *tzd; @@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev, if (!tdata) return -ENOMEM; - tdata->hwdev = hwdev; + tdata->dev = dev; tdata->index = index; - tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, + tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); /* * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, @@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, return 0; } #else -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { return 0; } @@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, static int hwmon_attr_base(enum hwmon_sensor_types type) { - if (type == hwmon_in) + if (type == hwmon_in || type == hwmon_intrusion) return 0; return 1; } @@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr) (type == hwmon_fan && attr == hwmon_fan_label); } -static struct attribute *hwmon_genattr(struct device *dev, - const void *drvdata, +static struct attribute *hwmon_genattr(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int index, @@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev, if ((mode & 0222) && !ops->write) return ERR_PTR(-EINVAL); - hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL); + hattr = kzalloc(sizeof(*hattr), GFP_KERNEL); if (!hattr) return ERR_PTR(-ENOMEM); @@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = { }; static const char * const hwmon_temp_attr_templates[] = { + [hwmon_temp_enable] = "temp%d_enable", [hwmon_temp_input] = "temp%d_input", [hwmon_temp_type] = "temp%d_type", [hwmon_temp_lcrit] = "temp%d_lcrit", @@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = { }; static const char * const 
hwmon_in_attr_templates[] = { + [hwmon_in_enable] = "in%d_enable", [hwmon_in_input] = "in%d_input", [hwmon_in_min] = "in%d_min", [hwmon_in_max] = "in%d_max", @@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = { [hwmon_in_max_alarm] = "in%d_max_alarm", [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm", [hwmon_in_crit_alarm] = "in%d_crit_alarm", - [hwmon_in_enable] = "in%d_enable", }; static const char * const hwmon_curr_attr_templates[] = { + [hwmon_curr_enable] = "curr%d_enable", [hwmon_curr_input] = "curr%d_input", [hwmon_curr_min] = "curr%d_min", [hwmon_curr_max] = "curr%d_max", @@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = { }; static const char * const hwmon_power_attr_templates[] = { + [hwmon_power_enable] = "power%d_enable", [hwmon_power_average] = "power%d_average", [hwmon_power_average_interval] = "power%d_average_interval", [hwmon_power_average_interval_max] = "power%d_interval_max", @@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = { }; static const char * const hwmon_energy_attr_templates[] = { + [hwmon_energy_enable] = "energy%d_enable", [hwmon_energy_input] = "energy%d_input", [hwmon_energy_label] = "energy%d_label", }; static const char * const hwmon_humidity_attr_templates[] = { + [hwmon_humidity_enable] = "humidity%d_enable", [hwmon_humidity_input] = "humidity%d_input", [hwmon_humidity_label] = "humidity%d_label", [hwmon_humidity_min] = "humidity%d_min", @@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = { }; static const char * const hwmon_fan_attr_templates[] = { + [hwmon_fan_enable] = "fan%d_enable", [hwmon_fan_input] = "fan%d_input", [hwmon_fan_label] = "fan%d_label", [hwmon_fan_min] = "fan%d_min", @@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_freq] = "pwm%d_freq", }; +static const char * const hwmon_intrusion_attr_templates[] = { + [hwmon_intrusion_alarm] = "intrusion%d_alarm", + [hwmon_intrusion_beep] = "intrusion%d_beep", +}; + static const char * const *__templates[] = { [hwmon_chip] = hwmon_chip_attrs, [hwmon_temp] = hwmon_temp_attr_templates, @@ -468,6 +495,7 @@ static const char * const *__templates[] = { [hwmon_humidity] = hwmon_humidity_attr_templates, [hwmon_fan] = hwmon_fan_attr_templates, [hwmon_pwm] = hwmon_pwm_attr_templates, + [hwmon_intrusion] = hwmon_intrusion_attr_templates, }; static const int __templates_size[] = { @@ -480,6 +508,7 @@ static const int __templates_size[] = { [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates), [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates), [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates), + [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) @@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) return n; } -static int hwmon_genattrs(struct device *dev, - const void *drvdata, +static int hwmon_genattrs(const void *drvdata, struct attribute **attrs, const struct hwmon_ops *ops, const struct hwmon_channel_info *info) @@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev, attr_mask &= ~BIT(attr); if (attr >= template_size) return -EINVAL; - a = hwmon_genattr(dev, drvdata, info->type, attr, i, + a = hwmon_genattr(drvdata, info->type, attr, i, templates[attr], ops); if (IS_ERR(a)) { if (PTR_ERR(a) != -ENOENT) @@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev, } static struct attribute ** 
-__hwmon_create_attrs(struct device *dev, const void *drvdata, - const struct hwmon_chip_info *chip) +__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip) { int ret, i, aindex = 0, nattrs = 0; struct attribute **attrs; @@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata, if (nattrs == 0) return ERR_PTR(-EINVAL); - attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL); + attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return ERR_PTR(-ENOMEM); for (i = 0; chip->info[i]; i++) { - ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops, + ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops, chip->info[i]); - if (ret < 0) + if (ret < 0) { + hwmon_free_attrs(attrs); return ERR_PTR(ret); + } aindex += ret; } @@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, for (i = 0; groups[i]; i++) ngroups++; - hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), - GFP_KERNEL); + hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL); if (!hwdev->groups) { err = -ENOMEM; goto free_hwmon; } - attrs = __hwmon_create_attrs(dev, drvdata, chip); + attrs = __hwmon_create_attrs(drvdata, chip); if (IS_ERR(attrs)) { err = PTR_ERR(attrs); goto free_hwmon; @@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, hwmon_temp_input, j)) continue; if (info[i]->config[j] & HWMON_T_INPUT) { - err = hwmon_thermal_add_sensor(dev, - hwdev, j); + err = hwmon_thermal_add_sensor(hdev, j); if (err) { device_unregister(hdev); /* @@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; free_hwmon: - kfree(hwdev); + hwmon_dev_release(hdev); ida_remove: ida_simple_remove(&hwmon_ida, id); return ERR_PTR(err); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..e39354ffe973 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,13 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> + * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net> + * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. + * - Register addresses to read chip voltage and current are also from + * https://github.com/ocerman/zenpower, and not confirmed from chip + * datasheets. Current calibration is board specific and not typically + * shared by board vendors. For this reason, current values are + * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC + * current. Reported values can be adjusted using the sensors configuration + * file. */
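The hwmon core hunks earlier in this diff stop using devm_kzalloc()/devm_kcalloc() for attribute memory. The reason is lifetime: devm allocations belong to the parent device and are released when that device unbinds, while userspace can still hold the hwmon class device open, so anything sysfs may touch must instead be freed from the class device's own release() callback (hwmon_dev_release(), which the free_hwmon error path now reuses as well). Below is a minimal, self-contained userspace sketch of that ownership rule; every name is hypothetical, and only the pattern matters: one teardown helper, shared by the error path and the normal release path.

#include <stdio.h>
#include <stdlib.h>

struct attr { char *name; };

struct hw_dev {
	struct attr **attrs;	/* NULL-terminated; owned by hw_dev, not by its parent */
};

/* One teardown helper: frees every attribute, then the array, then the device. */
static void hw_dev_release(struct hw_dev *dev)
{
	if (dev->attrs) {
		for (int i = 0; dev->attrs[i]; i++) {
			free(dev->attrs[i]->name);
			free(dev->attrs[i]);
		}
		free(dev->attrs);
	}
	free(dev);
}

static struct hw_dev *hw_dev_create(int nattrs)
{
	struct hw_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return NULL;
	dev->attrs = calloc(nattrs + 1, sizeof(*dev->attrs));
	if (!dev->attrs)
		goto fail;
	for (int i = 0; i < nattrs; i++) {
		dev->attrs[i] = calloc(1, sizeof(*dev->attrs[i]));
		if (!dev->attrs[i])
			goto fail;
		dev->attrs[i]->name = malloc(16);
		if (!dev->attrs[i]->name)
			goto fail;
		snprintf(dev->attrs[i]->name, 16, "temp%d_input", i + 1);
	}
	return dev;
fail:
	hw_dev_release(dev);	/* partially built state is safe to release */
	return NULL;
}

int main(void)
{
	struct hw_dev *dev = hw_dev_create(3);

	if (dev) {
		printf("%s\n", dev->attrs[0]->name);	/* prints temp1_input */
		hw_dev_release(dev);
	}
	return 0;
}

Routing the error path through the full release helper is exactly why free_hwmon switched from kfree(hwdev) to hwmon_dev_release(hdev): partially constructed state stays safe either way.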
+#include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> @@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #endif /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); /* F17h M01h Access through SMN */ #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4)) +#define F17H_M70H_CCD_TEMP_VALID BIT(11) +#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0) + +#define F17H_M01H_SVI 0x0005A000 +#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc) +#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10) + +#define CUR_TEMP_SHIFT 21 +#define CUR_TEMP_RANGE_SEL_MASK BIT(19) + +#define CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define CFACTOR_ISOC 250000 /* 0.25A / LSB */ + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -67,6 +97,10 @@ struct k10temp_data { int temp_offset; u32 temp_adjust_mask; bool show_tdie; + u32 show_tccd; + u32 svi_addr[2]; + bool show_current; + int cfactor[2]; }; struct tctl_offset { @@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ }; +static bool is_threadripper(void) +{ + return strstr(boot_cpu_data.x86_model_id, "Threadripper"); +} + +static bool is_epyc(void) +{ + return strstr(boot_cpu_data.x86_model_id, "EPYC"); +} + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) { pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); @@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, &regval); - temp = (regval >> 21) * 125; + temp = (regval >> CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); +static const char *k10temp_temp_label[] = { + "Tdie", + "Tctl", + "Tccd1", + "Tccd2", + "Tccd3", + "Tccd4", + "Tccd5", + "Tccd6", + "Tccd7", + "Tccd8", +}; - if (temp > data->temp_offset) - temp -= data->temp_offset; - else - temp = 0; +static const char *k10temp_in_label[] = { + "Vcore", + "Vsoc", +}; - return sprintf(buf, "%u\n", temp); -} +static const char *k10temp_curr_label[] = { + "Icore", + "Isoc", +}; -static ssize_t temp2_input_show(struct device *dev, - struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) { - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - return sprintf(buf, "%u\n", temp); + switch (type) { + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; + case hwmon_in: + *str = k10temp_in_label[channel]; + break; + case hwmon_curr: + *str = k10temp_curr_label[channel]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_label_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_curr(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; - return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie"); + switch (attr) { + case hwmon_curr_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + *val = DIV_ROUND_CLOSEST(data->cfactor[channel] * + (regval & 0xff), + 1000); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val) { - return sprintf(buf, "%d\n", 70 * 1000); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; + + switch (attr) { + case hwmon_in_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + regval = (regval >> 16) & 0xff; + *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100); + break; + default: + return -EOPNOTSUPP; + } + return 0; }
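k10temp_read_in() and k10temp_read_curr() above decode one 32-bit SVI telemetry word per voltage plane: bits 23:16 carry a voltage code on a descending 6.25 mV ladder that starts at 1.55 V, and bits 7:0 carry the current in normalized steps scaled by cfactor[] (1 A/LSB for the core plane, 0.25 A/LSB for SoC, per the file header comment). A standalone sketch of the same arithmetic, with a made-up telemetry word for illustration:

#include <stdio.h>
#include <stdint.h>

#define CFACTOR_ICORE	1000000	/* microamps per LSB, i.e. 1 A / LSB */

/* Millivolts from the code in bits 23:16: 1.55 V minus 6.25 mV per step. */
static long svi_millivolts(uint32_t regval)
{
	uint32_t code = (regval >> 16) & 0xff;

	return (155000 - code * 625 + 50) / 100;	/* DIV_ROUND_CLOSEST(..., 100) */
}

/* Milliamps from bits 7:0, scaled by the per-plane calibration factor. */
static long svi_milliamps(uint32_t regval, long cfactor)
{
	return (cfactor * (regval & 0xff) + 500) / 1000;
}

int main(void)
{
	uint32_t regval = 0x00400020;	/* hypothetical SVI word: code 0x40, current 0x20 */

	/* 155000 - 64 * 625 = 115000 -> 1150 mV; 32 LSB at 1 A/LSB -> 32000 mA */
	printf("%ld mV, %ld mA\n", svi_millivolts(regval),
	       svi_milliamps(regval, CFACTOR_ICORE));
	return 0;
}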
-static ssize_t temp_crit_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct k10temp_data *data = dev_get_drvdata(dev); - int show_hyst = attr->index; u32 regval; - int value; - data->read_htcreg(data->pdev, &regval); - value = ((regval >> 16) & 0x7f) * 500 + 52000; - if (show_hyst) - value -= ((regval >> 24) & 0xf) * 500; - return sprintf(buf, "%d\n", value); + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; + if (*val < 0) + *val = 0; + break; + case 1: /* Tctl */ + *val = get_raw_temp(data); + if (*val < 0) + *val = 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + F17H_M70H_CCD_TEMP(channel - 2), &regval); + *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp_max: + *val = 70 * 1000; + break; + case hwmon_temp_crit: + data->read_htcreg(data->pdev, &regval); + *val = ((regval >> 16) & 0x7f) * 500 + 52000; + break; + case hwmon_temp_crit_hyst: + data->read_htcreg(data->pdev, &regval); + *val = (((regval >> 16) & 0x7f) + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static DEVICE_ATTR_RO(temp1_input); -static DEVICE_ATTR_RO(temp1_max); -static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1); - -static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0); -static DEVICE_ATTR_RO(temp2_input); -static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1); +static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); + case hwmon_in: + return k10temp_read_in(dev, attr, channel, val); + case hwmon_curr: + return k10temp_read_curr(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +}
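Every temperature k10temp_read_temp() above reports comes down to three decodes: the raw Tctl field in bits 31:21 of the reported-temperature register times 125 (millidegrees C, minus 49000 when the range-select bit applies), Tdie as Tctl minus a per-model offset, and the 11-bit CCD field times 125 minus 49000. The HTC limit behind temp1_crit is ((regval >> 16) & 0x7f) * 500 + 52000, with the hysteresis field in bits 27:24 subtracted in 500-millidegree steps. A quick standalone check of those formulas; the sample register values are invented:

#include <stdio.h>
#include <stdint.h>

#define CUR_TEMP_SHIFT		21
#define CUR_TEMP_RANGE_SEL	(1u << 19)

/* Tctl in millidegrees C, as in get_raw_temp() */
static long tctl_mc(uint32_t regval)
{
	long temp = (regval >> CUR_TEMP_SHIFT) * 125;

	if (regval & CUR_TEMP_RANGE_SEL)
		temp -= 49000;	/* -49 degC range offset */
	return temp;
}

/* temp1_crit (and optionally its hysteresis) from the HTC register */
static long htc_crit_mc(uint32_t regval, int hyst)
{
	long val = ((regval >> 16) & 0x7f) * 500 + 52000;

	if (hyst)
		val -= ((regval >> 24) & 0xf) * 500;
	return val;
}

int main(void)
{
	/* raw field 512 -> 64000; with the range bit set -> 15000 */
	printf("Tctl: %ld\n", tctl_mc(512u << CUR_TEMP_SHIFT));
	printf("Tctl, range bit: %ld\n",
	       tctl_mc((512u << CUR_TEMP_SHIFT) | CUR_TEMP_RANGE_SEL));
	/* HTC limit field 70 -> 87000; hysteresis field 2 -> 86000 */
	printf("crit: %ld, crit_hyst: %ld\n",
	       htc_crit_mc(0x02460000u, 0), htc_crit_mc(0x02460000u, 1));
	return 0;
}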
-static umode_t k10temp_is_visible(struct kobject *kobj, - struct attribute *attr, int index) +static umode_t k10temp_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct device *dev = container_of(kobj, struct device, kobj); - struct k10temp_data *data = dev_get_drvdata(dev); + const struct k10temp_data *data = _data; struct pci_dev *pdev = data->pdev; u32 reg; - switch (index) { - case 0 ... 1: /* temp1_input, temp1_max */ - default: - break; - case 2 ... 3: /* temp1_crit, temp1_crit_hyst */ - if (!data->read_htcreg) - return 0; - - pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, - &reg); - if (!(reg & NB_CAP_HTC)) - return 0; - - data->read_htcreg(data->pdev, &reg); - if (!(reg & HTC_ENABLE)) + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie, or Tctl if we don't show it */ + break; + case 1: /* Tctl */ + if (!data->show_tdie) + return 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + case hwmon_temp_max: + if (channel || data->show_tdie) + return 0; + break; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel || !data->read_htcreg) + return 0; + + pci_read_config_dword(pdev, + REG_NORTHBRIDGE_CAPABILITIES, + &reg); + if (!(reg & NB_CAP_HTC)) + return 0; + + data->read_htcreg(data->pdev, &reg); + if (!(reg & HTC_ENABLE)) + return 0; + break; + case hwmon_temp_label: + /* No labels if we don't show the die temperature */ + if (!data->show_tdie) + return 0; + switch (channel) { + case 0: /* Tdie */ + case 1: /* Tctl */ + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + default: return 0; + } break; - case 4 ... 6: /* temp1_label, temp2_input, temp2_label */ - if (!data->show_tdie) + case hwmon_in: + case hwmon_curr: + if (!data->show_current) return 0; break; + default: + return 0; } - return attr->mode; + return 0444; } -static struct attribute *k10temp_attrs[] = { - &dev_attr_temp1_input.attr, - &dev_attr_temp1_max.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &dev_attr_temp2_input.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL -}; - -static const struct attribute_group k10temp_group = { - .attrs = k10temp_attrs, - .is_visible = k10temp_is_visible, -}; -__ATTRIBUTE_GROUPS(k10temp); - static bool has_erratum_319(struct pci_dev *pdev) { u32 pkg_type, reg_dram_cfg; @@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev) (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } -static int k10temp_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +#ifdef CONFIG_DEBUG_FS + +static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev, + u32 addr, int count) +{ + u32 reg; + int i; + + for (i = 0; i < count; i++) { + if (!(i & 3)) + seq_printf(s, "0x%06x: ", addr + i * 4); + amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg); + seq_printf(s, "%08x ", reg); + if ((i & 3) == 3) + seq_puts(s, "\n"); + } +} + +static int svi_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(svi); + +static int thm_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(thm); + +static void k10temp_debugfs_cleanup(void *ddir) +{ + debugfs_remove_recursive(ddir); +} + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ + struct dentry *debugfs; + char name[32]; + + /* Only show debugfs data for Family 17h/18h CPUs */ + if (!data->show_tdie) + return; + + scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev)); + + debugfs = debugfs_create_dir(name, NULL); + if (debugfs) { + debugfs_create_file("svi", 0444, debugfs, data, &svi_fops); + debugfs_create_file("thm", 0444, debugfs, data, &thm_fops); + devm_add_action_or_reset(&data->pdev->dev, + k10temp_debugfs_cleanup, debugfs); + } +} + +#else + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ +} + +#endif + +static const struct hwmon_channel_info *k10temp_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL), + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_LABEL, + HWMON_C_INPUT | HWMON_C_LABEL), + NULL +}; + +static const struct hwmon_ops k10temp_hwmon_ops = { + .is_visible = k10temp_is_visible, + .read = k10temp_read, + .read_string = k10temp_read_labels, +}; + +static const struct hwmon_chip_info k10temp_chip_info = { + .ops = &k10temp_hwmon_ops, + .info = k10temp_info, +};
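The three tables just above are the whole point of the conversion away from handcrafted attribute groups: the driver only declares which channels and attributes exist (k10temp_info), and the hwmon core builds every sysfs file itself, consulting is_visible()/read()/read_string() on demand. The same pattern in its smallest possible form, as a compile-time sketch of a hypothetical single-channel driver; devm_hwmon_device_register_with_info() and the hwmon types are the real kernel API used in this diff, while everything named demo_* is a placeholder:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>

/* One temperature channel with an input value and a label. */
static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
	NULL
};

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;	/* everything declared above is read-only */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	if (type != hwmon_temp || attr != hwmon_temp_input)
		return -EOPNOTSUPP;
	*val = 42000;	/* placeholder reading, millidegrees C */
	return 0;
}

static int demo_read_string(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, const char **str)
{
	*str = "Tdemo";	/* becomes temp1_label */
	return 0;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
	.read_string = demo_read_string,
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};

/* Called from the owning bus driver's probe routine. */
static int demo_register(struct device *dev)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "demo", NULL,
							 &demo_chip_info, NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

Visibility checks that need hardware state, like the HTC probing above, receive the driver data through the first argument of is_visible(); that is why k10temp passes data as drvdata to the registration call in its probe function below.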
+static void k10temp_get_ccd_support(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + u32 regval; + int i; + + for (i = 0; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + F17H_M70H_CCD_TEMP(i), &regval); + if (regval & F17H_M70H_CCD_TEMP_VALID) + data->show_tccd |= BIT(i); + } +} + +static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; @@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev, data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { - data->temp_adjust_mask = 0x80000; + data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_f17; data->show_tdie = true; + + switch (boot_cpu_data.x86_model) { + case 0x1: /* Zen */ + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ + data->show_current = !is_threadripper() && !is_epyc(); + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1; + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ + data->show_current = !is_threadripper() && !is_epyc(); + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0; + k10temp_get_ccd_support(pdev, data, 8); + break; + } } else { data->read_htcreg = read_htcreg_pci; data->read_tempreg = read_tempreg_pci; @@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev, } } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, - k10temp_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); + hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data, + &k10temp_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + k10temp_init_debugfs(data); + + return 0; } static const struct pci_device_id k10temp_id_table[] = { diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c new file mode 100644 index 000000000000..eb22a34dc36b --- /dev/null +++ b/drivers/hwmon/max31730.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX31730 3-Channel Remote Temperature Sensor + * + * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net> + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/slab.h> + +/* Addresses scanned */ +static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, + 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; + +/* The MAX31730 registers */ +#define MAX31730_REG_TEMP 0x00 +#define MAX31730_REG_CONF 0x13 +#define MAX31730_STOP BIT(7) +#define MAX31730_EXTRANGE BIT(1) +#define MAX31730_REG_TEMP_OFFSET 0x16 +#define MAX31730_TEMP_OFFSET_BASELINE 0x77 +#define MAX31730_REG_OFFSET_ENABLE 0x17 +#define MAX31730_REG_TEMP_MAX 0x20 +#define MAX31730_REG_TEMP_MIN 0x30 +#define MAX31730_REG_STATUS_HIGH 0x32 +#define MAX31730_REG_STATUS_LOW 0x33 +#define MAX31730_REG_CHANNEL_ENABLE 0x35 +#define MAX31730_REG_TEMP_FAULT 0x36 + +#define MAX31730_REG_MFG_ID 0x50 +#define MAX31730_MFG_ID 0x4d +#define MAX31730_REG_MFG_REV 0x51 +#define MAX31730_MFG_REV 0x01 + +#define MAX31730_TEMP_MIN (-128000) +#define MAX31730_TEMP_MAX 127937 + +/* 
Each client has this additional data */ +struct max31730_data { + struct i2c_client *client; + u8 orig_conf; + u8 current_conf; + u8 offset_enable; + u8 channel_enable; +}; + +/*-----------------------------------------------------------------------*/ + +static inline long max31730_reg_to_mc(s16 temp) +{ + return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16); +} + +static int max31730_write_config(struct max31730_data *data, u8 set_mask, + u8 clr_mask) +{ + u8 value; + + clr_mask |= MAX31730_EXTRANGE; + value = data->current_conf & ~clr_mask; + value |= set_mask; + + if (data->current_conf != value) { + s32 err; + + err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF, + value); + if (err) + return err; + data->current_conf = value; + } + return 0; +} + +static int max31730_set_enable(struct i2c_client *client, int reg, + u8 *confdata, int channel, bool enable) +{ + u8 regval = *confdata; + int err; + + if (enable) + regval |= BIT(channel); + else + regval &= ~BIT(channel); + + if (regval != *confdata) { + err = i2c_smbus_write_byte_data(client, reg, regval); + if (err) + return err; + *confdata = regval; + } + return 0; +} + +static int max31730_set_offset_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE, + &data->offset_enable, channel, enable); +} + +static int max31730_set_channel_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE, + &data->channel_enable, channel, enable); +} + +static int max31730_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int regval, reg, offset; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + if (!(data->channel_enable & BIT(channel))) + return -ENODATA; + reg = MAX31730_REG_TEMP + (channel * 2); + break; + case hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + (channel * 2); + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + *val = !!(data->channel_enable & BIT(channel)); + return 0; + case hwmon_temp_offset: + if (!channel) + return -EINVAL; + if (!(data->offset_enable & BIT(channel))) { + *val = 0; + return 0; + } + offset = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET); + if (offset < 0) + return offset; + *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125; + return 0; + case hwmon_temp_fault: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_FAULT); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_min_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_LOW); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_max_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_HIGH); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + default: + return -EINVAL; + } + regval = i2c_smbus_read_word_swapped(data->client, reg); + if (regval < 0) + return regval; + + *val = max31730_reg_to_mc(regval); + + return 0; +} + +static int max31730_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int reg, err; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case 
hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + channel * 2; + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + if (val != 0 && val != 1) + return -EINVAL; + return max31730_set_channel_enable(data, channel, val); + case hwmon_temp_offset: + val = clamp_val(val, -14875, 17000) + 14875; + val = DIV_ROUND_CLOSEST(val, 125); + err = max31730_set_offset_enable(data, channel, + val != MAX31730_TEMP_OFFSET_BASELINE); + if (err) + return err; + return i2c_smbus_write_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET, val); + default: + return -EINVAL; + } + + val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX); + val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4; + + return i2c_smbus_write_word_swapped(data->client, reg, (u16)val); +} + +static umode_t max31730_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_fault: + return 0444; + case hwmon_temp_min: + return channel ? 0444 : 0644; + case hwmon_temp_offset: + case hwmon_temp_enable: + case hwmon_temp_max: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *max31730_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT + ), + NULL +}; + +static const struct hwmon_ops max31730_hwmon_ops = { + .is_visible = max31730_is_visible, + .read = max31730_read, + .write = max31730_write, +}; + +static const struct hwmon_chip_info max31730_chip_info = { + .ops = &max31730_hwmon_ops, + .info = max31730_info, +}; + +static void max31730_remove(void *data) +{ + struct max31730_data *max31730 = data; + struct i2c_client *client = max31730->client; + + i2c_smbus_write_byte_data(client, MAX31730_REG_CONF, + max31730->orig_conf); +} + +static int +max31730_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct max31730_data *data; + int status, err; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + + /* Cache original configuration and enable status */ + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE); + if (status < 0) + return status; + data->channel_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE); + if (status < 0) + return status; + data->offset_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF); + if (status < 0) + return status; + data->orig_conf = status; + data->current_conf = status; + + err = max31730_write_config(data, + data->channel_enable ? 0 : MAX31730_STOP, + data->channel_enable ? 
MAX31730_STOP : 0); + if (err) + return err; + + dev_set_drvdata(dev, data); + + err = devm_add_action_or_reset(dev, max31730_remove, data); + if (err) + return err; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, + &max31730_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id max31730_ids[] = { + { "max31730", 0, }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max31730_ids); + +static const struct of_device_id __maybe_unused max31730_of_match[] = { + { + .compatible = "maxim,max31730", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, max31730_of_match); + +static bool max31730_check_reg_temp(struct i2c_client *client, + int reg) +{ + int regval; + + regval = i2c_smbus_read_byte_data(client, reg + 1); + return regval < 0 || (regval & 0x0f); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max31730_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int regval; + int i; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID); + if (regval != MAX31730_MFG_ID) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV); + if (regval != MAX31730_MFG_REV) + return -ENODEV; + + /* lower 4 bits of temperature and limit registers must be 0 */ + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN)) + return -ENODEV; + + for (i = 0; i < 4; i++) { + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2)) + return -ENODEV; + if (max31730_check_reg_temp(client, + MAX31730_REG_TEMP_MAX + i * 2)) + return -ENODEV; + } + + strlcpy(info->type, "max31730", I2C_NAME_SIZE); + + return 0; +} + +static int __maybe_unused max31730_suspend(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, MAX31730_STOP, 0); +} + +static int __maybe_unused max31730_resume(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, 0, MAX31730_STOP); +} + +static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume); + +static struct i2c_driver max31730_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "max31730", + .of_match_table = of_match_ptr(max31730_of_match), + .pm = &max31730_pm_ops, + }, + .probe = max31730_probe, + .id_table = max31730_ids, + .detect = max31730_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(max31730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("MAX31730 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index f3dd2a17bd42..2e97e56c72c7 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -23,8 +23,8 @@ static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e }; static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = { - { 0x40, 0x00, 0x42, 0x44, 0x46 }, - { 0x3f, 0x00, 0x41, 0x43, 0x45 }, + { 0x46, 0x00, 0x40, 0x42, 0x44 }, + { 0x45, 0x00, 0x3f, 0x41, 0x43 }, }; static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 }; @@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = { struct nct7802_data { struct regmap *regmap; struct mutex access_lock; /* for multi-byte read and write operations */ + u8 in_status; + struct mutex in_alarm_lock; }; static ssize_t temp_type_show(struct device *dev, @@ -368,6 
+370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr, return err ? : count; } +static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct nct7802_data *data = dev_get_drvdata(dev); + int volt, min, max, ret; + unsigned int val; + + mutex_lock(&data->in_alarm_lock); + + /* + * The SMI Voltage status register is the only register giving a status + * for voltages. A bit is set for each input crossing a threshold, in + * both directions, but the "inside" or "outside" limits info is not + * available. Also this register is cleared on read. + * Note: this is not explicitly spelled out in the datasheet, but was + * determined by experiment. + * To deal with this we use a status cache with one validity bit and + * one status bit for each input. Validity is cleared at startup and + * each time the register reports a change, and the status is processed + * by software based on current input value and limits. + */ + ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */ + if (ret < 0) + goto abort; + + /* invalidate cached status for all inputs crossing a threshold */ + data->in_status &= ~((val & 0x0f) << 4); + + /* if cached status for requested input is invalid, update it */ + if (!(data->in_status & (0x10 << sattr->index))) { + ret = nct7802_read_voltage(data, sattr->nr, 0); + if (ret < 0) + goto abort; + volt = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 1); + if (ret < 0) + goto abort; + min = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 2); + if (ret < 0) + goto abort; + max = ret; + + if (volt < min || volt > max) + data->in_status |= (1 << sattr->index); + else + data->in_status &= ~(1 << sattr->index); + + data->in_status |= 0x10 << sattr->index; + } + + ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index))); +abort: + mutex_unlock(&data->in_alarm_lock); + return ret; +} + static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = { static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0); static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1); static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2); -static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3); +static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3); static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3); static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); @@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1); static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2); -static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0); +static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0); static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0); static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2); -static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1); +static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1); static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0); static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1); static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2); -static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2); +static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
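Because the SMI status register clears itself on read, a second reader would see zero and lose the event, which is why in_alarm_show() above latches crossings into a software cache. A compact, runnable model of that scheme; the low nibble of the cache holds the last computed alarm state and the high nibble marks which entries are still valid:

#include <stdio.h>
#include <stdint.h>

static uint8_t in_status;	/* bits 0-3: alarm state, bits 4-7: validity */

/* smi is the self-clearing status byte: bit i set = input i crossed a limit */
static int in_alarm(int index, uint8_t smi, int volt, int min, int max)
{
	/* any reported crossing invalidates the cached state for that input */
	in_status &= ~((smi & 0x0f) << 4);

	if (!(in_status & (0x10 << index))) {	/* cache miss: recompute */
		if (volt < min || volt > max)
			in_status |= 1 << index;
		else
			in_status &= ~(1 << index);
		in_status |= 0x10 << index;	/* mark valid again */
	}
	return !!(in_status & (1 << index));
}

int main(void)
{
	/* input 0 crossed a limit and now reads above max: alarm is 1 */
	printf("%d\n", in_alarm(0, 0x01, 1300, 900, 1200));
	/* register self-cleared (smi now 0), cache still reports the alarm */
	printf("%d\n", in_alarm(0, 0x00, 1300, 900, 1200));
	/* a new crossing back inside the window clears the alarm */
	printf("%d\n", in_alarm(0, 0x01, 1000, 900, 1200));
	return 0;
}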
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2); static struct attribute *nct7802_in_attrs[] = { @@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client, return PTR_ERR(data->regmap); mutex_init(&data->access_lock); + mutex_init(&data->in_alarm_lock); ret = nct7802_init_chip(data); if (ret < 0) diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index 59859979571d..a9ea06204767 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -20,8 +20,8 @@ config SENSORS_PMBUS help If you say yes here you get hardware monitoring support for generic PMBus devices, including but not limited to ADP4000, BMR453, BMR454, - MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20, - TPS544B25, TPS544C20, TPS544C25, and UDT020. + MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, + TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020. This driver can also be built as a module. If so, the module will be called pmbus. @@ -145,6 +145,15 @@ config SENSORS_MAX16064 This driver can also be built as a module. If so, the module will be called max16064. +config SENSORS_MAX20730 + tristate "Maxim MAX20730, MAX20734, MAX20743" + help + If you say yes here you get hardware monitoring support for Maxim + MAX20730, MAX20734, and MAX20743. + + This driver can also be built as a module. If so, the module will + be called max20730. + config SENSORS_MAX20751 tristate "Maxim MAX20751" help @@ -200,20 +209,20 @@ config SENSORS_TPS40422 be called tps40422. config SENSORS_TPS53679 - tristate "TI TPS53679" + tristate "TI TPS53679, TPS53688" help If you say yes here you get hardware monitoring support for TI - TPS53679. + TPS53679 and TPS53688. This driver can also be built as a module. If so, the module will be called tps53679. config SENSORS_UCD9000 - tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910" + tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910" help If you say yes here you get hardware monitoring support for TI - UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System - Health Controllers. + UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer + and System Health Controllers. This driver can also be built as a module. If so, the module will be called ucd9000. @@ -228,6 +237,15 @@ config SENSORS_UCD9200 This driver can also be built as a module. If so, the module will be called ucd9200. +config SENSORS_XDPE122 + tristate "Infineon XDPE122 family" + help + If you say yes here you get hardware monitoring support for Infineon + XDPE12254 and XDPE12284 devices. + + This driver can also be built as a module. If so, the module will + be called xdpe12284.
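Most of the Kconfig entries above front rather thin drivers: a chip that follows the PMBus specification closely only needs a pmbus_driver_info describing its pages and sensor classes, after which pmbus_do_probe() registers the hwmon device and all standard attributes (the generic SENSORS_PMBUS driver works exactly this way). A hypothetical minimal client using the API as it appears elsewhere in this diff; the chip name demopsu is invented:

#include <linux/i2c.h>
#include <linux/module.h>
#include "pmbus.h"

static struct pmbus_driver_info demopsu_info = {
	.pages = 1,	/* single PMBus page */
	.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
		   PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
};

static int demopsu_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	/* registers the hwmon device and the standard PMBus attributes */
	return pmbus_do_probe(client, id, &demopsu_info);
}

static const struct i2c_device_id demopsu_id[] = {
	{ "demopsu", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demopsu_id);

static struct i2c_driver demopsu_driver = {
	.driver = {
		.name = "demopsu",
	},
	.probe = demopsu_probe,
	.remove = pmbus_do_remove,
	.id_table = demopsu_id,
};

module_i2c_driver(demopsu_driver);

MODULE_LICENSE("GPL");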
+ config SENSORS_ZL6100 tristate "Intersil ZL6100 and compatibles" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 3f8c1014938b..5feb45806123 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o @@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o +obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index d359b76bcb36..3795fe55b84f 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -20,12 +20,15 @@ #define CFFPS_FRU_CMD 0x9A #define CFFPS_PN_CMD 0x9B +#define CFFPS_HEADER_CMD 0x9C #define CFFPS_SN_CMD 0x9E +#define CFFPS_MAX_POWER_OUT_CMD 0xA7 #define CFFPS_CCIN_CMD 0xBD #define CFFPS_FW_CMD 0xFA #define CFFPS1_FW_NUM_BYTES 4 #define CFFPS2_FW_NUM_WORDS 3 #define CFFPS_SYS_CONFIG_CMD 0xDA +#define CFFPS_12VCS_VOUT_CMD 0xDE #define CFFPS_INPUT_HISTORY_CMD 0xD6 #define CFFPS_INPUT_HISTORY_SIZE 100 @@ -44,22 +47,21 @@ #define CFFPS_MFR_VAUX_FAULT BIT(6) #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7) -/* - * LED off state actually relinquishes LED control to PSU firmware, so it can - * turn on the LED for faults. - */ -#define CFFPS_LED_OFF 0 #define CFFPS_LED_BLINK BIT(0) #define CFFPS_LED_ON BIT(1) +#define CFFPS_LED_OFF BIT(2) #define CFFPS_BLINK_RATE_MS 250 enum { CFFPS_DEBUGFS_INPUT_HISTORY = 0, CFFPS_DEBUGFS_FRU, CFFPS_DEBUGFS_PN, + CFFPS_DEBUGFS_HEADER, CFFPS_DEBUGFS_SN, + CFFPS_DEBUGFS_MAX_POWER_OUT, CFFPS_DEBUGFS_CCIN, CFFPS_DEBUGFS_FW, + CFFPS_DEBUGFS_ON_OFF_CONFIG, CFFPS_DEBUGFS_NUM_ENTRIES }; @@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu, psu->input_history.byte_count); } -static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { u8 cmd; int i, rc; int *idxp = file->private_data; int idx = *idxp; struct ibm_cffps *psu = to_psu(idxp, idx); - char data[I2C_SMBUS_BLOCK_MAX] = { 0 }; + char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; pmbus_set_page(psu->client, 0); @@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, case CFFPS_DEBUGFS_PN: cmd = CFFPS_PN_CMD; break; + case CFFPS_DEBUGFS_HEADER: + cmd = CFFPS_HEADER_CMD; + break; case CFFPS_DEBUGFS_SN: cmd = CFFPS_SN_CMD; break; + case CFFPS_DEBUGFS_MAX_POWER_OUT: + rc = i2c_smbus_read_word_swapped(psu->client, + CFFPS_MAX_POWER_OUT_CMD); + if (rc < 0) + return rc; + + rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc); + goto done; case CFFPS_DEBUGFS_CCIN: rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD); if (rc < 0) @@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, return -EOPNOTSUPP; } goto done; + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + rc = i2c_smbus_read_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG); + if (rc < 0) + return rc; + + rc = 
snprintf(data, 3, "%02x", rc); + goto done; default: return -EINVAL; } @@ -214,9 +235,42 @@ done: return simple_read_from_buffer(buf, count, ppos, data, rc); } +static ssize_t ibm_cffps_debugfs_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + u8 data; + ssize_t rc; + int *idxp = file->private_data; + int idx = *idxp; + struct ibm_cffps *psu = to_psu(idxp, idx); + + switch (idx) { + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + pmbus_set_page(psu->client, 0); + + rc = simple_write_to_buffer(&data, 1, ppos, buf, count); + if (rc <= 0) + return rc; + + rc = i2c_smbus_write_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG, data); + if (rc) + return rc; + + rc = 1; + break; + default: + return -EINVAL; + } + + return rc; +} + static const struct file_operations ibm_cffps_fops = { .llseek = noop_llseek, - .read = ibm_cffps_debugfs_op, + .read = ibm_cffps_debugfs_read, + .write = ibm_cffps_debugfs_write, .open = simple_open, }; @@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, if (mfr & CFFPS_MFR_PS_KILL) rc |= PB_STATUS_OFF; break; + case PMBUS_VIRT_READ_VMON: + rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD); + break; default: rc = -ENODATA; break; @@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu) rc = devm_led_classdev_register(dev, &psu->led); if (rc) dev_warn(dev, "failed to register led class: %d\n", rc); + else + i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD, + CFFPS_LED_OFF); } static struct pmbus_driver_info ibm_cffps_info[] = { @@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = { PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP | - PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON, .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT, @@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client, debugfs_create_file("part_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_PN], &ibm_cffps_fops); + debugfs_create_file("header", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER], + &ibm_cffps_fops); debugfs_create_file("serial_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_SN], &ibm_cffps_fops); + debugfs_create_file("max_power_out", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT], + &ibm_cffps_fops); debugfs_create_file("ccin", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN], &ibm_cffps_fops); debugfs_create_file("fw_version", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_FW], &ibm_cffps_fops); + debugfs_create_file("on_off_config", 0644, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG], + &ibm_cffps_fops); return 0; } diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c new file mode 100644 index 000000000000..294e2212f61e --- /dev/null +++ b/drivers/hwmon/pmbus/max20730.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down + * Switching Regulators + * + * Copyright 2019 Google LLC. 
+ */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/pmbus.h> +#include <linux/util_macros.h> +#include "pmbus.h" + +enum chips { + max20730, + max20734, + max20743 +}; + +struct max20730_data { + enum chips id; + struct pmbus_driver_info info; + struct mutex lock; /* Used to protect against parallel writes */ + u16 mfr_devset1; +}; + +#define to_max20730_data(x) container_of(x, struct max20730_data, info) + +#define MAX20730_MFR_DEVSET1 0xd2 + +/* + * Convert discrete value to direct data format. Strictly speaking, all passed + * values are constants, so we could do that calculation manually. On the + * downside, that would make the driver more difficult to maintain, so let's + * use this approach. + */ +static u16 val_to_direct(int v, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; /* take milli-units into account */ + int b = info->b[class] * 1000; + long d; + + d = v * info->m[class] + b; + /* + * R < 0 is true for all callers, so we don't need to bother + * about the R > 0 case. + */ + while (R < 0) { + d = DIV_ROUND_CLOSEST(d, 10); + R++; + } + return (u16)d; +} + +static long direct_to_val(u16 w, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; + int b = info->b[class] * 1000; + int m = info->m[class]; + long d = (s16)w; + + if (m == 0) + return 0; + + while (R < 0) { + d *= 10; + R++; + } + d = (d - b) / m; + return d; +} + +static u32 max_current[][5] = { + [max20730] = { 13000, 16600, 20100, 23600 }, + [max20734] = { 21000, 27000, 32000, 38000 }, + [max20743] = { 18900, 24100, 29200, 34100 }, +}; + +static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + const struct max20730_data *data = to_max20730_data(info); + int ret = 0; + u32 max_c; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + switch ((data->mfr_devset1 >> 11) & 0x3) { + case 0x0: + ret = val_to_direct(150000, PSC_TEMPERATURE, info); + break; + case 0x1: + ret = val_to_direct(130000, PSC_TEMPERATURE, info); + break; + default: + ret = -ENODATA; + break; + } + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3]; + ret = val_to_direct(max_c, PSC_CURRENT_OUT, info); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int max20730_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + struct pmbus_driver_info *info; + struct max20730_data *data; + u16 devset1; + int ret = 0; + int idx; + + info = (struct pmbus_driver_info *)pmbus_get_driver_info(client); + data = to_max20730_data(info); + + mutex_lock(&data->lock); + devset1 = data->mfr_devset1; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + devset1 &= ~(BIT(11) | BIT(12)); + if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000) + devset1 |= BIT(11); + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + devset1 &= ~(BIT(5) | BIT(6)); + + idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info), + max_current[data->id], 4); + devset1 |= (idx << 5); + break; + default: + ret = -ENODATA; + break; + } + + if (!ret && devset1 != data->mfr_devset1) { + ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1, + devset1); + if (!ret) { + data->mfr_devset1 = devset1; + pmbus_clear_cache(client); + } + } + mutex_unlock(&data->lock); + return ret; +}
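val_to_direct() and direct_to_val() above implement the PMBus DIRECT encoding, Y = (m * X + b) * 10^R, shifted by three extra decades because the driver works in milli-units. With the MAX20730 temperature coefficients from the table that follows (m = 21, b = 5887, R = -1, per Maxim AN6042), the 150 degC limit round-trips as shown in this plain-C sketch, which reimplements the two helpers with the coefficients passed explicitly:

#include <stdio.h>

/* PMBus DIRECT: Y = (m*X + b) * 10^R, with X in milli-units (hence R - 3) */
static unsigned short val_to_direct(long v, int m, int b, int R)
{
	long d = v * m + (long)b * 1000;

	for (R -= 3; R < 0; R++)
		d = (d + 5) / 10;	/* DIV_ROUND_CLOSEST(d, 10) */
	return (unsigned short)d;
}

static long direct_to_val(unsigned short w, int m, int b, int R)
{
	long d = (short)w;

	for (R -= 3; R < 0; R++)
		d *= 10;
	return (d - (long)b * 1000) / m;
}

int main(void)
{
	/* 150000 mC -> (150000*21 + 5887000) / 10^4 = 904 (rounded) */
	unsigned short y = val_to_direct(150000, 21, 5887, -1);

	/* 904 -> (9040000 - 5887000) / 21 = 150142 mC: quantization only */
	printf("Y = %u, X' = %ld mC\n", y, direct_to_val(y, 21, 5887, -1));
	return 0;
}

The reverse step lands on 150142 rather than 150000, which is pure quantization from the 10^-4 scaling; that residual is why the write path above uses find_closest() to snap written limits onto the chip's discrete settings.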
+ +static const struct pmbus_driver_info max20730_info[] = { + [max20730] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + /* + * Values in the datasheet are adjusted for temperature and + * for the relationship between Vin and Vout. + * Unfortunately, the data sheet suggests that Vout measurement + * may be scaled with a resistor array. This is indeed the case + * at least on the evaluation boards. As a result, any in-driver + * adjustments would either be wrong or require elaborate means + * to configure the scaling. Instead of doing that, just report + * raw values and let userspace handle adjustments. + */ + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20734] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6209 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3592, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 111, + .b[PSC_CURRENT_OUT] = 3461, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20743] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3597, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 95, + .b[PSC_CURRENT_OUT] = 5014, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, +}; + +static int max20730_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max20730_data *data; + enum chips chip_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(&client->dev, "Failed to read Manufacturer ID\n"); + return ret; + } + if (ret != 5 || strncmp(buf, "MAXIM", 5)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf); + return -ENODEV; + } + + /* + * 
The chips support reading PMBUS_MFR_MODEL. On both MAX20730 + * and MAX20734, reading it returns M20743. Presumably that is + * the reason why the command is not documented. Unfortunately, + * that means that there is no reliable means to detect the chip. + * However, we can at least detect the chip series. Compare + * the returned value against 'M20743' and bail out if there is + * a mismatch. If that doesn't work for all chips, we may have + * to remove this check. + */ + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Model\n"); + return ret; + } + if (ret != 6 || strncmp(buf, "M20743", 6)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Revision\n"); + return ret; + } + if (ret != 1 || buf[0] != 'F') { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf); + return -ENODEV; + } + + if (client->dev.of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->id = chip_id; + mutex_init(&data->lock); + memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info)); + + ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1); + if (ret < 0) + return ret; + data->mfr_devset1 = ret; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max20730_id[] = { + { "max20730", max20730 }, + { "max20734", max20734 }, + { "max20743", max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max20730_id); + +static const struct of_device_id max20730_of_match[] = { + { .compatible = "maxim,max20730", .data = (void *)max20730 }, + { .compatible = "maxim,max20734", .data = (void *)max20734 }, + { .compatible = "maxim,max20743", .data = (void *)max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(of, max20730_of_match); + +static struct i2c_driver max20730_driver = { + .driver = { + .name = "max20730", + .of_match_table = max20730_of_match, + }, + .probe = max20730_probe, + .remove = pmbus_do_remove, + .id_table = max20730_id, +}; + +module_i2c_driver(max20730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c index ee5f0cdbde06..da3c38cb9a5c 100644 --- a/drivers/hwmon/pmbus/max20751.c +++ b/drivers/hwmon/pmbus/max20751.c @@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = { .pages = 1, .format[PSC_VOLTAGE_IN] = linear, .format[PSC_VOLTAGE_OUT] = vid, - .vrm_version = vr12, + .vrm_version[0] = vr12, .format[PSC_TEMPERATURE] = linear, .format[PSC_CURRENT_OUT] = linear, .format[PSC_POWER] = linear, diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index c0bc43d01018..51e8312b6c2d 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client, } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - int vout_mode; + int vout_mode, i; vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { @@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client, break; case 1: 
info->format[PSC_VOLTAGE_OUT] = vid; - info->vrm_version = vr11; + for (i = 0; i < info->pages; i++) + info->vrm_version[i] = vr11; break; case 2: info->format[PSC_VOLTAGE_OUT] = direct; @@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = { {"dps460", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps800", (kernel_ulong_t)&pmbus_info_one_skip}, + {"max20796", (kernel_ulong_t)&pmbus_info_one}, {"mdt040", (kernel_ulong_t)&pmbus_info_one}, {"ncp4200", (kernel_ulong_t)&pmbus_info_one}, {"ncp4208", (kernel_ulong_t)&pmbus_info_one}, diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index d198af3a92b6..13b34bd67f23 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -22,6 +22,8 @@ enum pmbus_regs { PMBUS_CLEAR_FAULTS = 0x03, PMBUS_PHASE = 0x04, + PMBUS_WRITE_PROTECT = 0x10, + PMBUS_CAPABILITY = 0x19, PMBUS_QUERY = 0x1A, @@ -226,6 +228,15 @@ enum pmbus_regs { #define PB_OPERATION_CONTROL_ON BIT(7) /* + * WRITE_PROTECT + */ +#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */ +#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */ +#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */ + +#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT) + +/* * CAPABILITY */ #define PB_CAPABILITY_SMBALERT BIT(4) @@ -377,12 +388,12 @@ enum pmbus_sensor_classes { #define PMBUS_PAGE_VIRTUAL BIT(31) enum pmbus_data_format { linear = 0, direct, vid }; -enum vrm_version { vr11 = 0, vr12, vr13 }; +enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { int pages; /* Total number of pages */ enum pmbus_data_format format[PSC_NUM_CLASSES]; - enum vrm_version vrm_version; + enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */ /* * Support one set of coefficients for each sensor type * Used for chips providing data in direct mode. diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8470097907bc..d9c17feb7b4a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, long val = sensor->data; long rv = 0; - switch (data->info->vrm_version) { + switch (data->info->vrm_version[sensor->page]) { case vr11: if (val >= 0x02 && val <= 0xb2) rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100); @@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, if (val >= 0x01) rv = 500 + (val - 1) * 10; break; + case imvp9: + if (val >= 0x01) + rv = 200 + (val - 1) * 10; + break; + case amd625mv: + if (val >= 0x0 && val <= 0xd8) + rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100); + break; } return rv; } @@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, snprintf(sensor->name, sizeof(sensor->name), "%s%d", name, seq); + if (data->flags & PMBUS_WRITE_PROTECTED) + readonly = true; + sensor->page = page; sensor->reg = reg; sensor->class = class; @@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; + /* + * Check if the chip is write protected. If it is, we can not clear + * faults, and we should not try it. Also, in that case, writes into + * limit registers need to be disabled. 
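pmbus_reg2data_vid() above maps VID codes to millivolts with one formula per table, and the two new entries follow the same shape: imvp9 ascends from 200 mV in 10 mV steps, while amd625mv descends from 1.55 V in 6.25 mV steps, capped at code 0xd8. A standalone check of the added cases; note that the amd625mv ladder is the same one the k10temp changes earlier in this diff use for SVI voltage telemetry:

#include <stdio.h>

/* Millivolts for the two VID tables added above; 0 means off/out of range. */
static long vid_imvp9(int val)
{
	return val >= 0x01 ? 200 + (val - 1) * 10 : 0;
}

static long vid_amd625mv(int val)
{
	if (val < 0x0 || val > 0xd8)
		return 0;
	return (155000 - val * 625 + 50) / 100;	/* DIV_ROUND_CLOSEST(..., 100) */
}

int main(void)
{
	/* imvp9: code 0x21 -> 200 + 32 * 10 = 520 mV */
	printf("imvp9(0x21) = %ld mV\n", vid_imvp9(0x21));
	/* amd625mv: code 0x40 -> (155000 - 64 * 625) / 100 = 1150 mV */
	printf("amd625mv(0x40) = %ld mV\n", vid_amd625mv(0x40));
	return 0;
}

Making vrm_version a per-page array in the same series is what allows multi-rail controllers such as the pxe1610 and tps53679 below to mix tables, one per regulated output.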
+ */ + ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT); + if (ret > 0 && (ret & PB_WP_ANY)) + data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; + if (data->info->pages) pmbus_clear_faults(client); else diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c index ebe3f023f840..517584cff3de 100644 --- a/drivers/hwmon/pmbus/pxe1610.c +++ b/drivers/hwmon/pmbus/pxe1610.c @@ -19,26 +19,30 @@ static int pxe1610_identify(struct i2c_client *client, struct pmbus_driver_info *info) { - if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - u8 vout_mode; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_mode = ret & GENMASK(4, 0); - - switch (vout_mode) { - case 1: - info->vrm_version = vr12; - break; - case 2: - info->vrm_version = vr13; - break; - default: - return -ENODEV; + int i; + + for (i = 0; i < PXE1610_NUM_PAGES; i++) { + if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) { + u8 vout_mode; + int ret; + + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_mode = ret & GENMASK(4, 0); + + switch (vout_mode) { + case 1: + info->vrm_version[i] = vr12; + break; + case 2: + info->vrm_version[i] = vr13; + break; + default: + return -ENODEV; + } } } diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c index 86bb3aca09ed..9c22e9013dd7 100644 --- a/drivers/hwmon/pmbus/tps53679.c +++ b/drivers/hwmon/pmbus/tps53679.c @@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client, struct pmbus_driver_info *info) { u8 vout_params; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_params = ret & GENMASK(4, 0); - - switch (vout_params) { - case TPS53679_PROT_VR13_10MV: - case TPS53679_PROT_VR12_5_10MV: - info->vrm_version = vr13; - break; - case TPS53679_PROT_VR13_5MV: - case TPS53679_PROT_VR12_5MV: - case TPS53679_PROT_IMVP8_5MV: - info->vrm_version = vr12; - break; - default: - return -EINVAL; + int i, ret; + + for (i = 0; i < TPS53679_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case TPS53679_PROT_VR13_10MV: + case TPS53679_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case TPS53679_PROT_VR13_5MV: + case TPS53679_PROT_VR12_5MV: + case TPS53679_PROT_IMVP8_5MV: + info->vrm_version[i] = vr12; + break; + default: + return -EINVAL; + } } return 0; @@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client, static const struct i2c_device_id tps53679_id[] = { {"tps53679", 0}, + {"tps53688", 0}, {} }; @@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id); static const struct of_device_id __maybe_unused tps53679_of_match[] = { {.compatible = "ti,tps53679"}, + {.compatible = "ti,tps53688"}, {} }; MODULE_DEVICE_TABLE(of, tps53679_of_match); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index a9229c6b0e84..23ea3415f166 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -18,7 +18,8 @@ #include <linux/gpio/driver.h> #include "pmbus.h" -enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; +enum chips { ucd9000, ucd90120, ucd90124, ucd90160, 
ucd90320, ucd9090, + ucd90910 }; #define UCD9000_MONITOR_CONFIG 0xd5 #define UCD9000_NUM_PAGES 0xd6 @@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_OUTPUT 1 #define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07) -#define UCD9000_MON_PAGE(x) ((x) & 0x0f) +#define UCD9000_MON_PAGE(x) ((x) & 0x1f) #define UCD9000_MON_VOLTAGE 1 #define UCD9000_MON_TEMPERATURE 2 @@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_NAME_LEN 16 #define UCD9090_NUM_GPIOS 23 #define UCD901XX_NUM_GPIOS 26 +#define UCD90320_NUM_GPIOS 84 #define UCD90910_NUM_GPIOS 26 #define UCD9000_DEBUGFS_NAME_LEN 24 #define UCD9000_GPI_COUNT 8 +#define UCD90320_GPI_COUNT 32 struct ucd9000_data { u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX]; @@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = { {"ucd90120", ucd90120}, {"ucd90124", ucd90124}, {"ucd90160", ucd90160}, + {"ucd90320", ucd90320}, {"ucd9090", ucd9090}, {"ucd90910", ucd90910}, {} @@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = { .data = (void *)ucd90160 }, { + .compatible = "ti,ucd90320", + .data = (void *)ucd90320 + }, + { .compatible = "ti,ucd9090", .data = (void *)ucd9090 }, @@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client, case ucd90160: data->gpio.ngpio = UCD901XX_NUM_GPIOS; break; + case ucd90320: + data->gpio.ngpio = UCD90320_NUM_GPIOS; + break; case ucd90910: data->gpio.ngpio = UCD90910_NUM_GPIOS; break; @@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val) struct ucd9000_debugfs_entry *entry = data; struct i2c_client *client = entry->client; u8 buffer[I2C_SMBUS_BLOCK_MAX]; - int ret; + int ret, i; ret = ucd9000_get_mfr_status(client, buffer); if (ret < 0) return ret; /* - * Attribute only created for devices with gpi fault bits at bits - * 16-23, which is the second byte of the response. + * GPI fault bits are in sets of 8, two bytes from end of response. */ - *val = !!(buffer[1] & BIT(entry->index)); + i = ret - 3 - entry->index / 8; + if (i >= 0) + *val = !!(buffer[i] & BIT(entry->index % 8)); return 0; } @@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client, { struct dentry *debugfs; struct ucd9000_debugfs_entry *entries; - int i; + int i, gpi_count; char name[UCD9000_DEBUGFS_NAME_LEN]; debugfs = pmbus_get_debugfs_dir(client); @@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client, /* * Of the chips this driver supports, only the UCD9090, UCD90160, - * and UCD90910 report GPI faults in their MFR_STATUS register, so only - * create the GPI fault debugfs attributes for those chips. + * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS + * register, so only create the GPI fault debugfs attributes for those + * chips. */ if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 || - mid->driver_data == ucd90910) { + mid->driver_data == ucd90320 || mid->driver_data == ucd90910) { + gpi_count = mid->driver_data == ucd90320 ? 
UCD90320_GPI_COUNT + : UCD9000_GPI_COUNT; entries = devm_kcalloc(&client->dev, - UCD9000_GPI_COUNT, sizeof(*entries), + gpi_count, sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < UCD9000_GPI_COUNT; i++) { + for (i = 0; i < gpi_count; i++) { entries[i].client = client; entries[i].index = i; scnprintf(name, UCD9000_DEBUGFS_NAME_LEN, diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c new file mode 100644 index 000000000000..3d47806ff4d3 --- /dev/null +++ b/drivers/hwmon/pmbus/xdpe12284.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers + * + * Copyright (c) 2020 Mellanox Technologies. All rights reserved. + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include "pmbus.h" + +#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */ +#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */ +#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */ +#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */ +#define XDPE122_PAGE_NUM 2 + +static int xdpe122_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + u8 vout_params; + int i, ret; + + for (i = 0; i < XDPE122_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value. */ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case XDPE122_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case XDPE122_PROT_VR12_5MV: + info->vrm_version[i] = vr12; + break; + case XDPE122_PROT_IMVP9_10MV: + info->vrm_version[i] = imvp9; + break; + case XDPE122_AMD_625MV: + info->vrm_version[i] = amd625mv; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static struct pmbus_driver_info xdpe122_info = { + .pages = XDPE122_PAGE_NUM, + .format[PSC_VOLTAGE_IN] = linear, + .format[PSC_VOLTAGE_OUT] = vid, + .format[PSC_TEMPERATURE] = linear, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_POWER] = linear, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .identify = xdpe122_identify, +}; + +static int xdpe122_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pmbus_driver_info *info; + + info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + return pmbus_do_probe(client, id, info); +} + +static const struct i2c_device_id xdpe122_id[] = { + {"xdpe12254", 0}, + {"xdpe12284", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, xdpe122_id); + +static const struct of_device_id __maybe_unused xdpe122_of_match[] = { + {.compatible = "infineon,xdpe12254"}, + {.compatible = "infineon,xdpe12284"}, + {} +}; +MODULE_DEVICE_TABLE(of, xdpe122_of_match); + +static struct i2c_driver xdpe122_driver = { + .driver = { + .name = "xdpe12284", + .of_match_table = of_match_ptr(xdpe122_of_match), + }, + .probe = xdpe122_probe, + .remove = 
pmbus_do_remove, + .id_table = xdpe122_id, +}; + +module_i2c_driver(xdpe122_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 42ffd2e5182d..30b7b3ea8836 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int pwm_fan_suspend(struct device *dev) +static int pwm_fan_disable(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); struct pwm_args args; @@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev) return 0; } +static void pwm_fan_shutdown(struct platform_device *pdev) +{ + pwm_fan_disable(&pdev->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_fan_suspend(struct device *dev) +{ + return pwm_fan_disable(dev); +} + static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); @@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, + .shutdown = pwm_fan_shutdown, .driver = { .name = "pwm-fan", .pm = &pwm_fan_pm, diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index eb171d15ac48..7ffadc2da57b 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -28,8 +28,6 @@ * w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3 * w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 * w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3 - * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3 - * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -50,7 +48,7 @@ enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83627uhg, - w83667hg, w83667hg_b, nct6775, nct6776, + w83667hg, w83667hg_b, }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ @@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = { "w83627uhg", "w83667hg", "w83667hg", - "nct6775", - "nct6776", }; static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -static unsigned short fan_debounce; -module_param(fan_debounce, ushort, 0); -MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); - #define DRVNAME "w83627ehf" /* @@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_W83627UHG_ID 0xa230 #define SIO_W83667HG_ID 0xa510 #define SIO_W83667HG_B_ID 0xb350 -#define SIO_NCT6775_ID 0xb470 -#define SIO_NCT6776_ID 0xc330 #define SIO_ID_MASK 0xFFF0 static inline void @@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 }; #define W83627EHF_REG_DIODE 0x59 #define W83627EHF_REG_SMI_OVT 0x4C -/* NCT6775F has its own fan divider registers */ -#define NCT6775_REG_FANDIV1 0x506 -#define NCT6775_REG_FANDIV2 0x507 -#define NCT6775_REG_FAN_DEBOUNCE 0xf0 - #define W83627EHF_REG_ALARM1 0x459 #define W83627EHF_REG_ALARM2 0x45A #define W83627EHF_REG_ALARM3 0x45B @@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 }; -static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 }; -static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 }; -static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 }; -static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 }; -static const 
u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 }; -static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 }; -static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a }; -static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b }; -static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; -static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642}; - -static const u16 NCT6775_REG_TEMP[] - = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d }; -static const u16 NCT6775_REG_TEMP_CONFIG[] - = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A }; -static const u16 NCT6775_REG_TEMP_HYST[] - = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D }; -static const u16 NCT6775_REG_TEMP_OVER[] - = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C }; -static const u16 NCT6775_REG_TEMP_SOURCE[] - = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 }; - static const char *const w83667hg_b_temp_label[] = { "SYSTIN", "CPUTIN", @@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = { "PECI Agent 4" }; -static const char *const nct6775_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "AMD SB-TSI", - "PECI Agent 0", - "PECI Agent 1", - "PECI Agent 2", - "PECI Agent 3", - "PECI Agent 4", - "PECI Agent 5", - "PECI Agent 6", - "PECI Agent 7", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP" -}; - -static const char *const nct6776_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "SMBUSMASTER 0", - "SMBUSMASTER 1", - "SMBUSMASTER 2", - "SMBUSMASTER 3", - "SMBUSMASTER 4", - "SMBUSMASTER 5", - "SMBUSMASTER 6", - "SMBUSMASTER 7", - "PECI Agent 0", - "PECI Agent 1", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", - "BYTE_TEMP" -}; - -#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP) +#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP) static int is_word_sized(u16 reg) { @@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } -static unsigned int fan_from_reg13(u16 reg, unsigned int divreg) -{ - if ((reg & 0xff1f) == 0xff1f) - return 0; - - reg = (reg & 0x1f) | ((reg & 0xff00) >> 3); - - if (reg == 0) - return 0; - - return 1350000U / reg; -} - -static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) -{ - if (reg == 0 || reg == 0xffff) - return 0; - - /* - * Even though the registers are 16 bit wide, the fan divisor - * still applies. 
- */ - return 1350000U / (reg << divreg); -} - static inline unsigned int div_from_reg(u8 reg) { @@ -418,7 +306,6 @@ struct w83627ehf_data { int addr; /* IO base of hw monitor block */ const char *name; - struct device *hwmon_dev; struct mutex lock; u16 reg_temp[NUM_REG_TEMP]; @@ -428,20 +315,10 @@ struct w83627ehf_data { u8 temp_src[NUM_REG_TEMP]; const char * const *temp_label; - const u16 *REG_PWM; - const u16 *REG_TARGET; - const u16 *REG_FAN; - const u16 *REG_FAN_MIN; - const u16 *REG_FAN_START_OUTPUT; - const u16 *REG_FAN_STOP_OUTPUT; - const u16 *REG_FAN_STOP_TIME; const u16 *REG_FAN_MAX_OUTPUT; const u16 *REG_FAN_STEP_OUTPUT; const u16 *scale_in; - unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg); - unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg); - struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -457,7 +334,6 @@ struct w83627ehf_data { u8 fan_div[5]; u8 has_fan; /* some fan inputs can be disabled */ u8 has_fan_min; /* some fans don't have min register */ - bool has_fan_div; u8 temp_type[3]; s8 temp_offset[3]; s16 temp[9]; @@ -494,6 +370,7 @@ struct w83627ehf_data { u16 have_temp_offset; u8 in6_skip:1; u8 temp3_val_only:1; + u8 have_vid:1; #ifdef CONFIG_PM /* Remember extra register values over suspend/resume */ @@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg, } /* This function assumes that the caller holds data->update_lock */ -static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr) -{ - u8 reg; - - switch (nr) { - case 0: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70) - | (data->fan_div[0] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 1: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7) - | ((data->fan_div[1] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 2: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70) - | (data->fan_div[2] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - case 3: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7) - | ((data->fan_div[3] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - } -} - -/* This function assumes that the caller holds data->update_lock */ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) { u8 reg; @@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) } } -static void w83627ehf_write_fan_div_common(struct device *dev, - struct w83627ehf_data *data, int nr) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_write_fan_div(data, nr); - else - w83627ehf_write_fan_div(data, nr); -} - -static void nct6775_update_fan_div(struct w83627ehf_data *data) -{ - u8 i; - - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fan_div[0] = i & 0x7; - data->fan_div[1] = (i & 0x70) >> 4; - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - data->fan_div[2] = i & 0x7; - if (data->has_fan & (1<<3)) - data->fan_div[3] = (i & 0x70) >> 4; -} - static void w83627ehf_update_fan_div(struct w83627ehf_data *data) { int i; @@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data) } } -static void w83627ehf_update_fan_div_common(struct device *dev, - struct w83627ehf_data *data) -{ - 
struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_update_fan_div(data); - else - w83627ehf_update_fan_div(data); -} - -static void nct6775_update_pwm(struct w83627ehf_data *data) -{ - int i; - int pwmcfg, fanmodecfg; - - for (i = 0; i < data->pwm_num; i++) { - pwmcfg = w83627ehf_read_value(data, - W83627EHF_REG_PWM_ENABLE[i]); - fanmodecfg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[i]); - data->pwm_mode[i] = - ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; - data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1; - data->tolerance[i] = fanmodecfg & 0x0f; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); - } -} - static void w83627ehf_update_pwm(struct w83627ehf_data *data) { int i; @@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data) ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) & 3) + 1; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]); data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f; } } -static void w83627ehf_update_pwm_common(struct device *dev, - struct w83627ehf_data *data) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) - nct6775_update_pwm(data); - else - w83627ehf_update_pwm(data); -} - static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int i; mutex_lock(&data->update_lock); @@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (time_after(jiffies, data->last_updated + HZ + HZ/2) || !data->valid) { /* Fan clock dividers */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Measured voltages and limits */ for (i = 0; i < data->in_num; i++) { @@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (!(data->has_fan & (1 << i))) continue; - reg = w83627ehf_read_value(data, data->REG_FAN[i]); - data->rpm[i] = data->fan_from_reg(reg, - data->fan_div[i]); + reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]); + data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]); if (data->has_fan_min & (1 << i)) data->fan_min[i] = w83627ehf_read_value(data, - data->REG_FAN_MIN[i]); + W83627EHF_REG_FAN_MIN[i]); /* * If we failed to measure the fan speed and clock * divider can be increased, let's try that for next * time */ - if (data->has_fan_div - && (reg >= 0xff || (sio_data->kind == nct6775 - && reg == 0x00)) - && data->fan_div[i] < 0x07) { + if (reg >= 0xff && data->fan_div[i] < 0x07) { dev_dbg(dev, "Increasing fan%d clock divider from %u to %u\n", i + 1, div_from_reg(data->fan_div[i]), div_from_reg(data->fan_div[i] + 1)); data->fan_div[i]++; - w83627ehf_write_fan_div_common(dev, data, i); + w83627ehf_write_fan_div(data, i); /* Preserve min limit if possible */ if ((data->has_fan_min & (1 << i)) && data->fan_min[i] >= 2 && data->fan_min[i] != 255) w83627ehf_write_value(data, - data->REG_FAN_MIN[i], + W83627EHF_REG_FAN_MIN[i], (data->fan_min[i] /= 2)); } } - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) { if (!(data->has_fan & (1 << i))) @@ -857,13 +631,13 @@ 
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->fan_start_output[i] = w83627ehf_read_value(data, - data->REG_FAN_START_OUTPUT[i]); + W83627EHF_REG_FAN_START_OUTPUT[i]); data->fan_stop_output[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_OUTPUT[i]); + W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_TIME[i]); + W83627EHF_REG_FAN_STOP_TIME[i]); if (data->REG_FAN_MAX_OUTPUT && data->REG_FAN_MAX_OUTPUT[i] != 0xff) @@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->target_temp[i] = w83627ehf_read_value(data, - data->REG_TARGET[i]) & + W83627EHF_REG_TARGET[i]) & (data->pwm_mode[i] == 1 ? 0x7f : 0xff); } @@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) return data; } -/* - * Sysfs callback functions - */ -#define show_in_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \ - data->scale_in)); \ -} -show_in_reg(in) -show_in_reg(in_min) -show_in_reg(in_max) - #define store_in_reg(REG, reg) \ -static ssize_t \ -store_in_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - unsigned long val; \ - int err; \ - err = kstrtoul(buf, 10, &val); \ - if (err < 0) \ - return err; \ + if (val < 0) \ + return -EINVAL; \ mutex_lock(&data->update_lock); \ - data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \ - w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \ - data->in_##reg[nr]); \ + data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \ + w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \ + data->in_##reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_in_reg(MIN, min) store_in_reg(MAX, max) -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, - char *buf) +static int +store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01); -} - -static struct sensor_device_attribute sda_in_input[] = { - SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), - SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), - SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), - SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), - SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), - SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), - SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), - SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), - SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), - SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), -}; - -static struct sensor_device_attribute sda_in_alarm[] = { - SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), - 
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), - SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), - SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), - SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), - SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21), - SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20), - SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16), - SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17), - SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19), -}; - -static struct sensor_device_attribute sda_in_min[] = { - SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), - SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), - SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), - SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), - SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), - SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), - SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), - SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), - SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), - SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), -}; - -static struct sensor_device_attribute sda_in_max[] = { - SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), - SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), - SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), - SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), - SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), - SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), - SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), - SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), - SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), - SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), -}; - -static ssize_t -show_fan(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", data->rpm[nr]); -} - -static ssize_t -show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", - data->fan_from_reg_min(data->fan_min[nr], - data->fan_div[nr])); -} - -static ssize_t -show_fan_div(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); -} - -static ssize_t -store_fan_min(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; unsigned int reg; u8 new_div; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; + if (val < 0) + return -EINVAL; 
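/*
 * Worked example (illustrative, not from the patch): a requested
 * minimum of 600 RPM gives reg = 1350000 / 600 = 2250; the divider
 * loop below halves it (2250 -> 1125 -> 562 -> 281 -> 140) until it
 * fits the register, so new_div = 4 (divider 16) and the stored limit
 * reads back through fan_from_reg8() as 1350000 / (140 << 4) = 602 RPM.
 */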
mutex_lock(&data->update_lock); - if (!data->has_fan_div) { - /* - * Only NCT6776F for now, so we know that this is a 13 bit - * register - */ - if (!val) { - val = 0xff1f; - } else { - if (val > 1350000U) - val = 135000U; - val = 1350000U / val; - val = (val & 0x1f) | ((val << 3) & 0xff00); - } - data->fan_min[nr] = val; - goto done; /* Leave fan divider alone */ - } if (!val) { /* No min limit, alarm disabled */ - data->fan_min[nr] = 255; - new_div = data->fan_div[nr]; /* No change */ - dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1); + data->fan_min[channel] = 255; + new_div = data->fan_div[channel]; /* No change */ + dev_info(dev, "fan%u low limit and alarm disabled\n", + channel + 1); } else if ((reg = 1350000U / val) >= 128 * 255) { /* * Speed below this value cannot possibly be represented, * even with the highest divider (128) */ - data->fan_min[nr] = 254; + data->fan_min[channel] = 254; new_div = 7; /* 128 == (1 << 7) */ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to minimum\n", - nr + 1, val, data->fan_from_reg_min(254, 7)); + channel + 1, val, fan_from_reg8(254, 7)); } else if (!reg) { /* * Speed above this value cannot possibly be represented, * even with the lowest divider (1) */ - data->fan_min[nr] = 1; + data->fan_min[channel] = 1; new_div = 0; /* 1 == (1 << 0) */ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to maximum\n", - nr + 1, val, data->fan_from_reg_min(1, 0)); + channel + 1, val, fan_from_reg8(1, 0)); } else { /* * Automatically pick the best divider, i.e. the one such @@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr, reg >>= 1; new_div++; } - data->fan_min[nr] = reg; + data->fan_min[channel] = reg; } /* * Write both the fan clock divider (if it changed) and the new * fan min (unconditionally) */ - if (new_div != data->fan_div[nr]) { + if (new_div != data->fan_div[channel]) { dev_dbg(dev, "fan%u clock divider changed from %u to %u\n", - nr + 1, div_from_reg(data->fan_div[nr]), + channel + 1, div_from_reg(data->fan_div[channel]), div_from_reg(new_div)); - data->fan_div[nr] = new_div; - w83627ehf_write_fan_div_common(dev, data, nr); + data->fan_div[channel] = new_div; + w83627ehf_write_fan_div(data, channel); /* Give the chip time to sample a new speed value */ data->last_updated = jiffies; } -done: - w83627ehf_write_value(data, data->REG_FAN_MIN[nr], - data->fan_min[nr]); - mutex_unlock(&data->update_lock); - return count; -} - -static struct sensor_device_attribute sda_fan_input[] = { - SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), - SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), - SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), - SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), - SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), -}; - -static struct sensor_device_attribute sda_fan_alarm[] = { - SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), - SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), - SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), - SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10), - SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23), -}; - -static struct sensor_device_attribute sda_fan_min[] = { - SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 0), - SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 1), - SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 2), - SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 3), - 
SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 4), -}; - -static struct sensor_device_attribute sda_fan_div[] = { - SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), - SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), - SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), - SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3), - SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), -}; - -static ssize_t -show_temp_label(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); -} + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel], + data->fan_min[channel]); + mutex_unlock(&data->update_lock); -#define show_temp_reg(addr, reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \ + return 0; } -show_temp_reg(reg_temp, temp); -show_temp_reg(reg_temp_over, temp_max); -show_temp_reg(reg_temp_hyst, temp_max_hyst); #define store_temp_reg(addr, reg) \ -static ssize_t \ -store_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - int err; \ - long val; \ - err = kstrtol(buf, 10, &val); \ - if (err < 0) \ - return err; \ mutex_lock(&data->update_lock); \ - data->reg[nr] = LM75_TEMP_TO_REG(val); \ - w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \ + data->reg[channel] = LM75_TEMP_TO_REG(val); \ + w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_temp_reg(reg_temp_over, temp_max); store_temp_reg(reg_temp_hyst, temp_max_hyst); -static ssize_t -show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) +static int +store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - - return sprintf(buf, "%d\n", - data->temp_offset[sensor_attr->index] * 1000); -} - -static ssize_t -store_temp_offset(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); mutex_lock(&data->update_lock); - data->temp_offset[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val); + data->temp_offset[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val); mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t -show_temp_type(struct device *dev, 
struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", (int)data->temp_type[nr]); -} - -static struct sensor_device_attribute sda_temp_input[] = { - SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), - SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), - SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), - SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3), - SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4), - SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5), - SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6), - SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7), - SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_label[] = { - SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0), - SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1), - SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2), - SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3), - SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4), - SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5), - SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6), - SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7), - SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_max[] = { - SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 0), - SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 1), - SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 2), - SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 3), - SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 4), - SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 5), - SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 6), - SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 7), - SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 8), -}; - -static struct sensor_device_attribute sda_temp_max_hyst[] = { - SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 0), - SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 1), - SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 2), - SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 3), - SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 4), - SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 5), - SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 6), - SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 7), - SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 8), -}; - -static struct sensor_device_attribute sda_temp_alarm[] = { - SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), - SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), - SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), -}; - -static struct sensor_device_attribute sda_temp_type[] = { - SENSOR_ATTR(temp1_type, S_IRUGO, 
show_temp_type, NULL, 0), - SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1), - SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2), -}; - -static struct sensor_device_attribute sda_temp_offset[] = { - SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 0), - SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 1), - SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 2), -}; - -#define show_pwm_reg(reg) \ -static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", data->reg[nr]); \ + return 0; } -show_pwm_reg(pwm_mode) -show_pwm_reg(pwm_enable) -show_pwm_reg(pwm) - -static ssize_t -store_pwm_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (val > 1) - return -EINVAL; - - /* On NCT67766F, DC mode is only supported for pwm1 */ - if (sio_data->kind == nct6776 && nr && val != 1) + if (val < 0 || val > 1) return -EINVAL; mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - data->pwm_mode[nr] = val; - reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]); + data->pwm_mode[channel] = val; + reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]); if (!val) - reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); - data->pwm[nr] = val; - w83627ehf_write_value(data, data->REG_PWM[nr], val); + data->pwm[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm_enable(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = 
kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (!val || (val > 4 && val != data->pwm_enable_orig[nr])) - return -EINVAL; - /* SmartFan III mode is not supported on NCT6776F */ - if (sio_data->kind == nct6776 && val == 4) + if (!val || val < 0 || + (val > 4 && val != data->pwm_enable_orig[channel])) return -EINVAL; mutex_lock(&data->update_lock); - data->pwm_enable[nr] = val; - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - reg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[nr]); - reg &= 0x0f; - reg |= (val - 1) << 4; - w83627ehf_write_value(data, - NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); - reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); - } + data->pwm_enable[channel] = val; + reg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[channel]); + reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]); + reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], + reg); mutex_unlock(&data->update_lock); - return count; + return 0; } - #define show_tol_temp(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); data->target_temp[nr] = val; - w83627ehf_write_value(data, data->REG_TARGET[nr], val); + w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; u16 reg; @@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr, val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); mutex_lock(&data->update_lock); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* Limit tolerance further for NCT6776F */ - if (sio_data->kind == nct6776 && val > 7) - val = 7; - reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); + if (nr == 1) + reg = (reg & 0x0f) | (val << 4); + else reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); - if (nr == 1) - reg = (reg & 0x0f) | (val << 4); - else - reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); - } + w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); data->tolerance[nr] = val; mutex_unlock(&data->update_lock); return count; } -static struct sensor_device_attribute sda_pwm[] = { - SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), - SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), - SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), - SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, 
store_pwm, 3), -}; - -static struct sensor_device_attribute sda_pwm_mode[] = { - SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 0), - SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 1), - SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 2), - SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 3), -}; - -static struct sensor_device_attribute sda_pwm_enable[] = { - SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 0), - SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 1), - SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 2), - SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 3), -}; - -static struct sensor_device_attribute sda_target_temp[] = { - SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 0), - SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 1), - SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 2), - SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 3), -}; - -static struct sensor_device_attribute sda_tolerance[] = { - SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 0), - SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 1), - SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 2), - SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 3), -}; +static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp, + store_target_temp, 0); +static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp, + store_target_temp, 1); +static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp, + store_target_temp, 2); +static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp, + store_target_temp, 3); + +static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance, + store_tolerance, 0); +static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance, + store_tolerance, 1); +static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance, + store_tolerance, 2); +static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance, + store_tolerance, 3); /* Smart Fan registers */ @@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = { static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = clamp_val(val, 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } -fan_functions(fan_start_output, FAN_START_OUTPUT) -fan_functions(fan_stop_output, FAN_STOP_OUTPUT) -fan_functions(fan_max_output, FAN_MAX_OUTPUT) -fan_functions(fan_step_output, FAN_STEP_OUTPUT) +fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT) +fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT) +fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT) 
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT) #define fan_time_functions(reg, REG) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } \ -fan_time_functions(fan_stop_time, FAN_STOP_TIME) - -static ssize_t name_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - - return sprintf(buf, "%s\n", data->name); -} -static DEVICE_ATTR_RO(name); - -static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { - SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 3), - SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 3), - SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 3), - SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 3), - SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 3), -}; - -static struct sensor_device_attribute sda_sf3_arrays_fan3[] = { - SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 2), - SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 2), - SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 2), -}; - -static struct sensor_device_attribute sda_sf3_arrays[] = { - SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 0), - SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 1), - SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 0), - SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 1), - SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 0), - SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 1), -}; +fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME) + +static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 3); +static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output, + store_fan_start_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output, + store_fan_max_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output, + store_fan_step_output, 3); + +static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 2); +static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output, + store_fan_start_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output, + 
store_fan_stop_output, 2); + +static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 1); +static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output, + store_fan_start_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output, + store_fan_start_output, 1); +static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 1); /* * pwm1 and pwm3 don't support max and step settings on all chips. * Need to check support while generating/removing attribute files. */ -static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { - SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 0), - SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 0), - SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 1), - SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 1), - SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 2), - SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 2), -}; +static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output, + store_fan_max_output, 0); +static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output, + store_fan_step_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output, + store_fan_max_output, 1); +static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output, + store_fan_step_output, 1); +static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output, + store_fan_max_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output, + store_fan_step_output, 2); static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) struct w83627ehf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } -static DEVICE_ATTR_RO(cpu0_vid); +DEVICE_ATTR_RO(cpu0_vid); /* Case open detection */ - -static ssize_t -show_caseopen(struct device *dev, struct device_attribute *attr, char *buf) +static int +clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - - return sprintf(buf, "%d\n", - !!(data->caseopen & to_sensor_dev_attr_2(attr)->index)); -} - -static ssize_t -clear_caseopen(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - unsigned long val; - u16 reg, mask; + const u16 mask = 0x80; + u16 reg; - if (kstrtoul(buf, 10, &val) || val != 0) + if (val != 0 || channel != 0) return -EINVAL; - mask = to_sensor_dev_attr_2(attr)->nr; - mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR); w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask); @@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr, data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); - return count; + return 0; } 
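/*
 * Illustrative usage, assuming the usual hwmon sysfs conventions: user
 * space clears the latched intrusion alarm by writing 0 to the alarm
 * attribute, e.g. "echo 0 > /sys/class/hwmon/hwmonX/intrusion0_alarm",
 * which presumably ends up in clear_caseopen() with channel == 0 and
 * val == 0 and sets the 0x80 clear bit in W83627EHF_REG_CASEOPEN_CLR.
 */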
-static struct sensor_device_attribute_2 sda_caseopen[] = { - SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x80, 0x10), - SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x40, 0x40), -}; - -/* - * Driver and device management - */ - -static void w83627ehf_device_remove_files(struct device *dev) +static umode_t w83627ehf_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) { - /* - * some entries in the following arrays may not have been used in - * device_create_file(), but device_remove_file() will ignore them - */ - int i; + struct device *dev = container_of(kobj, struct device, kobj); struct w83627ehf_data *data = dev_get_drvdata(dev); + struct device_attribute *devattr; + struct sensor_device_attribute *sda; + + devattr = container_of(a, struct device_attribute, attr); + + /* Not sensor */ + if (devattr->show == cpu0_vid_show && data->have_vid) + return a->mode; + + sda = (struct sensor_device_attribute *)devattr; + + if (sda->index < 2 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index < 3 && + (devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output) && + data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff) + return a->mode; + + /* if fan3 and fan4 are enabled create the files for them */ + if (sda->index == 2 && + (data->has_fan & (1 << 2)) && data->pwm_num >= 3 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index == 3 && + (data->has_fan & (1 << 3)) && data->pwm_num >= 4 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output || + devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output)) + return a->mode; + + if ((devattr->show == show_target_temp || + devattr->show == show_tolerance) && + (data->has_fan & (1 << sda->index)) && + sda->index < data->pwm_num) + return a->mode; - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) - device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) - device_remove_file(dev, &attr->dev_attr); - } - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) - device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) - device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - device_remove_file(dev, &sda_in_input[i].dev_attr); - device_remove_file(dev, &sda_in_alarm[i].dev_attr); - device_remove_file(dev, &sda_in_min[i].dev_attr); - device_remove_file(dev, &sda_in_max[i].dev_attr); - } - for (i = 0; i < 5; i++) { - device_remove_file(dev, &sda_fan_input[i].dev_attr); - device_remove_file(dev, &sda_fan_alarm[i].dev_attr); - device_remove_file(dev, &sda_fan_div[i].dev_attr); - device_remove_file(dev, &sda_fan_min[i].dev_attr); - } - for (i = 0; i < data->pwm_num; i++) { - device_remove_file(dev, &sda_pwm[i].dev_attr); - device_remove_file(dev, &sda_pwm_mode[i].dev_attr); - device_remove_file(dev, &sda_pwm_enable[i].dev_attr); - 
device_remove_file(dev, &sda_target_temp[i].dev_attr); - device_remove_file(dev, &sda_tolerance[i].dev_attr); - } - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - device_remove_file(dev, &sda_temp_input[i].dev_attr); - device_remove_file(dev, &sda_temp_label[i].dev_attr); - if (i == 2 && data->temp3_val_only) - continue; - device_remove_file(dev, &sda_temp_max[i].dev_attr); - device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr); - if (i > 2) - continue; - device_remove_file(dev, &sda_temp_alarm[i].dev_attr); - device_remove_file(dev, &sda_temp_type[i].dev_attr); - device_remove_file(dev, &sda_temp_offset[i].dev_attr); - } + return 0; +} + +/* These groups handle non-standard attributes used in this device */ +static struct attribute *w83627ehf_attrs[] = { + + &sensor_dev_attr_pwm1_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm1_start_output.dev_attr.attr, + &sensor_dev_attr_pwm1_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm1_max_output.dev_attr.attr, + &sensor_dev_attr_pwm1_step_output.dev_attr.attr, + &sensor_dev_attr_pwm1_target.dev_attr.attr, + &sensor_dev_attr_pwm1_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm2_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm2_start_output.dev_attr.attr, + &sensor_dev_attr_pwm2_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm2_max_output.dev_attr.attr, + &sensor_dev_attr_pwm2_step_output.dev_attr.attr, + &sensor_dev_attr_pwm2_target.dev_attr.attr, + &sensor_dev_attr_pwm2_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm3_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm3_start_output.dev_attr.attr, + &sensor_dev_attr_pwm3_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm3_max_output.dev_attr.attr, + &sensor_dev_attr_pwm3_step_output.dev_attr.attr, + &sensor_dev_attr_pwm3_target.dev_attr.attr, + &sensor_dev_attr_pwm3_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm4_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm4_start_output.dev_attr.attr, + &sensor_dev_attr_pwm4_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm4_max_output.dev_attr.attr, + &sensor_dev_attr_pwm4_step_output.dev_attr.attr, + &sensor_dev_attr_pwm4_target.dev_attr.attr, + &sensor_dev_attr_pwm4_tolerance.dev_attr.attr, + + &dev_attr_cpu0_vid.attr, + NULL +}; - device_remove_file(dev, &sda_caseopen[0].dev_attr); - device_remove_file(dev, &sda_caseopen[1].dev_attr); +static const struct attribute_group w83627ehf_group = { + .attrs = w83627ehf_attrs, + .is_visible = w83627ehf_attrs_visible, +}; - device_remove_file(dev, &dev_attr_name); - device_remove_file(dev, &dev_attr_cpu0_vid); -} +static const struct attribute_group *w83627ehf_groups[] = { + &w83627ehf_group, + NULL +}; + +/* + * Driver and device management + */ /* Get the monitoring functions started */ static inline void w83627ehf_init_device(struct w83627ehf_data *data, @@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data, } } -static void w82627ehf_swap_tempreg(struct w83627ehf_data *data, - int r1, int r2) -{ - swap(data->temp_src[r1], data->temp_src[r2]); - swap(data->reg_temp[r1], data->reg_temp[r2]); - swap(data->reg_temp_over[r1], data->reg_temp_over[r2]); - swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]); - swap(data->reg_temp_config[r1], data->reg_temp_config[r2]); -} - static void w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp) { @@ -1954,7 +1293,7 @@ static void w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, struct w83627ehf_data *data) { - int fan3pin, fan4pin, fan4min, fan5pin, 
regval; + int fan3pin, fan4pin, fan5pin, regval; /* The W83627UHG is simple, only two fan inputs, no config */ if (sio_data->kind == w83627uhg) { @@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == nct6775) { - /* On NCT6775, fan4 shares pins with the fdc interface */ - fan3pin = 1; - fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80); - fan4min = 0; - fan5pin = 0; - } else if (sio_data->kind == nct6776) { - bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); - - if (regval & 0x80) - fan3pin = gpok; - else - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - - if (regval & 0x40) - fan4pin = gpok; - else - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - - if (regval & 0x20) - fan5pin = gpok; - else - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); - - fan4min = fan4pin; - } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; - fan4min = fan4pin; } else { fan3pin = 1; fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06); fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); - fan4min = fan4pin; } data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */ data->has_fan |= (fan3pin << 2); data->has_fan_min |= (fan3pin << 2); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* - * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 - * register - */ - data->has_fan |= (fan4pin << 3) | (fan5pin << 4); - data->has_fan_min |= (fan4min << 3) | (fan5pin << 4); - } else { - /* - * It looks like fan4 and fan5 pins can be alternatively used - * as fan on/off switches, but fan5 control is write only :/ - * We assume that if the serial interface is disabled, designers - * connected fan5 as input unless they are emitting log 1, which - * is not the default. - */ - regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); - if ((regval & (1 << 2)) && fan4pin) { - data->has_fan |= (1 << 3); - data->has_fan_min |= (1 << 3); + /* + * It looks like fan4 and fan5 pins can be alternatively used + * as fan on/off switches, but fan5 control is write only :/ + * We assume that if the serial interface is disabled, designers + * connected fan5 as input unless they are emitting log 1, which + * is not the default. + */ + regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); + if ((regval & (1 << 2)) && fan4pin) { + data->has_fan |= (1 << 3); + data->has_fan_min |= (1 << 3); + } + if (!(regval & (1 << 1)) && fan5pin) { + data->has_fan |= (1 << 4); + data->has_fan_min |= (1 << 4); + } +} + +static umode_t +w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct w83627ehf_data *data = drvdata; + + switch (type) { + case hwmon_temp: + /* channel 0.., name 1.. 
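(hwmon channels count from 0 while the sysfs names generated from them count from 1, so channel 0 is temp1)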
*/ + if (!(data->have_temp & (1 << channel))) + return 0; + if (attr == hwmon_temp_input || attr == hwmon_temp_label) + return 0444; + if (channel == 2 && data->temp3_val_only) + return 0; + if (attr == hwmon_temp_max) { + if (data->reg_temp_over[channel]) + return 0644; + else + return 0; + } + if (attr == hwmon_temp_max_hyst) { + if (data->reg_temp_hyst[channel]) + return 0644; + else + return 0; + } + if (channel > 2) + return 0; + if (attr == hwmon_temp_alarm || attr == hwmon_temp_type) + return 0444; + if (attr == hwmon_temp_offset) { + if (data->have_temp_offset & (1 << channel)) + return 0644; + else + return 0; + } + break; + + case hwmon_fan: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel))) + return 0; + if (attr == hwmon_fan_input || attr == hwmon_fan_alarm) + return 0444; + if (attr == hwmon_fan_div) { + return 0444; } - if (!(regval & (1 << 1)) && fan5pin) { - data->has_fan |= (1 << 4); - data->has_fan_min |= (1 << 4); + if (attr == hwmon_fan_min) { + if (data->has_fan_min & (1 << channel)) + return 0644; + else + return 0; } + break; + + case hwmon_in: + /* channel 0.., name 0.. */ + if (channel >= data->in_num) + return 0; + if (channel == 6 && data->in6_skip) + return 0; + if (attr == hwmon_in_alarm || attr == hwmon_in_input) + return 0444; + if (attr == hwmon_in_min || attr == hwmon_in_max) + return 0644; + break; + + case hwmon_pwm: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel)) || + channel >= data->pwm_num) + return 0; + if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable || + attr == hwmon_pwm_input) + return 0644; + break; + + case hwmon_intrusion: + return 0644; + + default: /* Shouldn't happen */ + return 0; } + + return 0; /* Shouldn't happen */ } +static int +w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_temp_input: + *val = LM75_TEMP_FROM_REG(data->temp[channel]); + return 0; + case hwmon_temp_max: + *val = LM75_TEMP_FROM_REG(data->temp_max[channel]); + return 0; + case hwmon_temp_max_hyst: + *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]); + return 0; + case hwmon_temp_offset: + *val = data->temp_offset[channel] * 1000; + return 0; + case hwmon_temp_type: + *val = (int)data->temp_type[channel]; + return 0; + case hwmon_temp_alarm: + if (channel < 3) { + int bit[] = { 4, 5, 13 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_in_input: + *val = in_from_reg(data->in[channel], channel, data->scale_in); + return 0; + case hwmon_in_min: + *val = in_from_reg(data->in_min[channel], channel, + data->scale_in); + return 0; + case hwmon_in_max: + *val = in_from_reg(data->in_max[channel], channel, + data->scale_in); + return 0; + case hwmon_in_alarm: + if (channel < 10) { + int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + *val = data->rpm[channel]; + return 0; + case hwmon_fan_min: + *val = fan_from_reg8(data->fan_min[channel], + data->fan_div[channel]); + return 0; + case hwmon_fan_div: + *val = div_from_reg(data->fan_div[channel]); + return 0; + case hwmon_fan_alarm: + 
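/* the per-fan alarm bits are scattered: fan1..fan5 map to bits 6, 7, 11, 10 and 23 of the alarm word */ +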
if (channel < 5) { + int bit[] = { 6, 7, 11, 10, 23 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_pwm_input: + *val = data->pwm[channel]; + return 0; + case hwmon_pwm_enable: + *val = data->pwm_enable[channel]; + return 0; + case hwmon_pwm_mode: + *val = data->pwm_enable[channel]; + return 0; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + if (attr != hwmon_intrusion_alarm || channel != 0) + return -EOPNOTSUPP; /* shouldn't happen */ + + *val = !!(data->caseopen & 0x10); + return 0; +} + +static int +w83627ehf_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); + + switch (type) { + case hwmon_fan: + return w83627ehf_do_read_fan(data, attr, channel, val); + + case hwmon_in: + return w83627ehf_do_read_in(data, attr, channel, val); + + case hwmon_pwm: + return w83627ehf_do_read_pwm(data, attr, channel, val); + + case hwmon_temp: + return w83627ehf_do_read_temp(data, attr, channel, val); + + case hwmon_intrusion: + return w83627ehf_do_read_intrusion(data, attr, channel, val); + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + if (attr == hwmon_temp_label) { + *str = data->temp_label[data->temp_src[channel]]; + return 0; + } + break; + + default: + break; + } + /* Nothing else should be read as a string */ + return -EOPNOTSUPP; +} + +static int +w83627ehf_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + if (type == hwmon_in && attr == hwmon_in_min) + return store_in_min(dev, data, channel, val); + if (type == hwmon_in && attr == hwmon_in_max) + return store_in_max(dev, data, channel, val); + + if (type == hwmon_fan && attr == hwmon_fan_min) + return store_fan_min(dev, data, channel, val); + + if (type == hwmon_temp && attr == hwmon_temp_max) + return store_temp_max(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_max_hyst) + return store_temp_max_hyst(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_offset) + return store_temp_offset(dev, data, channel, val); + + if (type == hwmon_pwm && attr == hwmon_pwm_mode) + return store_pwm_mode(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_enable) + return store_pwm_enable(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_input) + return store_pwm(dev, data, channel, val); + + if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm) + return clear_caseopen(dev, data, channel, val); + + return -EOPNOTSUPP; +} + +static const struct hwmon_ops w83627ehf_ops = { + .is_visible = w83627ehf_is_visible, + .read = w83627ehf_read, + .read_string = w83627ehf_read_string, + .write = w83627ehf_write, +}; + +static const struct hwmon_channel_info *w83627ehf_info[] = { + HWMON_CHANNEL_INFO(fan, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, 
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN), + HWMON_CHANNEL_INFO(in, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE), + HWMON_CHANNEL_INFO(temp, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE), + HWMON_CHANNEL_INFO(intrusion, + HWMON_INTRUSION_ALARM), + NULL +}; + +static const struct hwmon_chip_info w83627ehf_chip_info = { + .ops = &w83627ehf_ops, + .info = w83627ehf_info, +}; + static int w83627ehf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev) struct resource *res; u8 en_vrm10; int i, err = 0; + struct device *hwmon_dev; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) { @@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; - /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */ + /* 667HG has 3 pwms, and 627UHG has only 2 */ switch (sio_data->kind) { default: data->pwm_num = 4; break; case w83667hg: case w83667hg_b: - case nct6775: - case nct6776: data->pwm_num = 3; break; case w83627uhg: @@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp = 0x07; /* Deal with temperature register setup first. */ - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - int mask = 0; - - /* - * Display temperature sensor output only if it monitors - * a source other than one already reported. 
Always display - * first three temperature registers, though. - */ - for (i = 0; i < NUM_REG_TEMP; i++) { - u8 src; - - data->reg_temp[i] = NCT6775_REG_TEMP[i]; - data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i]; - data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i]; - data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i]; - - src = w83627ehf_read_value(data, - NCT6775_REG_TEMP_SOURCE[i]); - src &= 0x1f; - if (src && !(mask & (1 << src))) { - data->have_temp |= 1 << i; - mask |= 1 << src; - } - - data->temp_src[i] = src; - - /* - * Now do some register swapping if index 0..2 don't - * point to SYSTIN(1), CPUIN(2), and AUXIN(3). - * Idea is to have the first three attributes - * report SYSTIN, CPUIN, and AUXIN if possible - * without overriding the basic system configuration. - */ - if (i > 0 && data->temp_src[0] != 1 - && data->temp_src[i] == 1) - w82627ehf_swap_tempreg(data, 0, i); - if (i > 1 && data->temp_src[1] != 2 - && data->temp_src[i] == 2) - w82627ehf_swap_tempreg(data, 1, i); - if (i > 2 && data->temp_src[2] != 3 - && data->temp_src[i] == 3) - w82627ehf_swap_tempreg(data, 2, i); - } - if (sio_data->kind == nct6776) { - /* - * On NCT6776, AUXTIN and VIN3 pins are shared. - * Only way to detect it is to check if AUXTIN is used - * as a temperature source, and if that source is - * enabled. - * - * If that is the case, disable in6, which reports VIN3. - * Otherwise disable temp3. - */ - if (data->temp_src[2] == 3) { - u8 reg; - - if (data->reg_temp_config[2]) - reg = w83627ehf_read_value(data, - data->reg_temp_config[2]); - else - reg = 0; /* Assume AUXTIN is used */ - - if (reg & 0x01) - data->have_temp &= ~(1 << 2); - else - data->in6_skip = 1; - } - data->temp_label = nct6776_temp_label; - } else { - data->temp_label = nct6775_temp_label; - } - data->have_temp_offset = data->have_temp & 0x07; - for (i = 0; i < 3; i++) { - if (data->temp_src[i] > 3) - data->have_temp_offset &= ~(1 << i); - } - } else if (sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg_b) { u8 reg; w83627ehf_set_temp_reg_ehf(data, 4); @@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp_offset = data->have_temp & 0x07; } - if (sio_data->kind == nct6775) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg16; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT; - data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT; - } else if (sio_data->kind == nct6776) { - data->has_fan_div = false; - data->fan_from_reg = fan_from_reg13; - data->fan_from_reg_min = fan_from_reg13; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = NCT6776_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - } else if (sio_data->kind == w83667hg_b) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - 
data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; + if (sio_data->kind == w83667hg_b) { data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; } else { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; data->REG_FAN_STEP_OUTPUT = @@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev) goto exit_release; /* Read VID value */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b || - sio_data->kind == nct6775 || sio_data->kind == nct6776) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { /* * W83667HG has different pins for VID input and output, so * we can get the VID input values directly at logical device D @@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev) */ superio_select(sio_data->sioreg, W83667HG_LD_VID); data->vid = superio_inb(sio_data->sioreg, 0xe3); - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else if (sio_data->kind != w83627uhg) { superio_select(sio_data->sioreg, W83627EHF_LD_HWM); if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) { @@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev) SIO_REG_VID_DATA); if (sio_data->kind == w83627ehf) /* 6 VID pins only */ data->vid &= 0x3f; - - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else { dev_info(dev, "VID pins in output mode, CPU VID not available\n"); } } - if (fan_debounce && - (sio_data->kind == nct6775 || sio_data->kind == nct6776)) { - u8 tmp; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE); - if (sio_data->kind == nct6776) - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x3e | tmp); - else - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x1e | tmp); - pr_info("Enabled fan debounce for chip %s\n", data->name); - } - w83627ehf_check_fan_inputs(sio_data, data); superio_exit(sio_data->sioreg); /* Read fan clock dividers immediately */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Read pwm data to save original values */ - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) data->pwm_enable_orig[i] = data->pwm_enable[i]; - /* Register sysfs hooks */ - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { - err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { 
- err = device_create_file(dev, &attr->dev_attr); - if (err) - goto exit_remove; - } - } - /* if fan3 and fan4 are enabled create the sf3 files for them */ - if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan3[i].dev_attr); - if (err) - goto exit_remove; - } - if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan4[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - if ((err = device_create_file(dev, &sda_in_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_min[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_max[i].dev_attr))) - goto exit_remove; - } - - for (i = 0; i < 5; i++) { - if (data->has_fan & (1 << i)) { - if ((err = device_create_file(dev, - &sda_fan_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_alarm[i].dev_attr))) - goto exit_remove; - if (sio_data->kind != nct6776) { - err = device_create_file(dev, - &sda_fan_div[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->has_fan_min & (1 << i)) { - err = device_create_file(dev, - &sda_fan_min[i].dev_attr); - if (err) - goto exit_remove; - } - if (i < data->pwm_num && - ((err = device_create_file(dev, - &sda_pwm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_mode[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_enable[i].dev_attr)) - || (err = device_create_file(dev, - &sda_target_temp[i].dev_attr)) - || (err = device_create_file(dev, - &sda_tolerance[i].dev_attr)))) - goto exit_remove; - } - } + hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, + data->name, + data, + &w83627ehf_chip_info, + w83627ehf_groups); - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - err = device_create_file(dev, &sda_temp_input[i].dev_attr); - if (err) - goto exit_remove; - if (data->temp_label) { - err = device_create_file(dev, - &sda_temp_label[i].dev_attr); - if (err) - goto exit_remove; - } - if (i == 2 && data->temp3_val_only) - continue; - if (data->reg_temp_over[i]) { - err = device_create_file(dev, - &sda_temp_max[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->reg_temp_hyst[i]) { - err = device_create_file(dev, - &sda_temp_max_hyst[i].dev_attr); - if (err) - goto exit_remove; - } - if (i > 2) - continue; - if ((err = device_create_file(dev, - &sda_temp_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_type[i].dev_attr))) - goto exit_remove; - if (data->have_temp_offset & (1 << i)) { - err = device_create_file(dev, - &sda_temp_offset[i].dev_attr); - if (err) - goto exit_remove; - } - } - - err = device_create_file(dev, &sda_caseopen[0].dev_attr); - if (err) - goto exit_remove; - - if (sio_data->kind == nct6776) { - err = device_create_file(dev, &sda_caseopen[1].dev_attr); - if (err) - goto exit_remove; - } - - err = device_create_file(dev, &dev_attr_name); - if (err) - goto exit_remove; - - data->hwmon_dev = hwmon_device_register(dev); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove; - } + return PTR_ERR_OR_ZERO(hwmon_dev); - return 0; - -exit_remove: - w83627ehf_device_remove_files(dev); exit_release: release_region(res->start, IOREGION_LENGTH); exit: @@ -2588,8 +1959,6 @@ 
static int w83627ehf_remove(struct platform_device *pdev) { struct w83627ehf_data *data = platform_get_drvdata(pdev); - hwmon_device_unregister(data->hwmon_dev); - w83627ehf_device_remove_files(&pdev->dev); release_region(data->addr, IOREGION_LENGTH); return 0; @@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev) static int w83627ehf_suspend(struct device *dev) { struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); mutex_lock(&data->update_lock); data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT); - if (sio_data->kind == nct6775) { - data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - } mutex_unlock(&data->update_lock); return 0; @@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev) static int w83627ehf_resume(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); int i; mutex_lock(&data->update_lock); @@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev) if (!(data->has_fan_min & (1 << i))) continue; - w83627ehf_write_value(data, data->REG_FAN_MIN[i], + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i], data->fan_min[i]); } @@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev) /* Restore other settings */ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat); - if (sio_data->kind == nct6775) { - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); - } /* Force re-reading all values */ data->valid = 0; @@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char sio_name_W83627UHG[] __initconst = "W83627UHG"; static const char sio_name_W83667HG[] __initconst = "W83667HG"; static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B"; - static const char sio_name_NCT6775[] __initconst = "NCT6775F"; - static const char sio_name_NCT6776[] __initconst = "NCT6776F"; u16 val; const char *sio_name; @@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg_b; sio_name = sio_name_W83667HG_B; break; - case SIO_NCT6775_ID: - sio_data->kind = nct6775; - sio_name = sio_name_NCT6775; - break; - case SIO_NCT6776_ID: - sio_data->kind = nct6776; - sio_name = sio_name_NCT6776; - break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index dc3f507e7562..a90d757f7043 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) drvdata->trcid = coresight_get_trace_id(drvdata->cpu); } -#ifdef CONFIG_CPU_PM static int etm4_cpu_save(struct etmv4_drvdata *drvdata) { int i, ret = 0; @@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = { static int etm4_cpu_pm_register(void) { - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + + return 0; } static void etm4_cpu_pm_unregister(void) { - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); } -#else -static int 
etm4_cpu_pm_register(void) { return 0; } -static void etm4_cpu_pm_unregister(void) { } -#endif static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index e13af4874976..5137e6297022 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = { static struct at91_twi_pdata sam9x60_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index e01b2b57e724..5ab901ad615d 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -58,6 +58,7 @@ struct bcm2835_i2c_dev { struct i2c_adapter adapter; struct completion completion; struct i2c_msg *curr_msg; + struct clk *bus_clk; int num_msgs; u32 msg_err; u8 *msg_buf; @@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) struct resource *mem, *irq; int ret; struct i2c_adapter *adap; - struct clk *bus_clk; struct clk *mclk; u32 bus_clk_rate; @@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) return PTR_ERR(mclk); } - bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); + i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); - if (IS_ERR(bus_clk)) { + if (IS_ERR(i2c_dev->bus_clk)) { dev_err(&pdev->dev, "Could not register clock\n"); - return PTR_ERR(bus_clk); + return PTR_ERR(i2c_dev->bus_clk); } ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", @@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) bus_clk_rate = 100000; } - ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate); + ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); if (ret < 0) { dev_err(&pdev->dev, "Could not set clock frequency\n"); return ret; } - ret = clk_prepare_enable(bus_clk); + ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); return ret; @@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) static int bcm2835_i2c_remove(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); - struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div"); - clk_rate_exclusive_put(bus_clk); - clk_disable_unprepare(bus_clk); + clk_rate_exclusive_put(i2c_dev->bus_clk); + clk_disable_unprepare(i2c_dev->bus_clk); free_irq(i2c_dev->irq, i2c_dev); i2c_del_adapter(&i2c_dev->adapter); diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 38556381f4ca..2f8b8050a223 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev) adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev, "scl", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_scl)) - return PTR_ERR(adapter_data->gpio_scl); + if (IS_ERR(adapter_data->gpio_scl)) { + ret = PTR_ERR(adapter_data->gpio_scl); + goto free_both; + } adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_sda)) - return PTR_ERR(adapter_data->gpio_sda); + if (IS_ERR(adapter_data->gpio_sda)) { + ret = PTR_ERR(adapter_data->gpio_sda); + goto free_both; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/i2c/busses/i2c-tegra.c 
b/drivers/i2c/busses/i2c-tegra.c index a98bf31d0e5c..61339c665ebd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) + if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); - else + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto unprepare_div_clk; + } + } else { ret = pm_runtime_get_sync(i2c_dev->dev); - - if (ret < 0) { - dev_err(&pdev->dev, "runtime resume failed\n"); - goto unprepare_div_clk; + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto disable_rpm; + } } if (i2c_dev->is_multimaster_mode) { @@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(i2c_dev->dev, "div_clk enable failed %d\n", ret); - goto disable_rpm; + goto put_rpm; } } @@ -1671,11 +1675,16 @@ disable_div_clk: if (i2c_dev->is_multimaster_mode) clk_disable(i2c_dev->div_clk); -disable_rpm: - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) +put_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_put_sync(&pdev->dev); + else tegra_i2c_runtime_suspend(&pdev->dev); +disable_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_disable(&pdev->dev); + unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); @@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); + err = pm_runtime_force_suspend(dev); + if (err < 0) + return err; + return 0; } @@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; + err = pm_runtime_force_resume(dev); + if (err < 0) + return err; + i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9f8dcd3f8385..35b209797d7b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) * If we can set SDA, we will always create a STOP to ensure additional * pulses will do no harm. This is achieved by letting SDA follow SCL * half a cycle later. Check the 'incomplete_write_byte' fault injector - * for details. + * for details. Note that we must honour tsu:sto, 4us, but lets use 5us + * here for simplicity. 
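+ * (RECOVERY_NDELAY in this file is 5000 ns, so a full delay gives the 5 us
+ * quoted above and a half delay gives 2.5 us.)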
*/ bri->set_scl(adap, scl); - ndelay(RECOVERY_NDELAY / 2); + ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); @@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ - ndelay(RECOVERY_NDELAY / 2); + if (scl) { + /* Honour minimum tsu:sto */ + ndelay(RECOVERY_NDELAY); + } else { + /* Honour minimum tf and thd:dat */ + ndelay(RECOVERY_NDELAY / 2); + } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 043691656245..7f8f896fa0c3 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = { .groups = i3c_masterdev_groups, }; -int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, - unsigned long max_i2c_scl_rate) +static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, + unsigned long max_i2c_scl_rate) { struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b0ff0e12d84c..bd26c3b9634e 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); struct i3c_master_controller *m = i3c_dev_get_master(dev); struct dw_i3c_master *master = to_dw_i3c_master(m); + int pos; + + pos = dw_i3c_master_get_free_pos(master); + + if (data->index > pos && pos > 0) { + writel(0, + master->regs + + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); + + master->addrs[data->index] = 0; + master->free_pos |= BIT(data->index); + + data->index = pos; + master->addrs[pos] = dev->info.dyn_addr; + master->free_pos &= ~BIT(pos); + } writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), master->regs + @@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { static int dw_i3c_probe(struct platform_device *pdev) { struct dw_i3c_master *master; - struct resource *res; int ret, irq; master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 10db0bf0655a..54712793709e 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <linux/of_device.h> #define DEV_ID 0x0 #define DEV_ID_I3C_MASTER 0x5034 @@ -60,6 +61,7 @@ #define CTRL_HALT_EN BIT(30) #define CTRL_MCS BIT(29) #define CTRL_MCS_EN BIT(28) +#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) #define CTRL_HJ_DISEC BIT(8) #define CTRL_MST_ACK BIT(7) #define CTRL_HJ_ACK BIT(6) @@ -70,6 +72,7 @@ #define CTRL_MIXED_FAST_BUS_MODE 2 #define CTRL_MIXED_SLOW_BUS_MODE 3 #define CTRL_BUS_MODE_MASK GENMASK(1, 0) +#define THD_DELAY_MAX 3 #define PRESCL_CTRL0 0x14 #define PRESCL_CTRL0_I2C(x) ((x) << 16) @@ -388,6 +391,10 @@ struct cdns_i3c_xfer { struct cdns_i3c_cmd cmds[0]; }; +struct cdns_i3c_data { + u8 thd_delay_ns; +}; + struct 
cdns_i3c_master { struct work_struct hj_work; struct i3c_master_controller base; @@ -408,6 +415,7 @@ struct cdns_i3c_master { struct clk *pclk; struct cdns_i3c_master_caps caps; unsigned long i3c_scl_lim; + const struct cdns_i3c_data *devdata; }; static inline struct cdns_i3c_master * @@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m) return 0; } +static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master) +{ + unsigned long sysclk_rate = clk_get_rate(master->sysclk); + u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns, + (NSEC_PER_SEC / sysclk_rate)); + + /* Every value greater than 3 is not valid. */ + if (thd_delay > THD_DELAY_MAX) + thd_delay = THD_DELAY_MAX; + + /* CTLR_THD_DEL value is encoded. */ + return (THD_DELAY_MAX - thd_delay); +} + static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) { struct cdns_i3c_master *master = to_cdns_i3c_master(m); @@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) * We will issue ENTDAA afterwards from the threaded IRQ handler. */ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; + + /* + * Configure data hold delay based on device-specific data. + * + * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on + * master output. This setting allows to meet this timing on master's + * SoC outputs, regardless of PCB balancing. + */ + ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master)); writel(ctrl, master->regs + CTRL); cdns_i3c_master_enable(master); @@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work) i3c_master_do_daa(&master->base); } +static struct cdns_i3c_data cdns_i3c_devdata = { + .thd_delay_ns = 10, +}; + +static const struct of_device_id cdns_i3c_master_of_ids[] = { + { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, + { /* sentinel */ }, +}; + static int cdns_i3c_master_probe(struct platform_device *pdev) { struct cdns_i3c_master *master; - struct resource *res; int ret, irq; u32 val; @@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->devdata = of_device_get_match_data(&pdev->dev); + if (!master->devdata) + return -EINVAL; + + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); @@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id cdns_i3c_master_of_ids[] = { - { .compatible = "cdns,i3c-master" }, - { /* sentinel */ }, -}; - static struct platform_driver cdns_i3c_master = { .probe = cdns_i3c_master_probe, .remove = cdns_i3c_master_remove, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 75fd2a7b0842..9f38ff02a7b8 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -41,6 +41,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/tick.h> @@ -79,6 +80,7 @@ struct idle_cpu { unsigned long auto_demotion_disable_flags; bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; + bool use_acpi; }; static const struct idle_cpu *icpu; @@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, static struct cpuidle_state *cpuidle_state_table; /* + * Enable this state 
by default even if the ACPI _CST does not list it. + */ +#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) + +/* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. * If this flag is set, SW flushes the TLB, so even if the @@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, @@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, @@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, @@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, mwait_idle_with_hints(eax, ecx); } -static void __setup_broadcast_timer(bool on) -{ - if (on) - 
tick_broadcast_enable(); - else - tick_broadcast_disable(); -} - -static void auto_demotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); - msr_bits &= ~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); -} -static void c1e_promotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); - msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); -} - static const struct idle_cpu idle_cpu_nehalem = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_nhx = { + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; @@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_snx = { + .state_table = snb_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, @@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = { static const struct idle_cpu idle_cpu_ivt = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_hsw = { @@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_hsx = { + .state_table = hsw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_bdw = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdx = { + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_skl = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, @@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = { static const struct idle_cpu idle_cpu_skx = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_knl = { .state_table = knl_cstates, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_bxt = { @@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = { static const struct idle_cpu idle_cpu_dnv = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem), + INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx), INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem), INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem), INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem), - INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx), + INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx), INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft), - INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx), INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb), - 
INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb), + INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx), INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt), INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier), @@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx), + INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), @@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { {} }; -/* - * intel_idle_probe() +#define INTEL_CPU_FAM6_MWAIT \ + { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } + +static const struct x86_cpu_id intel_mwait_ids[] __initconst = { + INTEL_CPU_FAM6_MWAIT, + {} +}; + +static bool __init intel_idle_max_cstate_reached(int cstate) +{ + if (cstate + 1 > max_cstate) { + pr_info("max_cstate %d reached\n", max_cstate); + return true; + } + return false; +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +#include <acpi/processor.h> + +static bool no_acpi __read_mostly; +module_param(no_acpi, bool, 0444); +MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); + +static struct acpi_processor_power acpi_state_table __initdata; + +/** + * intel_idle_cst_usable - Check if the _CST information can be used. + * + * Check if all of the C-states listed by _CST in the max_cstate range are + * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT. 
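+ * (FFH is ACPI's "functional fixed hardware" entry method; on Intel CPUs it
+ * carries a native MWAIT hint in the register address field, which is why
+ * such states can be reused by intel_idle below.)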
*/ -static int __init intel_idle_probe(void) +static bool __init intel_idle_cst_usable(void) { - unsigned int eax, ebx, ecx; - const struct x86_cpu_id *id; + int cstate, limit; - if (max_cstate == 0) { - pr_debug("disabled\n"); - return -EPERM; - } + limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), + acpi_state_table.count); - id = x86_match_cpu(intel_idle_ids); - if (!id) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) - pr_debug("does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; + + if (cx->entry_method != ACPI_CSTATE_FFH) + return false; } - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { - pr_debug("Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; + return true; +} + +static bool __init intel_idle_acpi_cst_extract(void) +{ + unsigned int cpu; + + if (no_acpi) { + pr_debug("Not allowed to use ACPI _CST\n"); + return false; } - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return -ENODEV; + for_each_possible_cpu(cpu) { + struct acpi_processor *pr = per_cpu(processors, cpu); - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + if (!pr) + continue; - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || - !mwait_substates) - return -ENODEV; + if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) + continue; - pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + acpi_state_table.count++; - icpu = (const struct idle_cpu *)id->driver_data; - cpuidle_state_table = icpu->state_table; + if (!intel_idle_cst_usable()) + continue; - pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", - boot_cpu_data.x86_model); + if (!acpi_processor_claim_cst_control()) { + acpi_state_table.count = 0; + return false; + } - return 0; + return true; + } + + pr_debug("ACPI _CST not found or not usable\n"); + return false; } -/* - * intel_idle_cpuidle_devices_uninit() - * Unregisters the cpuidle devices. - */ -static void intel_idle_cpuidle_devices_uninit(void) +static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { - int i; - struct cpuidle_device *dev; + int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx; + struct cpuidle_state *state; - for_each_online_cpu(i) { - dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); - cpuidle_unregister_device(dev); + if (intel_idle_max_cstate_reached(cstate)) + break; + + cx = &acpi_state_table.states[cstate]; + + state = &drv->states[drv->state_count++]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + /* + * For C1-type C-states use the same number for both the exit + * latency and target residency, because that is the case for + * C1 in the majority of the static C-states tables above. + * For the other types of C-states, however, set the target + * residency to 3 times the exit latency which should lead to + * a reasonable balance between energy-efficiency and + * performance in the majority of interesting cases. 
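Editor's note: the residency heuristic described in the comment above is easy to check by hand. A minimal standalone sketch, not part of the patch (ACPI_STATE_C1 carries its ACPICA value of 1):

#include <stdio.h>

#define ACPI_STATE_C1	1	/* as defined by ACPICA */

/* C1-type states keep target_residency == exit_latency; deeper
 * states get three times the exit latency. */
static unsigned int acpi_target_residency(unsigned int type,
					  unsigned int latency_us)
{
	return type > ACPI_STATE_C1 ? 3 * latency_us : latency_us;
}

int main(void)
{
	printf("%u\n", acpi_target_residency(3, 100));		/* 300 us */
	printf("%u\n", acpi_target_residency(ACPI_STATE_C1, 2));	/* 2 us */
	return 0;
}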
+ */ + state->target_residency = cx->latency; + if (cx->type > ACPI_STATE_C1) + state->target_residency *= 3; + + state->flags = MWAIT2flg(cx->address); + if (cx->type > ACPI_STATE_C2) + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + + state->enter = intel_idle; + state->enter_s2idle = intel_idle_s2idle; } } +static bool __init intel_idle_off_by_default(u32 mwait_hint) +{ + int cstate, limit; + + /* + * If there are no _CST C-states, do not disable any C-states by + * default. + */ + if (!acpi_state_table.count) + return false; + + limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + if (acpi_state_table.states[cstate].address == mwait_hint) + return false; + } + return true; +} +#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static inline bool intel_idle_acpi_cst_extract(void) { return false; } +static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } +static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ + /* * ivt_idle_state_table_update(void) * * Tune IVT multi-socket targets * Assumption: num_sockets == (max_package_num + 1) */ -static void ivt_idle_state_table_update(void) +static void __init ivt_idle_state_table_update(void) { /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ int cpu, package_num, num_sockets = 1; @@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void) /* else, 1 and 2 socket systems use default ivt_cstates */ } -/* - * Translate IRTL (Interrupt Response Time Limit) MSR to usec +/** + * irtl_2_usec - IRTL to microseconds conversion. + * @irtl: IRTL MSR value. + * + * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds. */ - -static unsigned int irtl_ns_units[] = { - 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; - -static unsigned long long irtl_2_usec(unsigned long long irtl) +static unsigned long long __init irtl_2_usec(unsigned long long irtl) { + static const unsigned int irtl_ns_units[] __initconst = { + 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 + }; unsigned long long ns; if (!irtl) @@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) ns = irtl_ns_units[(irtl >> 10) & 0x7]; - return div64_u64((irtl & 0x3FF) * ns, 1000); + return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC); } + /* * bxt_idle_state_table_update(void) * * On BXT, we trust the IRTL to show the definitive maximum latency * We use the same value for target_residency. 
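Editor's note: a standalone rendering of the IRTL decoding that irtl_2_usec() above performs, with one worked value; illustrative only, the unit table is copied from the patch:

#include <stdio.h>
#include <stdint.h>

static const unsigned int irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0
};

static uint64_t irtl_to_usec(uint64_t irtl)
{
	uint64_t ns;

	if (!irtl)
		return 0;

	/* bits [12:10] select the time unit, bits [9:0] are the count */
	ns = irtl_ns_units[(irtl >> 10) & 0x7];

	return ((irtl & 0x3FF) * ns) / 1000;
}

int main(void)
{
	/* 0x732: unit index 1 -> 32 ns, count 0x332 = 818,
	 * 818 * 32 ns = 26176 ns -> 26 us */
	printf("%llu\n", (unsigned long long)irtl_to_usec(0x732));
	return 0;
}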
*/ -static void bxt_idle_state_table_update(void) +static void __init bxt_idle_state_table_update(void) { unsigned long long msr; unsigned int usec; @@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void) * On SKL-H (model 0x5e) disable C8 and C9 if: * C10 is enabled and SGX disabled */ -static void sklh_idle_state_table_update(void) +static void __init sklh_idle_state_table_update(void) { unsigned long long msr; unsigned int eax, ebx, ecx, edx; @@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void) skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ } -/* - * intel_idle_state_table_update() - * - * Update the default state_table for this CPU-id - */ -static void intel_idle_state_table_update(void) +static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) { - switch (boot_cpu_data.x86_model) { + unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; + unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & + MWAIT_SUBSTATE_MASK; + + /* Ignore the C-state if there are NO sub-states in CPUID for it. */ + if (num_substates == 0) + return false; + + if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle states deeper than C2"); + return true; +} + +static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) +{ + int cstate; + + switch (boot_cpu_data.x86_model) { case INTEL_FAM6_IVYBRIDGE_X: ivt_idle_state_table_update(); break; @@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void) sklh_idle_state_table_update(); break; } -} - -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states - */ -static void __init intel_idle_cpuidle_driver_init(void) -{ - int cstate; - struct cpuidle_driver *drv = &intel_idle_driver; - - intel_idle_state_table_update(); - - cpuidle_poll_state_init(drv); - drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { - int num_substates, mwait_hint, mwait_cstate; + unsigned int mwait_hint; - if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_s2idle == NULL)) + if (intel_idle_max_cstate_reached(cstate)) break; - if (cstate + 1 > max_cstate) { - pr_info("max_cstate %d reached\n", max_cstate); + if (!cpuidle_state_table[cstate].enter && + !cpuidle_state_table[cstate].enter_s2idle) break; - } - - mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); - - /* number of sub-states for this state in CPUID.MWAIT */ - num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) - & MWAIT_SUBSTATE_MASK; - - /* if NO sub-states for this state in CPUID, skip it */ - if (num_substates == 0) - continue; - /* if state marked as disabled, skip it */ + /* If marked as unusable, skip this state. */ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) { pr_debug("state %s is disabled\n", cpuidle_state_table[cstate].name); continue; } + mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + if (!intel_idle_verify_cstate(mwait_hint)) + continue; - if (((mwait_cstate + 1) > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); + /* Structure copy. 
*/ + drv->states[drv->state_count] = cpuidle_state_table[cstate]; - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; - drv->state_count += 1; + drv->state_count++; } if (icpu->byt_auto_demotion_disable_flag) { @@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void) } } +/* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) +{ + cpuidle_poll_state_init(drv); + drv->state_count = 1; + + if (icpu) + intel_idle_init_cstates_icpu(drv); + else + intel_idle_init_cstates_acpi(drv); +} + +static void auto_demotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + msr_bits &= ~(icpu->auto_demotion_disable_flags); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); +} + +static void c1e_promotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits &= ~0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} /* * intel_idle_cpu_init() @@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } + if (!icpu) + return 0; + if (icpu->auto_demotion_disable_flags) auto_demotion_disable(); @@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu) struct cpuidle_device *dev; if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) - __setup_broadcast_timer(true); + tick_broadcast_enable(); /* * Some systems can hotplug a cpu at runtime after @@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu) return 0; } +/** + * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices. 
+ */ +static void __init intel_idle_cpuidle_devices_uninit(void) +{ + int i; + + for_each_online_cpu(i) + cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i)); +} + static int __init intel_idle_init(void) { + const struct x86_cpu_id *id; + unsigned int eax, ebx, ecx; int retval; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; - retval = intel_idle_probe(); - if (retval) - return retval; + if (max_cstate == 0) { + pr_debug("disabled\n"); + return -EPERM; + } + + id = x86_match_cpu(intel_idle_ids); + if (id) { + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + } else { + id = x86_match_cpu(intel_mwait_ids); + if (!id) + return -ENODEV; + } + + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return -ENODEV; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || + !mwait_substates) + return -ENODEV; + + pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + + icpu = (const struct idle_cpu *)id->driver_data; + if (icpu) { + cpuidle_state_table = icpu->state_table; + if (icpu->use_acpi) + intel_idle_acpi_cst_extract(); + } else if (!intel_idle_acpi_cst_extract()) { + return -ENODEV; + } + + pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", + boot_cpu_data.x86_model); intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (intel_idle_cpuidle_devices == NULL) + if (!intel_idle_cpuidle_devices) return -ENOMEM; - intel_idle_cpuidle_driver_init(); + intel_idle_cpuidle_driver_init(&intel_idle_driver); + retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 3f03abf100b5..306bf15023a7 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -494,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, st->channel_config[channel].buf_negative = of_property_read_bool(child, "adi,buffered-negative"); - *chan = ad7124_channel_template; - chan->address = channel; - chan->scan_index = channel; - chan->channel = ain[0]; - chan->channel2 = ain[1]; - - chan++; + chan[channel] = ad7124_channel_template; + chan[channel].address = channel; + chan[channel].scan_index = channel; + chan[channel].channel = ain[0]; + chan[channel].channel2 = ain[1]; } return 0; diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index fa4586037bb8..0b91de4df8f4 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -65,6 +65,7 @@ config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index a7d40c02ce6b..b921dd9e108f 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id, for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) { for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) { - if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) + if (st_lsm6dsx_sensor_settings[i].id[j].name && + id == 
st_lsm6dsx_sensor_settings[i].id[j].hw_id) break; } if (j < ST_LSM6DSX_MAX_ID) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index c193d64e5217..112225c0e486 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const unsigned long *mask, bool timestamp) { unsigned bytes = 0; - int length, i; + int length, i, largest = 0; /* How much space will the demuxed element take? */ for_each_set_bit(i, mask, @@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, length = iio_storage_bytes_for_si(indio_dev, i); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } if (timestamp) { length = iio_storage_bytes_for_timestamp(indio_dev); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } + + bytes = ALIGN(bytes, largest); return bytes; } diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 16dacea9eadf..b0e241aaefb4 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data) if (ret < 0) return ret; - data->al_scale = 24000; data->vcnl4200_al.reg = VCNL4200_AL_DATA; data->vcnl4200_ps.reg = VCNL4200_PS_DATA; switch (id) { @@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data) /* show 54ms in total. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000); + data->al_scale = 24000; break; case VCNL4040_PROD_ID: /* Integration time is 80ms, add 10ms. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000); + data->al_scale = 120000; break; } data->vcnl4200_al.last_measurement = ktime_set(0, 0); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 9b6ca15a183c..ad5112a2325f 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 0441c7eea36b..020f70e6865e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Add qp to flush list of the CQ */ bnxt_qplib_add_flush_qp(qp); } else { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { - /* Before we complete, do WA 9060 */ - if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, - cqe_sq_cons)) { - *lib_qp = qp; - goto out; - } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c index adb4a1ba921b..5836fe7b2817 100644 --- a/drivers/infiniband/hw/hfi1/iowait.c +++ b/drivers/infiniband/hw/hfi1/iowait.c @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, void iowait_cancel_work(struct iowait 
*w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); - cancel_work_sync(&iowait_get_tid_work(w)->iowork); + /* Make sure that the iowork for TID RDMA is used */ + if (iowait_get_tid_work(w)->iowork.func) + cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index e53f542b60af..8a2e0d9351e9 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; + /* + * If resync_psn points to the last flow PSN for a + * segment and the new segment (likely from a new + * request) starts with a new generation number, we + * need to adjust resync_psn accordingly. + */ + if (flow->flow_state.generation != + (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) + resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 86375947bc67..dbd96d029d8b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context) static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct i40iw_ucontext *ucontext; - u64 db_addr_offset; - u64 push_offset; + u64 db_addr_offset, push_offset, pfn; ucontext = to_ucontext(context); if (ucontext->iwdev->sc_dev.is_pf) { @@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_private_data = ucontext; } else { if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); } - if (io_remap_pfn_range(vma, vma->vm_start, - vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; + pfn = vma->vm_pgoff + + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> + PAGE_SHIFT); - return 0; + return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + vma->vm_page_prot, NULL); } /** diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..b273e421e910 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn) } } -static void -isert_wait4cmds(struct iscsi_conn *conn) -{ - isert_info("iscsi_conn %p\n", conn); - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } -} - /** * isert_put_unsol_pending_cmds() - Drop commands waiting for * unsolicitate dataout @@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) ib_drain_qp(isert_conn->qp); isert_put_unsol_pending_cmds(conn); - isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d7dd6fcf2db0..cb6e3a5f509c 100644 --- a/drivers/input/evdev.c +++ 
b/drivers/input/evdev.c @@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client, */ client->tail = (client->head - 2) & (client->bufsize - 1); - client->buffer[client->tail].input_event_sec = - event->input_event_sec; - client->buffer[client->tail].input_event_usec = - event->input_event_usec; - client->buffer[client->tail].type = EV_SYN; - client->buffer[client->tail].code = SYN_DROPPED; - client->buffer[client->tail].value = 0; + client->buffer[client->tail] = (struct input_event) { + .input_event_sec = event->input_event_sec, + .input_event_usec = event->input_event_usec, + .type = EV_SYN, + .code = SYN_DROPPED, + .value = 0, + }; client->packet_head = client->tail; } @@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file) struct evdev_client *client; int error; - client = kzalloc(struct_size(client, buffer, bufsize), - GFP_KERNEL | __GFP_NOWARN); - if (!client) - client = vzalloc(struct_size(client, buffer, bufsize)); + client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL); if (!client) return -ENOMEM; diff --git a/drivers/input/input.c b/drivers/input/input.c index 55086279d044..ee6c3234df36 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev, } } - __clear_bit(*old_keycode, dev->keybit); - __set_bit(ke->keycode, dev->keybit); - - for (i = 0; i < dev->keycodemax; i++) { - if (input_fetch_keycode(dev, i) == *old_keycode) { - __set_bit(*old_keycode, dev->keybit); - break; /* Setting the bit twice is useless, so break */ + if (*old_keycode <= KEY_MAX) { + __clear_bit(*old_keycode, dev->keybit); + for (i = 0; i < dev->keycodemax; i++) { + if (input_fetch_keycode(dev, i) == *old_keycode) { + __set_bit(*old_keycode, dev->keybit); + /* Setting the bit twice is useless, so break */ + break; + } } } + __set_bit(ke->keycode, dev->keybit); return 0; } @@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev, * Simulate keyup event if keycode is not present * in the keymap anymore */ - if (test_bit(EV_KEY, dev->evbit) && - !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && - __test_and_clear_bit(old_keycode, dev->key)) { + if (old_keycode > KEY_MAX) { + dev_warn(dev->dev.parent ?: &dev->dev, + "%s: got too big old keycode %#x\n", + __func__, old_keycode); + } else if (test_bit(EV_KEY, dev->evbit) && + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && + __test_and_clear_bit(old_keycode, dev->key)) { struct input_value vals[] = { { EV_KEY, old_keycode, 0 }, input_value_sync diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c index 53799527dc75..9f809aeb785c 100644 --- a/drivers/input/keyboard/imx_sc_key.c +++ b/drivers/input/keyboard/imx_sc_key.c @@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work) return; } - state = (bool)msg.state; + /* + * The response data from SCU firmware is 4 bytes, + * but ONLY the first byte is the key state, other + * 3 bytes could be some dirty data, so we should + * ONLY take the first byte as key state. 
+ */ + state = (bool)(msg.state & 0xff); if (state ^ priv->keystate) { priv->keystate = state; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index 83368f1e7c4e..4650f4a94989 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev) int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0); + 0x11, 0x40, 0x5601, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); @@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x44, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x44, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); @@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x22, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x22, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c index 4d875f2ac13d..ee55f22dbca5 100644 --- a/drivers/input/misc/max77650-onkey.c +++ b/drivers/input/misc/max77650-onkey.c @@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev) return input_register_device(onkey->input); } +static const struct of_device_id max77650_onkey_of_match[] = { + { .compatible = "maxim,max77650-onkey" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_onkey_of_match); + static struct platform_driver max77650_onkey_driver = { .driver = { .name = "max77650-onkey", + .of_match_table = max77650_onkey_of_match, }, .probe = max77650_onkey_probe, }; diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c index ecd762f93732..53ad25eaf1a2 100644 --- a/drivers/input/misc/pm8xxx-vibrator.c +++ b/drivers/input/misc/pm8xxx-vibrator.c @@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on) if (regs->enable_mask) rc = regmap_update_bits(vib->regmap, regs->enable_addr, - on ? regs->enable_mask : 0, val); + regs->enable_mask, on ? 
~0 : 0); return rc; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index fd253781be71..f2593133e524 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev, struct uinput_device *udev = input_get_drvdata(dev); struct timespec64 ts; - udev->buff[udev->head].type = type; - udev->buff[udev->head].code = code; - udev->buff[udev->head].value = value; ktime_get_ts64(&ts); - udev->buff[udev->head].input_event_sec = ts.tv_sec; - udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC; + + udev->buff[udev->head] = (struct input_event) { + .input_event_sec = ts.tv_sec, + .input_event_usec = ts.tv_nsec / NSEC_PER_USEC, + .type = type, + .code = code, + .value = value, + }; + udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; wake_up_interruptible(&udev->waitq); @@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, static __poll_t uinput_poll(struct file *file, poll_table *wait) { struct uinput_device *udev = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */ poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return EPOLLOUT | EPOLLWRNORM; + return mask; } static int uinput_release(struct inode *inode, struct file *file) diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index 0bc01cfc2b51..6b23e679606e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -24,6 +24,12 @@ #define F54_NUM_TX_OFFSET 1 #define F54_NUM_RX_OFFSET 0 +/* + * The smbus protocol can read only 32 bytes max at a time. + * But this should be fine for i2c/spi as well. 
+ */ +#define F54_REPORT_DATA_SIZE 32 + /* F54 commands */ #define F54_GET_REPORT 1 #define F54_FORCE_CAL 2 @@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work) int report_size; u8 command; int error; + int i; report_size = rmi_f54_get_report_size(f54); if (report_size == 0) { @@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work) rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n"); - fifo[0] = 0; - fifo[1] = 0; - error = rmi_write_block(fn->rmi_dev, - fn->fd.data_base_addr + F54_FIFO_OFFSET, - fifo, sizeof(fifo)); - if (error) { - dev_err(&fn->dev, "Failed to set fifo start offset\n"); - goto abort; - } + for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) { + int size = min(F54_REPORT_DATA_SIZE, report_size - i); + + fifo[0] = i & 0xff; + fifo[1] = i >> 8; + error = rmi_write_block(fn->rmi_dev, + fn->fd.data_base_addr + F54_FIFO_OFFSET, + fifo, sizeof(fifo)); + if (error) { + dev_err(&fn->dev, "Failed to set fifo start offset\n"); + goto abort; + } - error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + - F54_REPORT_DATA_OFFSET, f54->report_data, - report_size); - if (error) { - dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", - __func__, report_size, error); - goto abort; + error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + + F54_REPORT_DATA_OFFSET, + f54->report_data + i, size); + if (error) { + dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", + __func__, size, error); + goto abort; + } } abort: diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index b313c579914f..2407ea43de59 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to write next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } exit: mutex_unlock(&rmi_smb->page_mutex); @@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to read next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } retval = 0; diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 2ca586fb914f..e08b0ef078e8 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) aiptek->inputdev = inputdev; aiptek->intf = intf; - aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber; + aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; aiptek->inDelay = 0; aiptek->endDelay = 0; aiptek->previousJitterable = 0; @@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); /* Verify that a device really has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "interface has %d endpoints, but must have minimum 1\n", - intf->altsetting[0].desc.bNumEndpoints); + intf->cur_altsetting->desc.bNumEndpoints); err = -EINVAL; goto fail3; } - endpoint = &intf->altsetting[0].endpoint[0].desc; + endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. 
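Editor's note: the rmi_f54 hunk above replaces one large FIFO read with a loop that re-seeks the report FIFO and reads at most 32 bytes per transfer, since that is all SMBus allows. A self-contained sketch of the pattern, with a hypothetical dev_read_block() standing in for the RMI transport:

#include <string.h>

#define CHUNK_SIZE	32	/* SMBus limit per transfer */

static unsigned char fake_fifo[80];	/* stand-in for the device FIFO */

/* hypothetical transport helper: fetch @len bytes at @offset */
static int dev_read_block(unsigned int offset, unsigned char *buf,
			  unsigned int len)
{
	memcpy(buf, fake_fifo + offset, len);
	return 0;
}

static int read_report(unsigned char *report, unsigned int report_size)
{
	unsigned int i;

	for (i = 0; i < report_size; i += CHUNK_SIZE) {
		unsigned int size = report_size - i < CHUNK_SIZE ?
				    report_size - i : CHUNK_SIZE;

		/* as in the patch: set the FIFO offset, then read one chunk */
		if (dev_read_block(i, report + i, size))
			return -1;
	}

	return 0;
}

int main(void)
{
	unsigned char report[80];

	return read_report(report, sizeof(report));
}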
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 35031228a6d0..96d65575f75a 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface, } /* Sanity check that a device has an endpoint */ - if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { + if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } - /* - * The endpoint is always altsetting 0, we know this since we know - * this device only has one interrupt endpoint - */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; + endpoint = &usbinterface->cur_altsetting->endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); @@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface, if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); - dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); + dev_dbg(&usbinterface->dev, "interface extra len:%d\n", + usbinterface->cur_altsetting->extralen); /* * Find the HID descriptor so we can find out the size of the @@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface, input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; - usb_fill_int_urb(gtco->urbinfo, udev, usb_rcvintpipe(udev, diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index a1f3a0cb197e..38f087404f7a 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf, return -ENODEV; /* Sanity check that the device has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "Invalid number of endpoints\n"); return -EINVAL; } diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 0af0fe8c40d7..742a7e96c1b5 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device *hwmon; + struct thermal_zone_device *thermal; int error; u32 reg; bool ts_attached; @@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops); + thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, + &sun4i_ts_tz_ops); + if (IS_ERR(thermal)) + return PTR_ERR(thermal); writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 1dd47dda71cd..34d31c7ec8ba 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface, int error; /* Check if we really have the right interface. 
*/ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 27bde309e6f7..823cc4ef51fd 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, static void init_iommu_perf_ctr(struct amd_iommu *iommu) { struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0; + u64 val = 0xabcd, val2 = 0, save_reg = 0; if (!iommu_feature(iommu, FEATURE_PC)) return; amd_iommu_pc_present = true; + /* save the value to restore, if writable */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + goto pc_false; + /* Check if the performance counters can be written to */ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) { - pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); - amd_iommu_pc_present = false; - return; - } + (val != val2)) + goto pc_false; + + /* restore */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); iommu->max_banks = (u8) ((val >> 12) & 0x3f); iommu->max_counters = (u8) ((val >> 7) & 0xf); + + return; + +pc_false: + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; + return; } static ssize_t amd_iommu_show_cap(struct device *dev, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index c363294b3bb9..a2e96a5fd9a7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1203,7 +1203,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie; struct iommu_dma_msi_page *msi_page; static DEFINE_MUTEX(msi_prepare_lock); /* see below */ @@ -1212,8 +1211,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) return 0; } - cookie = domain->iova_cookie; - /* * In fact the whole prepare operation should already be serialised by * irq_domain_mutex further up the callchain, but that's pretty subtle diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 42966611a192..932267f49f9a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - if (info) + if (info && info != DEFER_DEVICE_DOMAIN_INFO + && info != DUMMY_DEVICE_DOMAIN_INFO) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -5624,8 +5625,10 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); @@ -5651,7 +5654,8 @@ static int intel_iommu_add_device(struct device *dev) if (!get_private_domain_for_dev(dev)) { dev_warn(dev, "Failed to get a private domain.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unlink; } dev_info(dev, @@ -5666,6 +5670,10 @@ static int intel_iommu_add_device(struct device *dev) } return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, 
dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5817,6 +5825,13 @@ static void intel_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); } +static struct iommu_group *intel_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { @@ -5989,7 +6004,7 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, .apply_resv_region = intel_iommu_apply_resv_region, - .device_group = pci_device_group, + .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index fdd40756dbc1..3ead597e1c57 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -751,6 +751,7 @@ err_put_group: mutex_unlock(&group->mutex); dev->iommu_group = NULL; kobject_put(group->devices_kobj); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 01d18b39069e..c5589ee0dfb3 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -17,7 +17,6 @@ #include <linux/delay.h> #include <asm/io.h> -#include <asm/mach-jz4740/irq.h> struct ingenic_intc_data { void __iomem *base; @@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data) while (pending) { int bit = __fls(pending); - irq = irq_find_mapping(domain, bit + (i * 32)); + irq = irq_linear_revmap(domain, bit + (i * 32)); generic_handle_irq(irq); pending &= ~BIT(bit); } @@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - domain = irq_domain_add_legacy(node, num_chips * 32, - JZ4740_IRQ_BASE, 0, + domain = irq_domain_add_linear(node, num_chips * 32, &irq_generic_chip_ops, NULL); if (!domain) { err = -ENOMEM; diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c index b7e0ae1af8fa..e8922fa03379 100644 --- a/drivers/leds/leds-as3645a.c +++ b/drivers/leds/leds-as3645a.c @@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash, switch (id) { case AS_LED_FLASH: flash->flash_node = child; + fwnode_handle_get(child); break; case AS_LED_INDICATOR: flash->indicator_node = child; + fwnode_handle_get(child); break; default: dev_warn(&flash->client->dev, "unknown LED %u encountered, ignoring\n", id); break; } - fwnode_handle_get(child); } if (!flash->flash_node) { diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index a5c73f3d5f79..2bf74595610f 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) struct gpio_led led = {}; const char *state = NULL; + /* + * Acquire gpiod from DT with uninitialized label, which + * will be updated after LED class device is registered, + * Only then the final LED name is known. 
+ */ led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child, GPIOD_ASIS, - led.name); + NULL); if (IS_ERR(led.gpiod)) { fwnode_handle_put(child); return ERR_CAST(led.gpiod); @@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) fwnode_handle_put(child); return ERR_PTR(ret); } + /* Set gpiod label to match the corresponding LED name. */ + gpiod_set_consumer_name(led_dat->gpiod, + led_dat->cdev.dev->kobj.name); priv->num_leds++; } diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c index 0507c6575c08..491268bb34a7 100644 --- a/drivers/leds/leds-lm3532.c +++ b/drivers/leds/leds-lm3532.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // TI LM3532 LED driver // Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// http://www.ti.com/lit/ds/symlink/lm3532.pdf #include <linux/i2c.h> #include <linux/leds.h> @@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) led->num_leds = fwnode_property_count_u32(child, "led-sources"); if (led->num_leds > LM3532_MAX_LED_STRINGS) { - dev_err(&priv->client->dev, "To many LED string defined\n"); + dev_err(&priv->client->dev, "Too many LED string defined\n"); continue; } diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c index 4c2d0b3c6dad..a0d4b725c917 100644 --- a/drivers/leds/leds-max77650.c +++ b/drivers/leds/leds-max77650.c @@ -135,9 +135,16 @@ err_node_put: return rv; } +static const struct of_device_id max77650_led_of_match[] = { + { .compatible = "maxim,max77650-led" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_led_of_match); + static struct platform_driver max77650_led_driver = { .driver = { .name = "max77650-led", + .of_match_table = max77650_led_of_match, }, .probe = max77650_led_probe, }; diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c index db5af83f0cec..b6447c1721b4 100644 --- a/drivers/leds/leds-rb532.c +++ b/drivers/leds/leds-rb532.c @@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev, { if (brightness) set_latch_u5(LO_ULED, 0); - else set_latch_u5(0, LO_ULED); } diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c index 718729c89440..3abcafe46278 100644 --- a/drivers/leds/trigger/ledtrig-pattern.c +++ b/drivers/leds/trigger/ledtrig-pattern.c @@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void) module_init(pattern_trig_init); module_exit(pattern_trig_exit); -MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com"); -MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org"); +MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>"); +MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>"); MODULE_DESCRIPTION("LED Pattern trigger"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 9198c1b480d9..adf26a21fcd1 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -301,6 +301,7 @@ struct cached_dev { struct block_device *bdev; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; struct closure sb_write; @@ -403,6 +404,7 @@ enum alloc_reserve { struct cache { struct cache_set *set; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index cffcdc9feefb..4385303836d8 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct 
btree_iter *iter, * Our temporary buffer is the same size as the btree node's * buffer, we can just swap buffers instead of doing a big * memcpy() + * + * Don't worry even if 'out' is allocated from mempool, it can + * still be swapped here, because state->pool is a page mempool + * created by mempool_init_page_pool(), which allocates + * pages by alloc_pages() indeed. */ out->magic = b->set->data->magic; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 14d6c33b0957..fa872df4e770 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, i = 0; btree_cache_used = c->btree_cache_used; - list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { + list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { if (nr <= 0) goto out; - if (++i > 3 && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_data_free(b); rw_unlock(true, b); freed++; } nr--; + i++; } - for (; (nr--) && i < btree_cache_used; i++) { - if (list_empty(&c->btree_cache)) + list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + if (nr <= 0 || i >= btree_cache_used) goto out; - b = list_first_entry(&c->btree_cache, struct btree, list); - list_rotate_left(&c->btree_cache); - - if (!b->accessed && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_bucket_free(b); mca_data_free(b); rw_unlock(true, b); freed++; - } else - b->accessed = 0; + } + + nr--; + i++; } out: mutex_unlock(&c->bucket_lock); @@ -1069,7 +1067,6 @@ retry: BUG_ON(!b->written); b->parent = parent; - b->accessed = 1; for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { prefetch(b->keys.set[i].tree); @@ -1160,7 +1157,6 @@ retry: goto retry; } - b->accessed = 1; b->parent = parent; bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 76cfd121a486..f4dcca449391 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -121,8 +121,6 @@ struct btree { /* Key/pointer for this btree node */ BKEY_PADDED(key); - /* Single bit - set when accessed, cleared by shrinker */ - unsigned long accessed; unsigned long seq; struct rw_semaphore lock; struct cache_set *c; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index be2a2a201603..33ddc5269e8d 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -417,10 +417,14 @@ err: /* Journalling */ +#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask)) + static void btree_flush_write(struct cache_set *c) { struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR]; - unsigned int i, n; + unsigned int i, nr, ref_nr; + atomic_t *fifo_front_p, *now_fifo_front_p; + size_t mask; if (c->journal.btree_flushing) return; @@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c) c->journal.btree_flushing = true; spin_unlock(&c->journal.flush_write_lock); + /* get the oldest journal entry and check its refcount */ + spin_lock(&c->journal.lock); + fifo_front_p = &fifo_front(&c->journal.pin); + ref_nr = atomic_read(fifo_front_p); + if (ref_nr <= 0) { + /* + * do nothing if no btree node references + * the oldest journal entry + */ + spin_unlock(&c->journal.lock); + goto out; + } + spin_unlock(&c->journal.lock); + + mask = c->journal.pin.mask; + nr = 0; atomic_long_inc(&c->flush_write); memset(btree_nodes, 0, sizeof(btree_nodes)); - n = 0; mutex_lock(&c->bucket_lock); list_for_each_entry_safe_reverse(b, 
t, &c->btree_cache, list) { + /* + * It is safe to get now_fifo_front_p without holding + * c->journal.lock here, because we don't need to know + * the exact value, just check whether the + * front pointer of c->journal.pin has changed. + */ + now_fifo_front_p = &fifo_front(&c->journal.pin); + /* + * If the oldest journal entry is reclaimed and the front + * pointer of c->journal.pin changes, it is unnecessary + * to scan c->btree_cache anymore, just quit the loop and + * flush out what we have already. + */ + if (now_fifo_front_p != fifo_front_p) + break; + /* + * quit this loop if all matching btree nodes are + * scanned and recorded in btree_nodes[] already. + */ + ref_nr = atomic_read(fifo_front_p); + if (nr >= ref_nr) + break; + if (btree_node_journal_flush(b)) pr_err("BUG: flush_write bit should not be set here!"); @@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c) continue; } + /* + * Only select the btree node which exactly references + * the oldest journal entry. + * + * If the journal entry pointed to by fifo_front_p is + * reclaimed in parallel, don't worry: + * - the list_for_each_xxx loop will quit when checking + * the next now_fifo_front_p. + * - If there are matched nodes recorded in btree_nodes[], + * they are clean now (this is why and how the oldest + * journal entry can be reclaimed). These selected nodes + * will be ignored and skipped in the following for-loop. + */ + if (nr_to_fifo_front(btree_current_write(b)->journal, + fifo_front_p, + mask) != 0) { + mutex_unlock(&b->write_lock); + continue; + } + set_btree_node_journal_flush(b); mutex_unlock(&b->write_lock); - btree_nodes[n++] = b; - if (n == BTREE_FLUSH_NR) + btree_nodes[nr++] = b; + /* + * To avoid holding c->bucket_lock for too long, + * only scan for BTREE_FLUSH_NR matched btree nodes + * at most. If more btree nodes still reference + * the oldest journal entry, try to flush them next + * time when btree_flush_write() is called.
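Editor's note: nr_to_fifo_front() above is plain wrap-around subtraction on the power-of-two journal ring: the distance from the FIFO front to an entry's pin pointer, masked by size - 1. A minimal sketch of the arithmetic, not part of the patch:

#include <stdio.h>
#include <stddef.h>

/* mask == ring_size - 1 for a power-of-two ring */
static size_t ring_distance(size_t idx, size_t front, size_t mask)
{
	return (idx - front) & mask;
}

int main(void)
{
	/* ring of 8 slots, front at slot 6: slot 1 is 3 slots past it */
	printf("%zu\n", ring_distance(1, 6, 7));	/* prints 3 */
	/* a node referencing the oldest entry yields distance 0 */
	printf("%zu\n", ring_distance(6, 6, 7));	/* prints 0 */
	return 0;
}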
+ */ + if (nr == BTREE_FLUSH_NR) break; } mutex_unlock(&c->bucket_lock); - for (i = 0; i < n; i++) { + for (i = 0; i < nr; i++) { b = btree_nodes[i]; if (!b) { pr_err("BUG: btree_nodes[%d] is NULL", i); @@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c) mutex_unlock(&b->write_lock); } +out: spin_lock(&c->journal.flush_write_lock); c->journal.btree_flushing = false; spin_unlock(&c->journal.flush_write_lock); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 77e9869345e7..3dea1d5acd5c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -15,7 +15,6 @@ #include "writeback.h" #include <linux/blkdev.h> -#include <linux/buffer_head.h> #include <linux/debugfs.h> #include <linux/genhd.h> #include <linux/idr.h> @@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq; /* Superblock */ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, - struct page **res) + struct cache_sb_disk **res) { const char *err; - struct cache_sb *s; - struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); + struct cache_sb_disk *s; + struct page *page; unsigned int i; - if (!bh) + page = read_cache_page_gfp(bdev->bd_inode->i_mapping, + SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); + if (IS_ERR(page)) return "IO error"; - - s = (struct cache_sb *) bh->b_data; + s = page_address(page) + offset_in_page(SB_OFFSET); sb->offset = le64_to_cpu(s->offset); sb->version = le64_to_cpu(s->version); @@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, } sb->last_mount = (u32)ktime_get_real_seconds(); - err = NULL; - - get_page(bh->b_page); - *res = bh->b_page; + *res = s; + return NULL; err: - put_bh(bh); + put_page(page); return err; } @@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio) closure_put(&dc->sb_write); } -static void __write_super(struct cache_sb *sb, struct bio *bio) +static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, + struct bio *bio) { - struct cache_sb *out = page_address(bio_first_page_all(bio)); unsigned int i; + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; bio->bi_iter.bi_sector = SB_SECTOR; - bio->bi_iter.bi_size = SB_SIZE; - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); - bch_bio_map(bio, NULL); + __bio_add_page(bio, virt_to_page(out), SB_SIZE, + offset_in_page(out)); out->offset = cpu_to_le64(sb->offset); out->version = cpu_to_le64(sb->version); @@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) down(&dc->sb_write_mutex); closure_init(cl, parent); - bio_reset(bio); + bio_init(bio, dc->sb_bv, 1); bio_set_dev(bio, dc->bdev); bio->bi_end_io = write_bdev_super_endio; bio->bi_private = dc; closure_get(cl); /* I/O request sent to backing device */ - __write_super(&dc->sb, bio); + __write_super(&dc->sb, dc->sb_disk, bio); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); } @@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c) SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); - bio_reset(bio); + bio_init(bio, ca->sb_bv, 1); bio_set_dev(bio, ca->bdev); bio->bi_end_io = write_super_endio; bio->bi_private = ca; closure_get(cl); - __write_super(&ca->sb, bio); + __write_super(&ca->sb, ca->sb_disk, bio); } closure_return_with_destructor(cl, bcache_write_super_unlock); @@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl) mutex_unlock(&bch_register_lock); + if (dc->sb_disk) + put_page(virt_to_page(dc->sb_disk)); + if (!IS_ERR_OR_NULL(dc->bdev)) 
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) /* Cached device - bcache superblock */ -static int register_bdev(struct cache_sb *sb, struct page *sb_page, +static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cached_dev *dc) { @@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page, memcpy(&dc->sb, sb, sizeof(struct cache_sb)); dc->bdev = bdev; dc->bdev->bd_holder = dc; - - bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; - get_page(sb_page); - + dc->sb_disk = sb_disk; if (cached_dev_init(dc, sb->block_size << 9)) goto err; @@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj) for (i = 0; i < RESERVE_NR; i++) free_fifo(&ca->free[i]); - if (ca->sb_bio.bi_inline_vecs[0].bv_page) - put_page(bio_first_page_all(&ca->sb_bio)); + if (ca->sb_disk) + put_page(virt_to_page(ca->sb_disk)); if (!IS_ERR_OR_NULL(ca->bdev)) blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -2259,7 +2256,7 @@ err_free: return ret; } -static int register_cache(struct cache_sb *sb, struct page *sb_page, +static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cache *ca) { const char *err = NULL; /* must be set for any error case */ @@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; - - bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; - get_page(sb_page); + ca->sb_disk = sb_disk; if (blk_queue_discard(bdev_get_queue(bdev))) ca->discard = CACHE_DISCARD(&ca->sb); @@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev) static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, const char *buffer, size_t size) { - ssize_t ret = -EINVAL; - const char *err = "cannot allocate memory"; + const char *err; char *path = NULL; - struct cache_sb *sb = NULL; - struct block_device *bdev = NULL; - struct page *sb_page = NULL; + struct cache_sb *sb; + struct cache_sb_disk *sb_disk; + struct block_device *bdev; + ssize_t ret; + ret = -EBUSY; + err = "failed to reference bcache module"; if (!try_module_get(THIS_MODULE)) - return -EBUSY; + goto out; /* For latest state of bcache_is_reboot */ smp_mb(); + err = "bcache is in reboot"; if (bcache_is_reboot) - return -EBUSY; + goto out_module_put; + ret = -ENOMEM; + err = "cannot allocate memory"; path = kstrndup(buffer, size, GFP_KERNEL); if (!path) - goto err; + goto out_module_put; sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); if (!sb) - goto err; + goto out_free_path; + ret = -EINVAL; err = "failed to open device"; bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, @@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!IS_ERR(bdev)) bdput(bdev); if (attr == &ksysfs_register_quiet) - goto quiet_out; + goto done; } - goto err; + goto out_free_sb; } err = "failed to set blocksize"; if (set_blocksize(bdev, 4096)) - goto err_close; + goto out_blkdev_put; - err = read_super(sb, bdev, &sb_page); + err = read_super(sb, bdev, &sb_disk); if (err) - goto err_close; + goto out_blkdev_put; err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = 
kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) - goto err_close; + goto out_put_sb_page; mutex_lock(&bch_register_lock); - ret = register_bdev(sb, sb_page, bdev, dc); + ret = register_bdev(sb, sb_disk, bdev, dc); mutex_unlock(&bch_register_lock); /* blkdev_put() will be called in cached_dev_free() */ if (ret < 0) - goto err; + goto out_free_sb; } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) - goto err_close; + goto out_put_sb_page; /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err; + if (register_cache(sb, sb_disk, bdev, ca) != 0) + goto out_free_sb; } -quiet_out: - ret = size; -out: - if (sb_page) - put_page(sb_page); + +done: kfree(sb); kfree(path); module_put(THIS_MODULE); - return ret; + return size; -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - pr_info("error %s: %s", path, err); - goto out; +out_put_sb_page: + put_page(virt_to_page(sb_disk)); +out_blkdev_put: + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); +out_free_sb: + kfree(sb); +out_free_path: + kfree(path); + path = NULL; +out_module_put: + module_put(THIS_MODULE); +out: + pr_info("error %s: %s", path?path:"", err); + return ret; } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3c50c4e4da8f..963d3774c93e 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -17,7 +17,7 @@ #include <linux/dm-bufio.h> #define DM_MSG_PREFIX "persistent snapshot" -#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ +#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */ #define DM_PREFETCH_CHUNKS 12 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 3ad18246fcb3..e230052c2107 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap) /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->storage.file_pages; i++) { - if (!bitmap->storage.filemap) - return; dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); @@ -1338,7 +1336,8 @@ void md_bitmap_daemon_work(struct mddev *mddev) BITMAP_PAGE_DIRTY)) /* bitmap_unplug will handle the rest */ break; - if (test_and_clear_page_attr(bitmap, j, + if (bitmap->storage.filemap && + test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE)) { write_page(bitmap, bitmap->storage.filemap[j], 0); } @@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev) return; md_bitmap_wait_behind_writes(mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, true); mutex_lock(&mddev->bitmap_info.mutex); spin_lock(&mddev->lock); @@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev) goto out; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, true); + mddev_create_serial_pool(mddev, rdev, true); if (mddev_is_clustered(mddev)) md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); @@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len) if (backlog > COUNTER_MAX) return -EINVAL; mddev->bitmap_info.max_write_behind = backlog; - if (!backlog && mddev->wb_info_pool) { - /* wb_info_pool is not needed if backlog is zero */ - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - } else if (backlog && !mddev->wb_info_pool) { - /* 
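The register_bcache() rework above replaces the shared err:/out: tail with the kernel's staged-unwind idiom: each acquisition gets its own label, and a failure jumps to the label that releases exactly what has been obtained so far, in reverse order. A compilable user-space sketch of the shape; the resource names are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int register_thing(const char *name)
{
        char *path, *sb;
        FILE *bdev;
        int ret;

        ret = -ENOMEM;
        path = strdup(name);                    /* first acquisition */
        if (!path)
                goto out;

        sb = malloc(4096);                      /* second acquisition */
        if (!sb)
                goto out_free_path;

        ret = -EINVAL;
        bdev = fopen(path, "rb");               /* third acquisition */
        if (!bdev)
                goto out_free_sb;

        /* ... real work would go here ... */
        ret = 0;

        fclose(bdev);                           /* success path unwinds too */
out_free_sb:
        free(sb);
out_free_path:
        free(path);
out:
        return ret;
}

int main(void)
{
        printf("ret=%d\n", register_thing("/etc/hostname"));
        return 0;
}

Each label only ever releases state guaranteed to exist at that point, which is why the rework can drop the old catch-all err: label.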
wb_info_pool is needed since backlog is not zero */ + if (!backlog && mddev->serial_info_pool) { + /* serial_info_pool is not needed if backlog is zero */ + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, false); + } else if (backlog && !mddev->serial_info_pool) { + /* serial_info_pool is needed since backlog is not zero */ struct md_rdev *rdev; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, false); } if (old_mwb != backlog) md_bitmap_update_sb(mddev->bitmap); diff --git a/drivers/md/md.c b/drivers/md/md.c index 4e7c9f398bc6..4824d50526fa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } -static int rdev_init_wb(struct md_rdev *rdev) +static void rdev_uninit_serial(struct md_rdev *rdev) { - if (rdev->bdev->bd_queue->nr_hw_queues == 1) + if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) + return; + + kvfree(rdev->serial); + rdev->serial = NULL; +} + +static void rdevs_uninit_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) + rdev_uninit_serial(rdev); +} + +static int rdev_init_serial(struct md_rdev *rdev) +{ + /* serial_nums equals BARRIER_BUCKETS_NR */ + int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t)))); + struct serial_in_rdev *serial = NULL; + + if (test_bit(CollisionCheck, &rdev->flags)) return 0; - spin_lock_init(&rdev->wb_list_lock); - INIT_LIST_HEAD(&rdev->wb_list); - init_waitqueue_head(&rdev->wb_io_wait); - set_bit(WBCollisionCheck, &rdev->flags); + serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums, + GFP_KERNEL); + if (!serial) + return -ENOMEM; - return 1; + for (i = 0; i < serial_nums; i++) { + struct serial_in_rdev *serial_tmp = &serial[i]; + + spin_lock_init(&serial_tmp->serial_lock); + serial_tmp->serial_rb = RB_ROOT_CACHED; + init_waitqueue_head(&serial_tmp->serial_io_wait); + } + + rdev->serial = serial; + set_bit(CollisionCheck, &rdev->flags); + + return 0; +} + +static int rdevs_init_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + int ret = 0; + + rdev_for_each(rdev, mddev) { + ret = rdev_init_serial(rdev); + if (ret) + break; + } + + /* Free all resources if the pool does not exist */ + if (ret && !mddev->serial_info_pool) + rdevs_uninit_serial(mddev); + + return ret; } /* - * Create wb_info_pool if rdev is the first multi-queue device flaged - * with writemostly, also write-behind mode is enabled. + * rdev needs serialization enabled if it meets these conditions: + * 1. it is a multi-queue device flagged with writemostly. + * 2. the write-behind mode is enabled. */ -void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend) +static int rdev_need_serial(struct md_rdev *rdev) { - if (mddev->bitmap_info.max_write_behind == 0) - return; + return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && + rdev->bdev->bd_queue->nr_hw_queues != 1 && + test_bit(WriteMostly, &rdev->flags)); +} + +/* + * Init resources for rdev(s), then create serial_info_pool if: + * 1. rdev is the first device for which rdev_need_serial() returns true. + * 2. rdev is NULL, meaning we want to enable serialization for all rdevs. 
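rdev_init_serial() above allocates one bucket per raid1 barrier bucket; its comment notes that serial_nums equals BARRIER_BUCKETS_NR, i.e. 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t))). A sketch of that arithmetic, assuming 4 KiB pages and a 4-byte atomic_t, with the kernel's ilog2() modeled by a plain loop:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumed 4 KiB pages */

/* floor(log2(v)), like the kernel's ilog2() for runtime values */
static int ilog2(unsigned long v)
{
        int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned long atomic_size = 4;          /* assumed sizeof(atomic_t) */
        unsigned long buckets = 1UL << (PAGE_SHIFT - ilog2(atomic_size));

        /* one serial_in_rdev per barrier bucket: 1 << (12 - 2) = 1024 */
        printf("%lu buckets per rdev\n", buckets);
        assert(buckets == 1024);
        return 0;
}

Each rdev therefore carries a lock/rb-tree/waitqueue triple per bucket, so collision checks in different buckets never contend on one lock.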
+ */ +void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) +{ + int ret = 0; - if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev)) + if (rdev && !rdev_need_serial(rdev) && + !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool == NULL) { + if (!is_suspend) + mddev_suspend(mddev); + + if (!rdev) + ret = rdevs_init_serial(mddev); + else + ret = rdev_init_serial(rdev); + if (ret) + goto abort; + + if (mddev->serial_info_pool == NULL) { unsigned int noio_flag; - if (!is_suspend) - mddev_suspend(mddev); noio_flag = memalloc_noio_save(); - mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); memalloc_noio_restore(noio_flag); - if (!mddev->wb_info_pool) - pr_err("can't alloc memory pool for writemostly\n"); - if (!is_suspend) - mddev_resume(mddev); + if (!mddev->serial_info_pool) { + rdevs_uninit_serial(mddev); + pr_err("can't alloc memory pool for serialization\n"); + } } + +abort: + if (!is_suspend) + mddev_resume(mddev); } -EXPORT_SYMBOL_GPL(mddev_create_wb_pool); /* - * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck. + * Free resources from rdev(s), and destroy serial_info_pool under these conditions: + * 1. rdev is the last device flagged with CollisionCheck. + * 2. the bitmap is destroyed while the policy is not enabled. + * 3. when disabling the policy, the pool is destroyed only when no rdev needs it. */ -static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev) +void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) { - if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags)) + if (rdev && !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool) { + if (mddev->serial_info_pool) { struct md_rdev *temp; - int num = 0; + int num = 0; /* used to track if other rdevs need the pool */ - /* - * Check if other rdevs need wb_info_pool. 
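mddev_create_serial_pool() above backs the write path with a mempool of NR_SERIAL_INFOS preallocated serial_info objects, so an allocation can always make progress under memory pressure. A toy user-space model of the reserve semantics (an illustration of the idea only, not the kernel's mempool implementation):

#include <stdlib.h>

#define NR_RESERVE 8                    /* mirrors NR_SERIAL_INFOS */

struct pool {
        void *reserve[NR_RESERVE];      /* preallocated fallback objects */
        int nr_free;
        size_t objsize;
};

static int pool_init(struct pool *p, size_t objsize)
{
        p->objsize = objsize;
        for (p->nr_free = 0; p->nr_free < NR_RESERVE; p->nr_free++) {
                p->reserve[p->nr_free] = malloc(objsize);
                if (!p->reserve[p->nr_free])
                        return -1;      /* toy model: no partial cleanup */
        }
        return 0;
}

static void *pool_alloc(struct pool *p)
{
        void *obj = malloc(p->objsize); /* fast path: normal allocation */

        if (!obj && p->nr_free)         /* fallback: dip into the reserve */
                obj = p->reserve[--p->nr_free];
        return obj;
}

static void pool_free(struct pool *p, void *obj)
{
        if (p->nr_free < NR_RESERVE)    /* refill the reserve first */
                p->reserve[p->nr_free++] = obj;
        else
                free(obj);
}

int main(void)
{
        struct pool p;
        void *obj;

        if (pool_init(&p, 64))
                return 1;
        obj = pool_alloc(&p);
        if (obj)
                pool_free(&p, obj);
        return 0;
}

The kernel version additionally brackets the pool creation with memalloc_noio_save()/restore() so that the allocation itself cannot recurse into IO on the array being configured.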
- */ - rdev_for_each(temp, mddev) - if (temp != rdev && - test_bit(WBCollisionCheck, &temp->flags)) + if (!is_suspend) + mddev_suspend(mddev); + rdev_for_each(temp, mddev) { + if (!rdev) { + if (!mddev->serialize_policy || + !rdev_need_serial(temp)) + rdev_uninit_serial(temp); + else + num++; + } else if (temp != rdev && + test_bit(CollisionCheck, &temp->flags)) num++; - if (!num) { - mddev_suspend(rdev->mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - mddev_resume(rdev->mddev); } + + if (rdev) + rdev_uninit_serial(rdev); + + if (num) + pr_info("The mempool could be used by other devices\n"); + else { + mempool_destroy(mddev->serial_info_pool); + mddev->serial_info_pool = NULL; + } + if (!is_suspend) + mddev_resume(mddev); } } @@ -2337,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) pr_debug("md: bind<%s>\n", b); if (mddev->raid_disks) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, false); if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) goto fail; @@ -2375,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev) bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); @@ -2888,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } } else if (cmd_match(buf, "writemostly")) { set_bit(WriteMostly, &rdev->flags); - mddev_create_wb_pool(rdev->mddev, rdev, false); + mddev_create_serial_pool(rdev->mddev, rdev, false); err = 0; } else if (cmd_match(buf, "-writemostly")) { - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); clear_bit(WriteMostly, &rdev->flags); err = 0; } else if (cmd_match(buf, "blocked")) { @@ -5277,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev = __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, fail_last_dev_store); +static ssize_t serialize_policy_show(struct mddev *mddev, char *page) +{ + if (mddev->pers == NULL || (mddev->pers->level != 1)) + return sprintf(page, "n/a\n"); + else + return sprintf(page, "%d\n", mddev->serialize_policy); +} + +/* + * Setting serialize_policy to true enforces that write IO is not reordered + * for raid1. 
+ */ +static ssize_t +serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) +{ + int err; + bool value; + + err = kstrtobool(buf, &value); + if (err) + return err; + + if (value == mddev->serialize_policy) + return len; + + err = mddev_lock(mddev); + if (err) + return err; + if (mddev->pers == NULL || (mddev->pers->level != 1)) { + pr_err("md: serialize_policy is only effective for raid1\n"); + err = -EINVAL; + goto unlock; + } + + mddev_suspend(mddev); + if (value) + mddev_create_serial_pool(mddev, NULL, true); + else + mddev_destroy_serial_pool(mddev, NULL, true); + mddev->serialize_policy = value; + mddev_resume(mddev); +unlock: + mddev_unlock(mddev); + return err ?: len; +} + +static struct md_sysfs_entry md_serialize_policy = +__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, + serialize_policy_store); + + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_layout.attr, @@ -5294,6 +5436,7 @@ static struct attribute *md_default_attrs[] = { &max_corr_read_errors.attr, &md_consistency_policy.attr, &md_fail_last_dev.attr, + &md_serialize_policy.attr, NULL, }; @@ -5769,18 +5912,18 @@ int md_run(struct mddev *mddev) goto bitmap_abort; if (mddev->bitmap_info.max_write_behind > 0) { - bool creat_pool = false; + bool create_pool = false; rdev_for_each(rdev, mddev) { if (test_bit(WriteMostly, &rdev->flags) && - rdev_init_wb(rdev)) - creat_pool = true; - } - if (creat_pool && mddev->wb_info_pool == NULL) { - mddev->wb_info_pool = - mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); - if (!mddev->wb_info_pool) { + rdev_init_serial(rdev)) + create_pool = true; + } + if (create_pool && mddev->serial_info_pool == NULL) { + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); + if (!mddev->serial_info_pool) { err = -ENOMEM; goto bitmap_abort; } @@ -6025,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev) mddev->in_sync = 1; md_update_sb(mddev, 1); } - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + /* disable policy to guarantee rdevs free resources for serialization */ + mddev->serialize_policy = 0; + mddev_destroy_serial_pool(mddev, NULL, true); } void md_stop_writes(struct mddev *mddev) diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f86f8adb0a4..acd681939112 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -32,6 +32,16 @@ * be retried. */ #define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT) + +/* + * The struct embedded in rdev is used to serialize IO. + */ +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + /* * MD's 'extended' device */ @@ -110,12 +120,7 @@ struct md_rdev { * in superblock. */ - /* - * The members for check collision of write behind IOs. - */ - struct list_head wb_list; - spinlock_t wb_list_lock; - wait_queue_head_t wb_io_wait; + struct serial_in_rdev *serial; /* used for raid1 io serialization */ struct work_struct del_work; /* used for delayed sysfs removal */ @@ -201,9 +206,9 @@ enum flag_bits { * it didn't fail, so don't use FailFast * any more for metadata */ - WBCollisionCheck, /* - * multiqueue device should check if there - * is collision between write behind bios. + CollisionCheck, /* + * check if there is collision between raid1 + * serial bios. 
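serialize_policy_store() above follows the standard sysfs store shape: parse the input, short-circuit when nothing changes, take the array lock, validate (raid1 only), quiesce IO around the actual switch, and finish with return err ?: len. A compilable skeleton of that control flow; every name below is an invented stand-in, and the locking/quiesce helpers are stubs:

#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/types.h>

struct dev { bool policy; bool is_raid1; };

static int parse_bool(const char *buf, bool *v)
{
        if (!strcmp(buf, "1") || !strcmp(buf, "1\n")) { *v = true;  return 0; }
        if (!strcmp(buf, "0") || !strcmp(buf, "0\n")) { *v = false; return 0; }
        return -EINVAL;
}

static void lock_dev(struct dev *d)   { (void)d; }     /* mddev_lock()    */
static void unlock_dev(struct dev *d) { (void)d; }     /* mddev_unlock()  */
static void suspend_io(struct dev *d) { (void)d; }     /* mddev_suspend() */
static void resume_io(struct dev *d)  { (void)d; }     /* mddev_resume()  */

static ssize_t policy_store(struct dev *d, const char *buf, size_t len)
{
        bool value;
        int err = parse_bool(buf, &value);

        if (err)
                return err;
        if (value == d->policy)
                return len;                     /* nothing to do */

        lock_dev(d);
        if (!d->is_raid1) {                     /* only raid1 supports it */
                err = -EINVAL;
                goto unlock;
        }
        suspend_io(d);                          /* no IO while switching */
        d->policy = value;
        resume_io(d);
unlock:
        unlock_dev(d);
        return err ? err : len;
}

int main(void)
{
        struct dev d = { .policy = false, .is_raid1 = true };

        return policy_store(&d, "1\n", 2) == 2 ? 0 : 1;
}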
*/ }; @@ -263,12 +268,13 @@ enum mddev_sb_flags { MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define NR_WB_INFOS 8 -/* record current range of write behind IOs */ -struct wb_info { - sector_t lo; - sector_t hi; - struct list_head list; +#define NR_SERIAL_INFOS 8 +/* record current range of serialize IOs */ +struct serial_info { + struct rb_node node; + sector_t start; /* start sector of rb node */ + sector_t last; /* end sector of rb node */ + sector_t _subtree_last; /* highest sector in subtree of rb node */ }; struct mddev { @@ -487,13 +493,14 @@ struct mddev { */ struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ - mempool_t *wb_info_pool; + mempool_t *serial_info_pool; void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ bool has_superblocks:1; bool fail_last_dev:1; + bool serialize_policy:1; }; enum recovery_flags { @@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); -extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend); +extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); +extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b7c20979bd19..322386ff5d22 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) char b[BDEVNAME_SIZE]; char b2[BDEVNAME_SIZE]; struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); - unsigned short blksize = 512; + unsigned blksize = 512; *private_conf = ERR_PTR(-ENOMEM); if (!conf) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 201fd8aec59a..cd810e195086 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/interval_tree_generic.h> #include <trace/events/block.h> @@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); #include "raid1-10.c" -static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) +INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last, + START, LAST, static inline, raid1_rb); + +static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio, + struct serial_info *si, int idx) { - struct wb_info *wi, *temp_wi; unsigned long flags; int ret = 0; - struct mddev *mddev = rdev->mddev; - - wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO); - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(temp_wi, &rdev->wb_list, list) { - /* collision happened */ - if (hi > temp_wi->lo && lo < temp_wi->hi) { - ret = -EBUSY; - break; - } + sector_t lo = r1_bio->sector; + sector_t hi = lo + r1_bio->sectors; + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + /* collision happened */ + 
if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) + ret = -EBUSY; + else { + si->start = lo; + si->last = hi; + raid1_rb_insert(si, &serial->serial_rb); } - - if (!ret) { - wi->lo = lo; - wi->hi = hi; - list_add(&wi->list, &rdev->wb_list); - } else - mempool_free(wi, mddev->wb_info_pool); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); + spin_unlock_irqrestore(&serial->serial_lock, flags); return ret; } -static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) +static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) +{ + struct mddev *mddev = rdev->mddev; + struct serial_info *si; + int idx = sector_to_idx(r1_bio->sector); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + if (WARN_ON(!mddev->serial_info_pool)) + return; + si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); + wait_event(serial->serial_io_wait, + check_and_add_serial(rdev, r1_bio, si, idx) == 0); +} + +static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) { - struct wb_info *wi; + struct serial_info *si; unsigned long flags; int found = 0; struct mddev *mddev = rdev->mddev; - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(wi, &rdev->wb_list, list) - if (hi == wi->hi && lo == wi->lo) { - list_del(&wi->list); - mempool_free(wi, mddev->wb_info_pool); + int idx = sector_to_idx(lo); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); + si; si = raid1_rb_iter_next(si, lo, hi)) { + if (si->start == lo && si->last == hi) { + raid1_rb_remove(si, &serial->serial_rb); + mempool_free(si, mddev->serial_info_pool); found = 1; break; } - + } if (!found) - WARN(1, "The write behind IO is not recorded\n"); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); - wake_up(&rdev->wb_io_wait); + WARN(1, "The write IO is not recorded for serialization\n"); + spin_unlock_irqrestore(&serial->serial_lock, flags); + wake_up(&serial->serial_io_wait); } /* @@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio) int mirror = find_bio_disk(r1_bio, bio); struct md_rdev *rdev = conf->mirrors[mirror].rdev; bool discard_error; + sector_t lo = r1_bio->sector; + sector_t hi = r1_bio->sector + r1_bio->sectors; discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; @@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - remove_wb(rdev, lo, hi); - } + if (test_bit(CollisionCheck, &rdev->flags)) + remove_serial(rdev, lo, hi); if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio) call_bio_endio(r1_bio); } } - } + } else if (rdev->mddev->serialize_policy) + remove_serial(rdev, lo, hi); if (r1_bio->bios[mirror] == NULL) rdev_dec_pending(rdev, conf->mddev); @@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, for (i = 0; i < disks; i++) { struct bio *mbio = NULL; + struct md_rdev *rdev = conf->mirrors[i].rdev; if (!r1_bio->bios[i]) continue; @@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); if (r1_bio->behind_master_bio) { - struct md_rdev *rdev = conf->mirrors[i].rdev; - - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t 
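check_and_add_serial() above asks the interval tree whether any in-flight write overlaps [lo, hi] before recording the new range, returning -EBUSY on a collision so the caller can wait. The user-space model below performs the same closed-interval test with a linear scan (endpoint treatment mirrors the tree query; the rb-tree in the patch just makes the lookup O(log n)):

#include <stdio.h>

typedef unsigned long long sector_t;

struct range { sector_t start, last; int used; };

#define MAX_RANGES 32
static struct range inflight[MAX_RANGES];

/* closed intervals: [lo, hi] overlaps [start, last] */
static int overlaps(sector_t lo, sector_t hi, const struct range *r)
{
        return r->used && lo <= r->last && hi >= r->start;
}

/* returns -1 (busy) on collision, 0 once the range is recorded */
static int check_and_add(sector_t lo, sector_t hi)
{
        int i, free_slot = -1;

        for (i = 0; i < MAX_RANGES; i++) {
                if (overlaps(lo, hi, &inflight[i]))
                        return -1;
                if (!inflight[i].used)
                        free_slot = i;
        }
        if (free_slot < 0)
                return -1;      /* toy model: a full table counts as busy */
        inflight[free_slot] = (struct range){ .start = lo, .last = hi, .used = 1 };
        return 0;
}

int main(void)
{
        printf("%d\n", check_and_add(0, 7));    /* 0: recorded */
        printf("%d\n", check_and_add(4, 9));    /* -1: overlaps [0,7] */
        printf("%d\n", check_and_add(8, 15));   /* 0: disjoint */
        return 0;
}

remove_serial() is the inverse: it deletes the exact [lo, hi] entry and wakes waiters blocked in wait_for_serialization().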
lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - wait_event(rdev->wb_io_wait, - check_and_add_wb(rdev, lo, hi) == 0); - } + if (test_bit(CollisionCheck, &rdev->flags)) + wait_for_serialization(rdev, r1_bio); if (test_bit(WriteMostly, &rdev->flags)) atomic_inc(&r1_bio->behind_remaining); - } + } else if (mddev->serialize_policy) + wait_for_serialization(rdev, r1_bio); r1_bio->bios[i] = mbio; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d4d3b67ffbba..ba00e9877f02 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, - int *worker_cnt_per_group, struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) @@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; - int group_cnt, worker_cnt_per_group; + int group_cnt; if (len >= PAGE_SIZE) return -EINVAL; @@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) if (old_groups) flush_workqueue(raid5_wq); - err = alloc_thread_groups(conf, new, - &group_cnt, &worker_cnt_per_group, - &new_groups); + err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); if (!err) { spin_lock_irq(&conf->device_lock); conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = new; conf->worker_groups = new_groups; spin_unlock_irq(&conf->device_lock); @@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt, - int *group_cnt, - int *worker_cnt_per_group, +static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, struct r5worker_group **worker_groups) { int i, j, k; ssize_t size; struct r5worker *workers; - *worker_cnt_per_group = cnt; if (cnt == 0) { *group_cnt = 0; *worker_groups = NULL; @@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct disk_info *disk; char pers_name[6]; int i; - int group_cnt, worker_cnt_per_group; + int group_cnt; struct r5worker_group *new_group; int ret; @@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) for (i = 0; i < PENDING_IO_MAX; i++) list_add(&conf->pending_data[i].sibling, &conf->free_list); /* Don't enable multi-threading by default*/ - if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, - &new_group)) { + if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = 0; conf->worker_groups = new_group; } else goto abort; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f9ac22413000..1074b882c57c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -100,19 +100,19 @@ struct buflist { * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. 
*/ -static int mptctl_fw_download(unsigned long arg); -static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); -static int mptctl_gettargetinfo(unsigned long arg); -static int mptctl_readtest(unsigned long arg); -static int mptctl_mpt_command(unsigned long arg); -static int mptctl_eventquery(unsigned long arg); -static int mptctl_eventenable(unsigned long arg); -static int mptctl_eventreport(unsigned long arg); -static int mptctl_replace_fw(unsigned long arg); - -static int mptctl_do_reset(unsigned long arg); -static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); -static int mptctl_hp_targetinfo(unsigned long arg); +static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg); + +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); @@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); /* * Private function calls. */ -static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); -static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, @@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * by TM and FW reloads. 
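Every mptctl hunk from here on makes the same change: the adapter is resolved from the ioctl's iocnum once, in __mptctl_ioctl(), and each handler now receives the MPT_ADAPTER pointer instead of re-running mpt_verify_adapter() on a number it copied from user space. A small sketch of that dispatcher pattern; all names below are invented:

#include <errno.h>
#include <stddef.h>

struct adapter { int id; };

#define MAX_ADAPTERS 4
static struct adapter adapters[MAX_ADAPTERS] = { {0}, {1}, {2}, {3} };

static struct adapter *verify_adapter(int id)
{
        if (id < 0 || id >= MAX_ADAPTERS)
                return NULL;
        return &adapters[id];
}

static int do_reset(struct adapter *a)       { (void)a; return 0; }
static int do_fw_download(struct adapter *a) { (void)a; return 0; }

enum { CMD_RESET, CMD_FWDL };

static int dispatch(int id, int cmd)
{
        struct adapter *a = verify_adapter(id); /* looked up exactly once */

        if (!a)
                return -ENODEV;

        switch (cmd) {
        case CMD_RESET: return do_reset(a);
        case CMD_FWDL:  return do_fw_download(a);
        default:        return -EINVAL;
        }
}

int main(void)
{
        return dispatch(1, CMD_RESET);          /* 0 on success */
}

Centralizing the lookup removes a dozen copies of the same validation and stops handlers from trusting an ioc number taken from the user buffer rather than from the file handle.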
*/ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { - return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { - return mptctl_gettargetinfo(arg); + return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { - return mptctl_readtest(arg); + return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { - return mptctl_eventquery(arg); + return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { - return mptctl_eventenable(arg); + return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { - return mptctl_eventreport(arg); + return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); + return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or @@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); + ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) - ret = mptctl_mpt_command(arg); + ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) - ret = mptctl_do_reset(arg); + ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); + ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; @@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } -static int mptctl_do_reset(unsigned long arg) +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; - MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " @@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg) return -EFAULT; } - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ - } - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); @@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_fw_download(unsigned long arg) +mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; @@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg) return -EFAULT; } - return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; @@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) pFWDownloadReply_t ReplyMsg = NULL; unsigned long 
timeleft; - if (mpt_verify_adapter(ioc, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", - ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); @@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", - iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; @@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE * -ENODEV if no such device/adapter */ static int -mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; - int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; @@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) return PTR_ERR(karg); } - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); - return -ENODEV; - } - /* Verify the data transfer size is correct. 
*/ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " @@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) * -ENODEV if no such device/adapter */ static int -mptctl_gettargetinfo (unsigned long arg) +mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; - MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; - int iocnum; int numDevices = 0; int lun; int maxWordsLeft; @@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes @@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_readtest (unsigned long arg) +mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_test __user *uarg = (void __user *) arg; struct mpt_ioctl_test karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " @@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", ioc->name)); /* Fill in the data and return the structure to the calling @@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_eventquery (unsigned long arg) +mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " @@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; @@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventenable (unsigned long arg) +mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " @@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - 
__FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { @@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventreport (unsigned long arg) +mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; - MPT_ADAPTER *ioc; - int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { @@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); @@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_replace_fw (unsigned long arg) +mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; struct mpt_ioctl_replace_fw karg; - MPT_ADAPTER *ioc; - int iocnum; int newFwSize; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { @@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", ioc->name)); /* If caching FW, Free the old FW image @@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg) * -ENOMEM if memory allocation error */ static int -mptctl_mpt_command (unsigned long arg) +mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; - MPT_ADAPTER *ioc; - int iocnum; int rc; @@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - - rc = mptctl_do_mpt_command (karg, &uarg->MF); + rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; } @@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg) * -EPERM if SCSI I/O and target is untagged */ static int -mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; @@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ - int iocnum, flagsLength; + int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; @@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, 
&ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); @@ -2418,17 +2321,15 @@ done_free_mem: * -ENOMEM if memory allocation error */ static int -mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; - int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; @@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); @@ -2659,15 +2554,13 @@ retry_wait: * -ENOMEM if memory allocation error */ static int -mptctl_hp_targetinfo(unsigned long arg) +mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) { hp_target_info_t __user *uarg = (void __user *) arg; SCSIDevicePage0_t *pg0_alloc; SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; MPT_SCSI_HOST *hd = NULL; hp_target_info_t karg; - int iocnum; int data_sz; dma_addr_t page_dma; CONFIGPARMS cfg; @@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } if (karg.hdr.id >= MPT_MAX_FC_DEVICES) return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", @@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); - ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); @@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, /* Pass new structure to do_mpt_command */ - ret = mptctl_do_mpt_command (karg, &uarg->MF); + ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 6d27ccfe0680..3c2d405bc79b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) cdev = &edev->component[i]; if (cdev->dev == dev) { enclosure_remove_links(cdev); - device_del(&cdev->cdev); put_device(dev); cdev->dev = NULL; - return device_add(&cdev->cdev); + return 0; } } return -ENODEV; diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index a4fdad04809a..de87693cf557 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) void lkdtm_UNSET_SMEP(void) { -#ifdef CONFIG_X86_64 +#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 void 
(*direct_write_cr4)(unsigned long val); unsigned char *insn; @@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void) native_write_cr4(cr4); } #else - pr_err("FAIL: this test is x86_64-only\n"); + pr_err("XFAIL: this test is x86_64-only\n"); #endif } -#ifdef CONFIG_X86_32 void lkdtm_DOUBLE_FAULT(void) { +#ifdef CONFIG_X86_32 /* * Trigger #DF by setting the stack limit to zero. This clobbers * a GDT TLS slot, which is okay because the current task will die @@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void) asm volatile ("movw %0, %%ss; addl $0, (%%esp)" :: "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3))); - panic("tried to double fault but didn't die\n"); -} + pr_err("FAIL: tried to double fault but didn't die\n"); +#else + pr_err("XFAIL: this test is ia32-only\n"); #endif +} diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 95b41c0891d0..663d87924e5e 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, - 0); + card->ext_csd.generic_cmd6_time); } if (!err) err = mmc_erase(card, from, nr, card->erase_arg); @@ -1149,7 +1149,7 @@ retry: arg == MMC_SECURE_TRIM1_ARG ? INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } @@ -1167,7 +1167,7 @@ retry: err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index abf8f5eb0a1c..aa54d359dab7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work) } for (i = 0; i < ARRAY_SIZE(freqs); i++) { - if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) + unsigned int freq = freqs[i]; + if (freq > host->f_max) { + if (i + 1 < ARRAY_SIZE(freqs)) + continue; + freq = host->f_max; + } + if (!mmc_rescan_try_freq(host, max(freq, host->f_min))) break; if (freqs[i] <= host->f_min) break; @@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { - host->f_init = max(freqs[0], host->f_min); + host->f_init = max(min(freqs[0], host->f_max), host->f_min); host->rescan_disable = 0; host->ios.power_mode = MMC_POWER_UNDEFINED; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 105b7a7c0251..c8768726d925 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host) struct device *dev = host->parent; u32 bus_width, drv_type, cd_debounce_delay_ms; int ret; - bool cd_cap_invert, cd_gpio_invert = false; - bool ro_cap_invert, ro_gpio_invert = false; if (!dev || !dev_fwnode(dev)) return 0; @@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host) */ /* Parse Card Detection */ + if (device_property_read_bool(dev, "non-removable")) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); + if (device_property_read_bool(dev, "cd-inverted")) + host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; if (device_property_read_u32(dev, "cd-debounce-delay-ms", &cd_debounce_delay_ms)) @@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, false, - cd_debounce_delay_ms * 1000, - &cd_gpio_invert); + 
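The mmc_rescan()/mmc_start_host() hunks above clamp the trial clock into [f_min, f_max], so controllers with a low f_max are no longer probed above their ceiling. A tiny model of the clamp; the freqs[] values are the mmc core's usual trial rates, assumed here for the demo:

#include <assert.h>

static const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };

/* host->f_init = max(min(freqs[0], f_max), f_min) */
static unsigned int clamp_freq(unsigned int f, unsigned int fmin,
                               unsigned int fmax)
{
        if (f > fmax)
                f = fmax;
        if (f < fmin)
                f = fmin;               /* f_min wins if the two conflict */
        return f;
}

int main(void)
{
        assert(clamp_freq(freqs[0], 100000, 300000) == 300000);
        assert(clamp_freq(freqs[0], 500000, 800000) == 500000);
        assert(clamp_freq(freqs[0], 100000, 50000000) == 400000);
        return 0;
}

In the rescan loop itself, entries above f_max are simply skipped, except that the last entry is clamped to f_max so at least one attempt is always made.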
cd_debounce_delay_ms * 1000); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) return ret; - - /* - * There are two ways to flag that the CD line is inverted: - * through the cd-inverted flag and by the GPIO line itself - * being inverted from the GPIO subsystem. This is a leftover - * from the times when the GPIO subsystem did not make it - * possible to flag a line as inverted. - * - * If the capability on the host AND the GPIO line are - * both inverted, the end result is that the CD line is - * not inverted. - */ - if (cd_cap_invert ^ cd_gpio_invert) - host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } /* Parse Write Protection */ - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); + if (device_property_read_bool(dev, "wp-inverted")) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(host, "wp", 0, 0); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; - /* See the comment on CD inversion above */ - if (ro_cap_invert ^ ro_gpio_invert) - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; if (device_property_read_bool(dev, "cap-mmc-highspeed")) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 09113b9ad679..da425ee2d9bf 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -19,7 +19,9 @@ #include "host.h" #include "mmc_ops.h" -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/ +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ static const u8 tuning_blk_pattern_4bit[] = { 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, @@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, bool expired = false; bool busy = false; - /* We have an unspecified cmd timeout, use the fallback value. */ - if (!timeout_ms) - timeout_ms = MMC_OPS_TIMEOUT_MS; - /* * In cases when not allowed to poll by using CMD13 or because we aren't * capable of polling by using ->card_busy(), then rely on waiting the @@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, mmc_retune_hold(host); + if (!timeout_ms) { + pr_warn("%s: unspecified timeout for CMD6 - use generic\n", + mmc_hostname(host)); + timeout_ms = card->ext_csd.generic_cmd6_time; + } + /* - * If the cmd timeout and the max_busy_timeout of the host are both - * specified, let's validate them. A failure means we need to prevent - * the host from doing hw busy detection, which is done by converting - * to a R1 response instead of a R1B. + * If the max_busy_timeout of the host is specified, make sure it's + * enough to fit the used timeout_ms. In case it's not, let's instruct + * the host to avoid HW busy detection, by converting to a R1 response + * instead of a R1B. 
*/ - if (timeout_ms && host->max_busy_timeout && - (timeout_ms > host->max_busy_timeout)) + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; @@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, cmd.flags = MMC_CMD_AC; if (use_r1b_resp) { cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; - /* - * A busy_timeout of zero means the host can decide to use - * whatever value it finds suitable. - */ cmd.busy_timeout = timeout_ms; } else { cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; @@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card) * urgent levels by using an asynchronous background task, when idle. */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS); + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); if (err) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card) (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_FLUSH_CACHE, 1, 0); + EXT_CSD_FLUSH_CACHE, 1, + MMC_CACHE_FLUSH_TIMEOUT_MS); if (err) pr_err("%s: cache flush error %d\n", mmc_hostname(card->host), err); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index da2596c5fa28..05e907451df9 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -19,7 +19,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; char *cd_label; @@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); - if (ctx->override_cd_active_level) { - int value = cansleep ? - gpiod_get_raw_value_cansleep(ctx->cd_gpio) : - gpiod_get_raw_value(ctx->cd_gpio); - return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); - } - return cansleep ? gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); @@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, set - * to NULL to ignore * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). @@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce, bool *gpio_invert) + unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, ctx->cd_debounce_delay_ms = debounce / 1000; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + /* override forces default (active-low) polarity ... */ + if (override_active_level && !gpiod_is_active_low(desc)) + gpiod_toggle_active_low(desc); + + /* ... 
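The __mmc_switch() changes above encode two decisions: a zero CMD6 timeout now falls back to the card's generic_cmd6_time (with a warning), and hardware busy detection via an R1B response is only kept when the host's max_busy_timeout can cover the chosen timeout; otherwise the command degrades to R1 plus software polling. A compilable model of that decision, with invented millisecond values:

#include <assert.h>
#include <stdbool.h>

struct switch_cfg {
        unsigned int timeout_ms;
        bool use_r1b;
};

static struct switch_cfg pick_timeout(unsigned int timeout_ms,
                                      unsigned int generic_cmd6_time,
                                      unsigned int max_busy_timeout)
{
        struct switch_cfg c = { .use_r1b = true };

        if (!timeout_ms)                /* unspecified: use the fallback */
                timeout_ms = generic_cmd6_time;
        if (max_busy_timeout && timeout_ms > max_busy_timeout)
                c.use_r1b = false;      /* host can't wait: R1 + polling */
        c.timeout_ms = timeout_ms;
        return c;
}

int main(void)
{
        assert(pick_timeout(0, 250, 1000).timeout_ms == 250);
        assert(pick_timeout(30000, 250, 1000).use_r1b == false);
        assert(pick_timeout(500, 250, 0).use_r1b == true);
        return 0;
}

The related hunks replace the catch-all ten-minute MMC_OPS_TIMEOUT_MS with operation-specific bounds (120 s for BKOPS, 30 s for cache flush), so a hung card now fails in bounded time.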
or active-high */ + if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); - ctx->override_cd_active_level = override_active_level; ctx->cd_gpio = desc; return 0; @@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, - * set to NULL to ignore * * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, - unsigned int debounce, bool *gpio_invert) + unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return ret; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d06b2dfe3c95..3a5089f0332c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -501,6 +501,7 @@ config MMC_SDHCI_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB tristate "Broadcom SDIO/SD/MMC support" depends on ARCH_BRCMSTB || BMIPS_GENERIC depends on MMC_SDHCI_PLTFM + select MMC_CQHCI default y help This selects support for the SDIO/SD/MMC Host Controller on @@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL + select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's DRA7 SOCs. The controller supports @@ -1040,3 +1043,6 @@ config MMC_OWL help This selects support for the SD/MMC Host Controller on Actions Semi Owl SoCs. 
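The slot-gpio changes above keep the polarity rule the removed mmc_of_parse() comment spelled out: the cd-inverted/wp-inverted capability and the GPIO line's own active-low flag XOR together, so a doubly-inverted line is effectively not inverted. The new code realises that by calling gpiod_toggle_active_low() once per inversion source instead of computing the XOR by hand. A truth-table check of the rule (the helper name is invented):

#include <assert.h>
#include <stdbool.h>

/* active-high iff exactly one of the two inversion sources is set */
static bool line_active_high(bool cap_invert, bool gpio_invert)
{
        return cap_invert ^ gpio_invert;
}

int main(void)
{
        assert(line_active_high(false, false) == false);
        assert(line_active_high(true,  false) == true);
        assert(line_active_high(false, true)  == true);
        assert(line_active_high(true,  true)  == false);  /* cancels out */
        return 0;
}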
+ +config MMC_SDHCI_EXTERNAL_DMA + bool diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 6f065bb5c55a..aeaaa5314924 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev) { struct atmel_mci *host = dev_get_drvdata(dev); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); return clk_prepare_enable(host->mck); } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bc8aeb47a7b4..8823680ca42c 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out2; } - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&pdev->dev, "no IRQ defined\n"); + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) goto out3; - } - host->irq = r->start; mmc->ops = &au1xmmc_ops; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 99f61fd2a658..c3d949847cbd 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev) host->dma_chan = NULL; host->dma_desc = NULL; - host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx"); + host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx"); + if (IS_ERR(host->dma_chan_rxtx)) { + ret = PTR_ERR(host->dma_chan_rxtx); + host->dma_chan_rxtx = NULL; + + if (ret == -EPROBE_DEFER) + goto err; + + /* Ignore errors to fall back to PIO mode */ + } + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index eee08d81b242..76013bbbcff3 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev, return ret; host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!host->base) - return -EINVAL; + if (!host->base) { + ret = -EINVAL; + goto error; + } /* On ThunderX these are identical */ host->dma_base = host->base; @@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev, host->reg_off_dma = 0x160; host->clk = devm_clk_get(dev, NULL); - if (IS_ERR(host->clk)) - return PTR_ERR(host->clk); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto error; + } ret = clk_prepare_enable(host->clk); if (ret) - return ret; + goto error; host->sys_freq = clk_get_rate(host->clk); spin_lock_init(&host->irq_handler_lock); @@ -157,6 +161,7 @@ error: } } clk_disable_unprepare(host->clk); + pci_release_regions(pdev); return ret; } @@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev) writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); clk_disable_unprepare(host->clk); + pci_release_regions(pdev); } static const struct pci_device_id thunder_mmc_id_table[] = { diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index ebfaeb33bc8c..f01fecd75833 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) mmc->caps |= pdata->caps; /* Register a cd gpio, if there is not one, enable polling */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = 
mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fc9d4d000f97..bc5278ab5707 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host) if (!host->dms) return -ENOMEM; - host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); - if (!host->dms->ch) { + host->dms->ch = dma_request_chan(host->dev, "rx-tx"); + if (IS_ERR(host->dms->ch)) { + int ret = PTR_ERR(host->dms->ch); + dev_err(host->dev, "Failed to get external DMA channel.\n"); kfree(host->dms); host->dms = NULL; - return -ENXIO; + return ret; } return 0; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 78383f60a3dc..fbae87d1f017 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev) static int jz4740_mmc_resume(struct device *dev) { - return pinctrl_pm_select_default_state(dev); + return pinctrl_select_default_state(dev); } static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..35400cf2a2e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -161,7 +161,6 @@ struct meson_host { bool dram_access_quirk; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; @@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host) u32 cfg; if (host->pins_clk_gate) - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(host->dev); /* Make sure the clock is not stopped in the controller */ cfg = readl(host->regs + SD_EMMC_CFG); @@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - ret = PTR_ERR(host->pins_default); - goto free_host; - } - host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, "clk-gate"); if (IS_ERR(host->pins_clk_gate)) { diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index ba9a63db73da..8b038e7b2cd3 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) struct platform_device *slot_pdev; struct mmc_host *mmc; struct meson_mx_mmc_host *host; - struct resource *res; int ret, irq; u32 conf; @@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(host->controller_dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto error_free_mmc; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 74c6cfbf9172..951f76dc1ddd 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) * SPI protocol. Another is that when chipselect is released while * the card returns BUSY status, the clock must issue several cycles * with chipselect high before the card will stop driving its output. + * + * SPI_CS_HIGH means "asserted" here. 
In some cases like when using + * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically + * inverted by gpiolib, so if we want to ensure the line is driven high + * we should toggle the default with an XOR as we do here. */ - host->spi->mode |= SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Just warn; most cards work without it. */ dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Wot, we can't get the same setup we had before? */ dev_err(&host->spi->dev, @@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi) * Index 0 is card detect * Old boardfiles were specifying 1 ms as debounce */ - status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL); + status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) { @@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 40e72c30ea84..e9ffce8d41ea 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .busy_timeout = true, .busy_detect = true, @@ -284,6 +289,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host) host->dma_priv
= dmae; - dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "rx"); - dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "tx"); + dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(dmae->rx_channel)) { + int ret = PTR_ERR(dmae->rx_channel); + dmae->rx_channel = NULL; + return ret; + } + + dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(dmae->tx_channel)) { + if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER) + dev_warn(mmc_dev(host->mmc), + "Deferred probe for TX channel ignored\n"); + dmae->tx_channel = NULL; + } /* * If only an RX channel is specified, the driver will @@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * Only power-of-two block sizes (such as 64 bytes) are known to work + * reliably; those are sent regularly during SDIO traffic, so for these + * we enable DMA transfers. + */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; @@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } else if (host->variant->busy_timeout && busy_resp && status & MCI_DATATIMEOUT) { cmd->error = -ETIMEDOUT; + host->irq_action = IRQ_WAKE_THREAD; } else { cmd->resp[0] = readl(base + MMCIRESPONSE0); cmd->resp[1] = readl(base + MMCIRESPONSE1); @@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, return; } } - mmci_request_end(host, host->mrq); + + if (host->irq_action != IRQ_WAKE_THREAD) + mmci_request_end(host, host->mrq); + } else if (sbc) { mmci_start_command(host, host->mrq->cmd, 0); } else if (!host->variant->datactrl_first && @@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; u32 status; - int ret = 0; spin_lock(&host->lock); + host->irq_action = IRQ_HANDLED; do { status = readl(host->base + MMCISTATUS); @@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; - ret = 1; } while (status); spin_unlock(&host->lock); - return IRQ_RETVAL(ret); + return host->irq_action; +} + +/* + * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW. + * + * A reset is needed for some variants, where a datatimeout for an R1B request + * causes the DPSM to stay busy (non-functional).
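+ * Recover by asserting the optional reset control, then restoring the clock, + * power and interrupt mask registers before completing the stuck request.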
+ */ +static irqreturn_t mmci_irq_thread(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + unsigned long flags; + + if (host->rst) { + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + } + + spin_lock_irqsave(&host->lock, flags); + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + host->irq_action = IRQ_HANDLED; + mmci_request_end(host, host->mrq); + spin_unlock_irqrestore(&host->lock, flags); + + return host->irq_action; } static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) pinctrl_select_state(host->pinctrl, host->pins_opendrain); else - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(mmc_dev(mmc)); } /* @@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev, goto host_free; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - dev_err(mmc_dev(mmc), "Can't select default pins\n"); - ret = PTR_ERR(host->pins_default); - goto host_free; - } - host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, MMCI_PINCTRL_STATE_OPENDRAIN); if (IS_ERR(host->pins_opendrain)) { @@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev, * silently if these do not exist */ if (!np) { - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) goto clk_disable; } - ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, - DRIVER_NAME " (cmd)", host); + ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq, + mmci_irq_thread, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) goto clk_disable; @@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev) struct mmci_host *host = mmc_priv(mmc); clk_prepare_enable(host->clk); mmci_restore(host); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); } return 0; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 158e1231aa23..ea6a0b5779d4 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -279,7 +279,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if any block sizes are accepted by + * the hardware, such as with some SDIO traffic that sends + * odd packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2.
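+ * See the Ux500 workaround note in _mmci_dmae_prep_data().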
* @datactrl_first: true if data must be setup before send command * @datacnt_useless: true if you could not use datacnt register to read * remaining data @@ -326,6 +330,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; @@ -404,7 +410,6 @@ struct mmci_host { struct mmci_host_ops *ops; struct variant_data *variant; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_opendrain; u8 hw_designer; @@ -412,6 +417,7 @@ struct mmci_host { struct timer_list timer; unsigned int oldstat; + u32 irq_action; /* pio stuff */ struct sg_mapping_iter sg_miter; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 010fe29a4888..7726dcf48f2c 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev) if (ret) goto host_free; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(&pdev->dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto host_free; diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 74a0a7fbbf7f..203b61712601 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev) struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; - struct resource *r; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (irq < 0) return -ENXIO; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); @@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev) spin_lock_init(&host->lock); - host->base = devm_ioremap_resource(&pdev->dev, r); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 011b59a3602e..b3d654c688e5 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev) mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); if (!host->pdata) { - host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(host->dma)) { + if (PTR_ERR(host->dma) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk_put; + } + + /* Ignore errors to fall back to PIO mode */ + host->dma = NULL; + } } else { res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4031217d21c3..d82674aed447 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev) goto out_clk_disable; } - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_clk_disable; } diff --git a/drivers/mmc/host/omap_hsmmc.c 
b/drivers/mmc/host/omap_hsmmc.c index 767e964ca5a2..a379c45b985c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) ret = PTR_ERR(p); goto err_free_irq; } - if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { - dev_info(host->dev, "missing default pinctrl state\n"); - devm_pinctrl_put(p); - ret = -EINVAL; - goto err_free_irq; - } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { dev_info(host->dev, "missing idle pinctrl state\n"); @@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev) if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); /* irq lost, if pinmux incorrect */ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); } else { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); } spin_unlock_irqrestore(&host->irq_lock, flags); return 0; diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 771e3d00f1bb..01ffe51f413d 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc"); - if (!owl_host->dma) { + owl_host->dma = dma_request_chan(&pdev->dev, "mmc"); + if (IS_ERR(owl_host->dma)) { dev_err(owl_host->dev, "Failed to get external DMA channel.\n"); - ret = -ENXIO; + ret = PTR_ERR(owl_host->dma); goto err_free_host; } diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 024acc1b0a2e..3a9333475a2b 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); - host->dma_chan_rx = dma_request_slave_channel(dev, "rx"); - if (host->dma_chan_rx == NULL) { + host->dma_chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->dma_chan_rx)) { dev_err(dev, "unable to request rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_rx); + host->dma_chan_rx = NULL; goto out; } - host->dma_chan_tx = dma_request_slave_channel(dev, "tx"); - if (host->dma_chan_tx == NULL) { + host->dma_chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->dma_chan_tx)) { dev_err(dev, "unable to request tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_tx); + host->dma_chan_tx = NULL; goto out; } @@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev) } /* FIXME: should we pass detection delay to debounce? */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_cd\n"); goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; } - if (!ret) { + if (!ret) host->use_ro_gpio = true; - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 
- 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } if (host->pdata->init) host->pdata->init(dev, pxamci_detect_irq, mmc); diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index c0504aa90857..f524251d5113 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -14,8 +14,8 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ - u32 tap; /* sampling clock position for SDR104 */ - u32 tap_hs400; /* sampling clock position for HS400 */ + u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */ + u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */ }; struct renesas_sdhi_of_data { @@ -33,6 +33,11 @@ struct renesas_sdhi_of_data { unsigned short max_segs; }; +struct renesas_sdhi_quirks { + bool hs400_disabled; + bool hs400_4taps; +}; + struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); @@ -46,6 +51,7 @@ struct renesas_sdhi { struct clk *clk_cd; struct tmio_mmc_data mmc_data; struct tmio_mmc_dma dma_priv; + const struct renesas_sdhi_quirks *quirks; struct pinctrl *pinctrl; struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 234551a68739..35cb24cd45b4 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -46,11 +46,6 @@ #define SDHI_VER_GEN3_SD 0xcc10 #define SDHI_VER_GEN3_SDMMC 0xcd10 -struct renesas_sdhi_quirks { - bool hs400_disabled; - bool hs400_4taps; -}; - static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) { u32 val; @@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); - if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400) + if (priv->quirks && priv->quirks->hs400_4taps) sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set / 2); @@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); - bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; /* * Skip checking SCC errors when running on 4 taps in HS400 mode as @@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { }; static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 }, { /* Sentinel. */ }, }; @@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!priv) return -ENOMEM; + priv->quirks = quirks; mmc_data = &priv->mmc_data; dma_priv = &priv->dma_priv; @@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (quirks && quirks->hs400_disabled) host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); - if (quirks && quirks->hs400_4taps) - mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400; - /* For some SoC, we disable internal WP. 
GPIO may override this */ if (mmc_can_gpio_ro(host->mmc)) mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT; @@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) { const struct renesas_sdhi_scc *taps = of_data->taps; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; bool hit = false; for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; - priv->scc_tappos_hs400 = taps->tap_hs400; + priv->scc_tappos_hs400 = use_4tap ? + taps->tap_hs400_4tap : + taps->tap; hit = true; break; } } if (!hit) - dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); + dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 18839a10594c..47ac53e91241 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, - .tap_hs400 = 0x00000704, + .tap_hs400_4tap = 0x00000100, }, }; @@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC * implementation as others may use a different implementation. */ -static const struct soc_device_attribute soc_whitelist[] = { - /* specific ones */ +static const struct soc_device_attribute soc_dma_quirks[] = { { .soc_id = "r7s9210", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, - /* generic ones */ - { .soc_id = "r8a774a1" }, - { .soc_id = "r8a774b1" }, - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77470" }, - { .soc_id = "r8a7795" }, - { .soc_id = "r8a7796" }, - { .soc_id = "r8a77965" }, - { .soc_id = "r8a77970" }, - { .soc_id = "r8a77980" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(soc_whitelist); + const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); struct device *dev = &pdev->dev; - if (!soc) - return -ENODEV; - - global_flags |= (unsigned long)soc->data; + if (soc) + global_flags |= (unsigned long)soc->data; dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index bce9c33bc4b5..1e616ae56b13 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; /* If we get -ENOENT we have no card detect GPIO line */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for CD %d\n", ret); return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, 
"error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 674f9e084cd2..9651dca6863e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); - err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 73bb440aaf93..ad01f6451a95 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -9,29 +9,236 @@ #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/bitops.h> +#include <linux/delay.h> #include "sdhci-pltfm.h" +#include "cqhci.h" -static const struct sdhci_ops sdhci_brcmstb_ops = { +#define SDHCI_VENDOR 0x78 +#define SDHCI_VENDOR_ENHANCED_STRB 0x1 + +#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 + +struct sdhci_brcmstb_priv { + void __iomem *cfg_regs; + bool has_cqe; +}; + +struct brcmstb_match_priv { + void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); + struct sdhci_ops *ops; + unsigned int flags; +}; + +static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + u32 reg; + + dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n", + __func__); + reg = readl(host->ioaddr + SDHCI_VENDOR); + if (ios->enhanced_strobe) + reg |= SDHCI_VENDOR_ENHANCED_STRB; + else + reg &= ~SDHCI_VENDOR_ENHANCED_STRB; + writel(reg, host->ioaddr + SDHCI_VENDOR); +} + +static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + sdhci_enable_clk(host, clk); +} + +static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n", + __func__, timing); + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = 
sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = { + .enable = sdhci_brcmstb_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_brcmstb_dumpregs, +}; + +static struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static struct sdhci_ops sdhci_brcmstb_ops_7216 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + +static struct brcmstb_match_priv match_priv_7425 = { + .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | + BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; +static struct brcmstb_match_priv match_priv_7445 = { + .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static const struct brcmstb_match_priv match_priv_7216 = { + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_7216, +}; + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, + { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, + { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + {}, +}; + +static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static int sdhci_brcmstb_add_host(struct sdhci_host *host, + struct sdhci_brcmstb_priv *priv) +{ + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!priv->has_cqe) + return sdhci_add_host(host); + + dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(mmc_dev(host->mmc), + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_brcmstb_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) { + dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n"); + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + } + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_brcmstb_probe(struct platform_device *pdev) { - struct sdhci_host *host; + const struct brcmstb_match_priv *match_priv; + struct sdhci_pltfm_data brcmstb_pdata; struct sdhci_pltfm_host *pltfm_host; + const struct of_device_id *match; + struct sdhci_brcmstb_priv *priv; + struct sdhci_host *host; + struct resource *iomem; + bool has_cqe = false; struct clk *clk; int res; + match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node); + match_priv = match->data; + + dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_err(&pdev->dev, "Clock not found in Device Tree\n"); clk = NULL; } @@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct 
platform_device *pdev) if (res) return res; - host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0); + memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + has_cqe = true; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } + brcmstb_pdata.ops = match_priv->ops; + host = sdhci_pltfm_init(pdev, &brcmstb_pdata, + sizeof(struct sdhci_brcmstb_priv)); if (IS_ERR(host)) { res = PTR_ERR(host); goto err_clk; } + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->has_cqe = has_cqe; + + /* Map in the non-standard CFG registers */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(priv->cfg_regs)) { + res = PTR_ERR(priv->cfg_regs); + goto err; + } + sdhci_get_of_property(pdev); res = mmc_of_parse(host->mmc); if (res) goto err; /* + * If the chip has enhanced strobe and it's enabled, add + * callback + */ + if (match_priv->hs400es && + (host->mmc->caps2 & MMC_CAP2_HS400_ES)) + host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + + /* * Supply the existing CAPS, but clear the UHS modes. This * will allow these modes to be specified by device tree * properties through mmc_of_parse(). */ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci")) + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | - SDHCI_SUPPORT_DDR50); - host->quirks |= SDHCI_QUIRK_MISSING_CAPS | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - res = sdhci_add_host(host); + res = sdhci_brcmstb_add_host(host, priv); if (res) goto err; - pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; return res; @@ -79,11 +314,15 @@ err_clk: return res; } -static const struct of_device_id sdhci_brcm_of_match[] = { - { .compatible = "brcm,bcm7425-sdhci" }, - { .compatible = "brcm,bcm7445-sdhci" }, - {}, -}; +static void sdhci_brcmstb_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = sdhci_pltfm_unregister(pdev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { @@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = { }, .probe = sdhci_brcmstb_probe, .remove = sdhci_pltfm_unregister, + .shutdown = sdhci_brcmstb_shutdown, }; module_platform_driver(sdhci_brcmstb_driver); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index ae0ec27dd7cc..5827d3751b81 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) return 0; } -static inline void *sdhci_cdns_priv(struct sdhci_host *host) +static void *sdhci_cdns_priv(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1c988d6a2433..382f25b2fa45 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { struct pltfm_imx_data { u32 scratchpad; struct 
pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_100mhz; struct pinctrl_state *pins_200mhz; const struct esdhc_soc_data *socdata; @@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host, dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); if (IS_ERR(imx_data->pinctrl) || - IS_ERR(imx_data->pins_default) || IS_ERR(imx_data->pins_100mhz) || IS_ERR(imx_data->pins_200mhz)) return -EINVAL; @@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host, break; default: /* back to default state for other legacy timing */ - pinctrl = imx_data->pins_default; + return pinctrl_select_default_state(mmc_dev(host->mmc)); } return pinctrl_select_state(imx_data->pinctrl, pinctrl); @@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); - if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) { + if (esdhc_is_usdhc(imx_data)) { imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, @@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); return err; } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ switch (boarddata->cd_type) { case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request card-detect gpio!\n"); @@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(imx_data->pins_default)) - dev_warn(mmc_dev(host->mmc), "could not get default state\n"); - if (esdhc_is_usdhc(imx_data)) { host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c index a1aa21b9ae1c..92f30a1db435 100644 --- a/drivers/mmc/host/sdhci-milbeaut.c +++ b/drivers/mmc/host/sdhci-milbeaut.c @@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ret = 0; struct f_sdhost_priv *priv; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: no irq specified\n", __func__); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) @@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) host->ops = &sdhci_milbeaut_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3d0bb5e2e09b..c3a160c18047 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -15,6 
+15,7 @@ #include <linux/regulator/consumer.h> #include "sdhci-pltfm.h" +#include "cqhci.h" #define CORE_MCI_VERSION 0x50 #define CORE_VERSION_MAJOR_SHIFT 28 @@ -122,6 +123,10 @@ #define msm_host_writel(msm_host, val, host, offset) \ msm_host->var_ops->msm_writel_relaxed(val, host, offset) +/* CQHCI vendor specific registers */ +#define CQHCI_VENDOR_CFG1 0xA00 +#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) + struct sdhci_msm_offset { u32 core_hc_mode; u32 core_mci_data_cnt; @@ -1567,6 +1572,127 @@ out: __sdhci_msm_set_clock(host, clock); } +/*****************************************************************************\ + * * + * MSM Command Queue Engine (CQE) * + * * +\*****************************************************************************/ + +static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + return 0; +} + +void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 ctrl; + + /* + * When CQE is halted, the legacy SDHCI path operates only + * on 16-byte descriptors in 64-bit mode. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 16; + + spin_lock_irqsave(&host->lock, flags); + + /* + * During CQE command transfers, the command complete bit gets latched. + * So software should clear the command complete interrupt status when + * CQE is either halted or disabled. Otherwise an unexpected SDHCI + * legacy interrupt gets triggered when CQE is halted/disabled. + */ + ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); + ctrl |= SDHCI_INT_RESPONSE; + sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + sdhci_cqe_disable(mmc, recovery); } + +static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_msm_cqe_disable, +}; + +static int sdhci_msm_cqe_add_host(struct sdhci_host *host, + struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + u32 cqcfg; + int ret; + + /* + * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors. + * So ensure the ADMA table is allocated for 16-byte descriptors. + */ + if (host->caps & SDHCI_CAN_64BIT) + host->alloc_desc_sz = 16; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = cqhci_pltfm_init(pdev); + if (IS_ERR(cq_host)) { + ret = PTR_ERR(cq_host); + dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); + goto cleanup; + } + + msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host->ops = &sdhci_msm_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) { + dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", + mmc_hostname(host->mmc), ret); + goto cleanup; + } + + /* Disable CQE reset due to the CQE enable signal */ + cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); + cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; + cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); + + /* + * SDHC expects 12-byte ADMA descriptors until CQE is enabled.
+ * So limit desc_sz to 12 so that the data commands that are sent + * during card initialization (before CQE gets enabled) would + * get executed without any issues. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 12; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + dev_info(&pdev->dev, "%s: CQE init: success\n", + mmc_hostname(host->mmc)); + return ret; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + /* * Platform specific register write functions. This is so that, if any * register write needs to be followed up by platform specific actions, @@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .write_w = sdhci_msm_writew, .write_b = sdhci_msm_writeb, + .irq = sdhci_msm_cqe_irq, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; - struct resource *core_memres; struct clk *clk; int ret; u16 host_version, core_minor; @@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) u8 core_major; const struct sdhci_msm_offset *msm_offset; const struct sdhci_msm_variant_info *var_info; + struct device_node *node = pdev->dev.of_node; host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); if (IS_ERR(host)) @@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) } if (!msm_host->mci_removed) { - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); - msm_host->core_mem = devm_ioremap_resource(&pdev->dev, - core_memres); - + msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(msm_host->core_mem)) { ret = PTR_ERR(msm_host->core_mem); goto clk_disable; @@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; - ret = sdhci_add_host(host); + if (of_property_read_bool(node, "supports-cqe")) + ret = sdhci_msm_cqe_add_host(host, pdev); + else + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; sdhci_msm_set_regulator_caps(msm_host); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 5959e394b416..ab2bd314a390 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -33,7 +33,14 @@ #define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */ +struct sdhci_at91_soc_data { + const struct sdhci_pltfm_data *pdata; + bool baseclk_is_generated_internally; + unsigned int divider_for_baseclk; +}; + struct sdhci_at91_priv { + const struct sdhci_at91_soc_data *soc_data; struct clk *hclock; struct clk *gck; struct clk *mainck; @@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_power = sdhci_at91_set_power, }; -static const struct sdhci_pltfm_data soc_data_sama5d2 = { +static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { .ops = &sdhci_at91_sama5d2_ops, }; +static const struct sdhci_at91_soc_data soc_data_sama5d2 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = false, +}; + +static const struct sdhci_at91_soc_data soc_data_sam9x60 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = true, + .divider_for_baseclk = 2, +}; + static const struct of_device_id sdhci_at91_dt_match[] = { { .compatible = "atmel,sama5d2-sdhci", .data = 
&soc_data_sama5d2 }, + { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 }, {} }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); @@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); - int ret; unsigned int caps0, caps1; unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; + unsigned int gck_rate, clk_base_rate; unsigned int preset_div; - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ clk_prepare_enable(priv->hclock); caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(dev, "failed to set gck"); - clk_disable_unprepare(priv->hclock); - return ret; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & - SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. */ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, - host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } + + gck_rate = clk_get_rate(priv->gck); + if (priv->soc_data->baseclk_is_generated_internally) + clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk; + else + clk_base_rate = clk_get_rate(priv->mainck); + + clk_base = clk_base_rate / 1000000; + clk_mul = gck_rate / clk_base_rate - 1; + + caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK; + caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK; + caps1 &= ~SDHCI_CLOCK_MUL_MASK; + caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK; + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); + writel(caps0, host->ioaddr + SDHCI_CAPABILITIES); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + + dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", + clk_mul, gck_rate, clk_base_rate); /* * We have to set preset values because it depends on the clk_mul @@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev) * maximum sd clock value is 120 MHz instead of 208 MHz. For that * reason, we need to use presets to support SDR104. 
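* Each preset divider below is computed as DIV_ROUND_UP(gck_rate, target rate) - 1.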
*/ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_DDR50); @@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { static int sdhci_at91_probe(struct platform_device *pdev) { const struct of_device_id *match; - const struct sdhci_pltfm_data *soc_data; + const struct sdhci_at91_soc_data *soc_data; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; @@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev) return -EINVAL; soc_data = match->data; - host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv)); + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv)); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); + priv->soc_data = soc_data; priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); if (IS_ERR(priv->mainck)) { - dev_err(&pdev->dev, "failed to get baseclk\n"); - return PTR_ERR(priv->mainck); + if (soc_data->baseclk_is_generated_internally) { + priv->mainck = NULL; + } else { + dev_err(&pdev->dev, "failed to get baseclk\n"); + ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; + } } priv->hclock = devm_clk_get(&pdev->dev, "hclock"); if (IS_ERR(priv->hclock)) { dev_err(&pdev->dev, "failed to get hclock\n"); - return PTR_ERR(priv->hclock); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; } priv->gck = devm_clk_get(&pdev->dev, "multclk"); if (IS_ERR(priv->gck)) { dev_err(&pdev->dev, "failed to get multclk\n"); - return PTR_ERR(priv->gck); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; } ret = sdhci_at91_set_clks_presets(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 500f70a6ee42..5d8dd870bd44 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host, u16 ret; int shift = (spec_reg & 0x2) * 8; + if (spec_reg == SDHCI_TRANSFER_MODE) + return pltfm_host->xfer_mode_shadow; + if (spec_reg == SDHCI_HOST_VERSION) ret = value & 0xffff; else @@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_clock_enable(struct sdhci_host *host, bool enable) { - u32 val; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); ktime_t timeout; + u32 val, clk_en; + + clk_en = ESDHC_CLOCK_SDCLKEN; + + /* + * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version + * is 2.2 or lower. 
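+ * On newer controllers only the SDCLKEN bit is gated here.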
+ */ + if (esdhc->vendor_ver <= VENDOR_V_22) + clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN + + ESDHC_CLOCK_PEREN); val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); if (enable) - val |= ESDHC_CLOCK_SDCLKEN; + val |= clk_en; else - val &= ~ESDHC_CLOCK_SDCLKEN; + val &= ~clk_en; sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - /* Wait max 20 ms */ + /* + * Wait max 20 ms. If the vendor version is 2.2 or lower, do not + * wait for the clock stable bit, which does not exist. + */ timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (1) { + while (esdhc->vendor_ver > VENDOR_V_22) { bool timedout = ktime_after(ktime_get(), timeout); - if (sdhci_readl(host, ESDHC_PRSSTAT) & val) + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) break; if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); break; } - udelay(10); + usleep_range(10, 20); } } @@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - int pre_div = 1; - int div = 1; - int division; + unsigned int pre_div = 1, div = 1; + unsigned int clock_fixup = 0; ktime_t timeout; - long fixup = 0; u32 temp; - host->mmc->actual_clock = 0; - if (clock == 0) { + host->mmc->actual_clock = 0; esdhc_clock_enable(host, false); return; } - /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ + /* Start pre_div at 2 for vendor version < 2.3. */ if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* Fix clock value. */ if (host->mmc->card && mmc_card_sd(host->mmc->card) && - esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) - fixup = esdhc->clk_fixup->sd_dflt_max_clk; + esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) + clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk; else if (esdhc->clk_fixup) - fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - - if (fixup && clock > fixup) - clock = fixup; - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | - ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + if (clock_fixup == 0 || clock < clock_fixup) + clock_fixup = clock; + + /* Calculate pre_div and div. */ + while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256) pre_div *= 2; - while (host->max_clk / pre_div / div > clock && div < 16) + while (host->max_clk / pre_div / div > clock_fixup && div < 16) div++; + esdhc->div_ratio = pre_div * div; + + /* Limit clock division for the HS400 200 MHz clock, as the quirk requires.
*/ if (esdhc->quirk_limited_clk_division && clock == MMC_HS200_MAX_DTR && (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 || host->flags & SDHCI_HS400_TUNING)) { - division = pre_div * div; - if (division <= 4) { + if (esdhc->div_ratio <= 4) { pre_div = 4; div = 1; - } else if (division <= 8) { + } else if (esdhc->div_ratio <= 8) { pre_div = 4; div = 2; - } else if (division <= 12) { + } else if (esdhc->div_ratio <= 12) { pre_div = 4; div = 3; } else { pr_warn("%s: using unsupported clock division.\n", mmc_hostname(host->mmc)); } + esdhc->div_ratio = pre_div * div; } + host->mmc->actual_clock = host->max_clk / esdhc->div_ratio; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - host->mmc->actual_clock = host->max_clk / pre_div / div; - esdhc->div_ratio = pre_div * div; + clock, host->mmc->actual_clock); + + /* Set clock division into register. */ pre_div >>= 1; div--; + esdhc_clock_enable(host, false); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); + temp &= ~ESDHC_CLOCK_MASK; + temp |= ((div << ESDHC_DIVIDER_SHIFT) | + (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } + + /* Additional setting for HS400. */ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && clock == MMC_HS200_MAX_DTR) { temp = sdhci_readl(host, ESDHC_TBCTL); @@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) esdhc_clock_enable(host, false); esdhc_flush_async_fifo(host); } - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - while (1) { - bool timedout = ktime_after(ktime_get(), timeout); - - if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) - break; - if (timedout) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - return; - } - udelay(10); - } - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= ESDHC_CLOCK_SDCLKEN; - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + esdhc_clock_enable(host, true); } static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) @@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u32 val; + u32 val, bus_width = 0; + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ if (esdhc->quirk_delay_before_data_reset && (mask & SDHCI_RESET_DATA) && (host->flags & SDHCI_REQ_USE_DMA)) mdelay(5); + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. 
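Both esdhc_clock_enable() and the reworked esdhc_of_set_clock() above use the same 20 ms poll for ESDHC_CLOCK_STABLE, skipped entirely on vendor version 2.2 or lower where the bit does not exist. Written out as a helper (hypothetical name, assuming the sdhci core accessors), note that sampling timedout before the register read means a bit that sets just as time runs out still counts as success:

#include <linux/delay.h>
#include <linux/ktime.h>

#include "sdhci.h"

static bool esdhc_wait_bit(struct sdhci_host *host, int reg, u32 bit)
{
        ktime_t timeout = ktime_add_ms(ktime_get(), 20);

        while (true) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (sdhci_readl(host, reg) & bit)
                        return true;
                if (timedout)
                        return false;
                usleep_range(10, 20);
        }
}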
+ */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } + sdhci_reset(host, mask); - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); - if (mask & SDHCI_RESET_ALL) { + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. + */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; sdhci_writel(host, val, ESDHC_TBCTL); + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. + */ if (esdhc->quirk_unreliable_pulse_detection) { val = sdhci_readl(host, ESDHC_DLLCFG1); val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; @@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } static struct soc_device_attribute soc_tuning_erratum_type1[] = { - { .family = "QorIQ T1023", .revision = "1.0", }, - { .family = "QorIQ T1040", .revision = "1.0", }, - { .family = "QorIQ T2080", .revision = "1.0", }, - { .family = "QorIQ LS1021A", .revision = "1.0", }, + { .family = "QorIQ T1023", }, + { .family = "QorIQ T1040", }, + { .family = "QorIQ T2080", }, + { .family = "QorIQ LS1021A", }, { }, }; static struct soc_device_attribute soc_tuning_erratum_type2[] = { - { .family = "QorIQ LS1012A", .revision = "1.0", }, - { .family = "QorIQ LS1043A", .revision = "1.*", }, - { .family = "QorIQ LS1046A", .revision = "1.0", }, - { .family = "QorIQ LS1080A", .revision = "1.0", }, - { .family = "QorIQ LS2080A", .revision = "1.0", }, - { .family = "QorIQ LA1575A", .revision = "1.0", }, + { .family = "QorIQ LS1012A", }, + { .family = "QorIQ LS1043A", }, + { .family = "QorIQ LS1046A", }, + { .family = "QorIQ LS1080A", }, + { .family = "QorIQ LS2080A", }, + { .family = "QorIQ LA1575A", }, { }, }; @@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) esdhc_clock_enable(host, true); } -static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, +static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start, u8 *window_end) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u8 tbstat_15_8, tbstat_7_0; u32 val; - if (esdhc->quirk_tuning_erratum_type1) { - *window_start = 5 * esdhc->div_ratio; - *window_end = 3 * esdhc->div_ratio; - return; - } - /* Write TBCTL[11:8]=4'h8 */ val = sdhci_readl(host, ESDHC_TBCTL); val &= ~(0xf << 8); @@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, val = sdhci_readl(host, ESDHC_TBSTAT); val = sdhci_readl(host, ESDHC_TBSTAT); + *window_end = val & 0xff; + *window_start = (val >> 8) & 0xff; +} + +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 
start_ptr, end_ptr; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr); + /* Reset data lines by setting ESDHCCTL[RSTD] */ sdhci_reset(host, SDHCI_RESET_DATA); /* Write 32'hFFFF_FFFF to IRQSTAT register */ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); - /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio - * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio, + /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2 + * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2, * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. */ - tbstat_7_0 = val & 0xff; - tbstat_15_8 = (val >> 8) & 0xff; - if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) { + if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) { *window_start = 8 * esdhc->div_ratio; *window_end = 4 * esdhc->div_ratio; } else { @@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (ret) break; + /* For type2 affected platforms of the tuning erratum, + * tuning may succeed although eSDHC might not have + * tuned properly. Need to check tuning window. + */ + if (esdhc->quirk_tuning_erratum_type2 && + !host->tuning_err) { + esdhc_tuning_window_ptr(host, &window_start, + &window_end); + if (abs(window_start - window_end) > + (4 * esdhc->div_ratio + 2)) + host->tuning_err = -EAGAIN; + } + /* If HW tuning fails and triggers erratum, * try workaround. */ @@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) * 1/2 peripheral clock. */ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") || - of_device_is_compatible(np, "fsl,ls1028a-esdhc")) + of_device_is_compatible(np, "fsl,ls1028a-esdhc") || + of_device_is_compatible(np, "fsl,ls1088a-esdhc")) esdhc->peripheral_clock = clk_get_rate(clk) / 2; else esdhc->peripheral_clock = clk_get_rate(clk); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 083e7e053c95..882053151a47 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -7,6 +7,7 @@ */ #include <linux/delay.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/of.h> @@ -85,6 +86,7 @@ /* sdhci-omap controller flags */ #define SDHCI_OMAP_REQUIRE_IODELAY BIT(0) +#define SDHCI_OMAP_SPECIAL_RESET BIT(1) struct sdhci_omap_data { u32 offset; @@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host) struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); - reg |= CON_DMA_MASTER; + reg &= ~CON_DMA_MASTER; + /* Switch to DMA slave mode when using external DMA */ + if (!host->use_external_dma) + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); return 0; @@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host, sdhci_omap_start_clock(omap_host); } +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long limit = MMC_TIMEOUT_US; + unsigned long i = 0; /* Don't reset data lines during tuning operation */ if (omap_host->is_tuning) mask &= ~SDHCI_RESET_DATA; + if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) { + sdhci_writeb(host, mask, 
SDHCI_SOFTWARE_RESET); + while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) && + (i++ < limit)) + udelay(1); + i = 0; + while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) && + (i++ < limit)) + udelay(1); + + if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); + return; + } + sdhci_reset(host, mask); } @@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) return intmask; } +static void sdhci_omap_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_ERASE) + sdhci_set_data_timeout_irq(host, false); + + __sdhci_set_timeout(host, cmd); +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = { .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, .irq = sdhci_omap_irq, + .set_timeout = sdhci_omap_set_timeout, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) @@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = { .offset = 0x200, }; +static const struct sdhci_omap_data am335_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data am437_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + static const struct sdhci_omap_data dra7_data = { .offset = 0x200, .flags = SDHCI_OMAP_REQUIRE_IODELAY, @@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = { static const struct of_device_id omap_sdhci_match[] = { { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, { .compatible = "ti,k2g-sdhci", .data = &k2g_data }, + { .compatible = "ti,am335-sdhci", .data = &am335_data }, + { .compatible = "ti,am437-sdhci", .data = &am437_data }, {}, }; MODULE_DEVICE_TABLE(of, omap_sdhci_match); @@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) const struct of_device_id *match; struct sdhci_omap_data *data; const struct soc_device_attribute *soc; + struct resource *regs; match = of_match_device(omap_sdhci_match, dev); if (!match) @@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) } offset = data->offset; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, sizeof(*omap_host)); if (IS_ERR(host)) { @@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) omap_host->timing = MMC_TIMING_LEGACY; omap_host->flags = data->flags; host->ioaddr += offset; + host->mapbase = regs->start + offset; mmc = host->mmc; sdhci_get_of_property(pdev); @@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + /* Switch to external DMA only if there is the "dmas" property */ + if (of_find_property(dev->of_node, "dmas", NULL)) + sdhci_switch_external_dma(host, true); + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 5091e2c1c0e5..525de2454a4d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, - 
slot->cd_override_level, 0, NULL); + slot->cd_override_level, 0); if (ret && ret != -EPROBE_DEFER) ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, - 0, NULL); + 0); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 51e096f27388..64200c78e90d 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -117,7 +117,6 @@ struct sdhci_s3c { struct s3c_sdhci_platdata *pdata; int cur_clk; int ext_cd_irq; - int ext_cd_gpio; struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; @@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host; struct sdhci_s3c *sc; - struct resource *res; int ret, irq, ptr, clks; if (!pdev->dev.platform_data && !pdev->dev.of_node) { @@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_pdata_io_clk; } else { memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); - sc->ext_cd_gpio = -1; /* invalid gpio number */ } drv_data = sdhci_s3c_get_driver_data(pdev); @@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_no_busclks; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err_req_regs; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index e43143223320..f4b05dd6c20a 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev) * We must request the IRQ after sdhci_add_host(), as the tasklet only * gets setup in sdhci_add_host() and we oops. */ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto err_request_cd; if (!ret) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 916b5b09c3d1..b4b63089a4e2 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = { static int sdhci_probe(struct platform_device *pdev) { struct sdhci_host *host; - struct resource *iomem; struct spear_sdhci *sdhci; struct device *dev; int ret; @@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev) goto err; } - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); @@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev) * It is optional to use GPIOs for sdhci card detection. If we * find a descriptor using slot GPIO, we use it. 
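Several probe functions in this series (sdhci-s3c and sdhci-spear here, sh_mmcif, sunxi and tmio further down) convert the two-step resource lookup and mapping to the combined helper. A minimal sketch of the equivalence (map_first_mmio is a hypothetical name):

#include <linux/platform_device.h>

/*
 * One call replacing the platform_get_resource(pdev, IORESOURCE_MEM, 0)
 * plus devm_ioremap_resource(&pdev->dev, res) pair.
 */
static void __iomem *map_first_mmio(struct platform_device *pdev)
{
        return devm_platform_ioremap_resource(pdev, 0);
}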
*/ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto disable_clk; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7bc950520fd9..403ac44a7378 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; - if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 1b1c26da3fe0..63db84481dff 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -10,6 +10,7 @@ */ #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/ktime.h> #include <linux/highmem.h> #include <linux/io.h> @@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) { if (enable) host->ier |= SDHCI_INT_DATA_TIMEOUT; @@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); -static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - u8 count; - - if (host->ops->set_timeout) { - host->ops->set_timeout(host, cmd); - } else { - bool too_big = false; - - count = sdhci_calc_timeout(host, cmd, &too_big); - - if (too_big && - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { - sdhci_calc_sw_timeout(host, cmd); - sdhci_set_data_timeout_irq(host, false); - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { - sdhci_set_data_timeout_irq(host, true); - } - - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); } +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - struct mmc_data *data = cmd->data; - - host->data_timeout = 0; - - if (sdhci_data_line_cmd(cmd)) - sdhci_set_timeout(host, cmd); - - if (!data) - return; + if (host->ops->set_timeout) + host->ops->set_timeout(host, cmd); + else + __sdhci_set_timeout(host, cmd); +} +static void sdhci_initialize_data(struct sdhci_host *host, + struct mmc_data *data) +{ WARN_ON(host->data); /* Sanity checks */ @@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) host->data = data; host->data_early = 0; host->data->bytes_xfered = 0; +} + +static inline void sdhci_set_block_info(struct sdhci_host *host, + struct mmc_data *data) +{ + /* Set the 
DMA boundary value and block size */ + sdhci_writew(host, + SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { struct scatterlist *sg; @@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); - /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), - SDHCI_BLOCK_SIZE); + sdhci_set_block_info(host, data); +} - /* - * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count - * can be supported, in that case 16-bit block count register must be 0. - */ - if (host->version >= SDHCI_SPEC_410 && host->v4_mode && - (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { - if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) - sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); - sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + +static int sdhci_external_dma_init(struct sdhci_host *host) +{ + int ret = 0; + struct mmc_host *mmc = host->mmc; + + host->tx_chan = dma_request_chan(mmc->parent, "tx"); + if (IS_ERR(host->tx_chan)) { + ret = PTR_ERR(host->tx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request TX DMA channel.\n"); + host->tx_chan = NULL; + return ret; + } + + host->rx_chan = dma_request_chan(mmc->parent, "rx"); + if (IS_ERR(host->rx_chan)) { + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + ret = PTR_ERR(host->rx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request RX DMA channel.\n"); + host->rx_chan = NULL; + } + + return ret; +} + +static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; +} + +static int sdhci_external_dma_setup(struct sdhci_host *host, + struct mmc_command *cmd) +{ + int ret, i; + enum dma_transfer_direction dir; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = cmd->data; + struct dma_chan *chan; + struct dma_slave_config cfg; + dma_cookie_t cookie; + int sg_cnt; + + if (!host->mapbase) + return -EINVAL; + + cfg.src_addr = host->mapbase + SDHCI_BUFFER; + cfg.dst_addr = host->mapbase + SDHCI_BUFFER; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + /* Sanity check: all the SG entries must be aligned by block size. 
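The sdhci_external_dma_setup() path here follows the standard dmaengine slave-transfer flow: configure the channel, prepare a scatter-gather descriptor, submit it, then kick the engine. An isolated sketch of that flow (start_slave_dma is a hypothetical name; the driver itself defers dma_async_issue_pending() to sdhci_external_dma_pre_transfer(), just before the command is written):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int start_slave_dma(struct dma_chan *chan,
                           struct dma_slave_config *cfg,
                           struct scatterlist *sgl, unsigned int sg_len,
                           enum dma_transfer_direction dir)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;
        int ret;

        ret = dmaengine_slave_config(chan, cfg);
        if (ret)
                return ret;

        desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EINVAL;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return cookie;

        /* Actually starts the queued transfer. */
        dma_async_issue_pending(chan);
        return 0;
}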
*/ + for (i = 0; i < data->sg_len; i++) { + if ((data->sg + i)->length % data->blksz) + return -EINVAL; + } + + chan = sdhci_external_dma_channel(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_cnt <= 0) + return -EINVAL; + + dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + desc->callback = NULL; + desc->callback_param = NULL; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + ret = cookie; + + return ret; +} + +static void sdhci_external_dma_release(struct sdhci_host *host) +{ + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + if (host->rx_chan) { + dma_release_channel(host->rx_chan); + host->rx_chan = NULL; + } + + sdhci_switch_external_dma(host, false); +} + +static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + host->flags |= SDHCI_REQ_USE_DMA; + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +static void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (!sdhci_external_dma_setup(host, cmd)) { + __sdhci_external_dma_prepare_data(host, cmd); } else { - sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + sdhci_external_dma_release(host); + pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", + mmc_hostname(host->mmc)); + sdhci_prepare_data(host, cmd); } } +static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct dma_chan *chan; + + if (!cmd->data) + return; + + chan = sdhci_external_dma_channel(host, cmd->data); + if (chan) + dma_async_issue_pending(chan); +} + +#else + +static inline int sdhci_external_dma_init(struct sdhci_host *host) +{ + return -EOPNOTSUPP; +} + +static inline void sdhci_external_dma_release(struct sdhci_host *host) +{ +} + +static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* This should never happen */ + WARN_ON_ONCE(1); +} + +static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ +} + +static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return NULL; +} + +#endif + +void sdhci_switch_external_dma(struct sdhci_host *host, bool en) +{ + host->use_external_dma = en; +} +EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); + static inline bool sdhci_auto_cmd12(struct sdhci_host *host, struct mmc_request *mrq) { @@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); } -static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) { int i; - if (host->cmd && host->cmd->mrq == mrq) - host->cmd = NULL; - - if (host->data_cmd && host->data_cmd->mrq == mrq) - host->data_cmd = NULL; - - if (host->data && host->data->mrq == mrq) - host->data = NULL; - - if (sdhci_needs_reset(host, mrq)) - host->pending_reset = true; - for (i = 0; i < SDHCI_MAX_MRQS; i++) { if (host->mrqs_done[i] == mrq) { WARN_ON(1); @@ 
-1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) } WARN_ON(i >= SDHCI_MAX_MRQS); +} + +static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (host->cmd && host->cmd->mrq == mrq) + host->cmd = NULL; + + if (host->data_cmd && host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if (host->data && host->data->mrq == mrq) + host->data = NULL; + + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + sdhci_set_mrq_done(host, mrq); sdhci_del_timer(host, mrq); @@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host) /* * Need to send CMD12 if - - * a) open-ended multiblock transfer (no CMD23) + * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) * b) error in multiblock transfer */ if (data->stop && - (data->error || - !data->mrq->sbc)) { + ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || + data->error)) { /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. It is upper layer's @@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } host->cmd = cmd; + host->data_timeout = 0; if (sdhci_data_line_cmd(cmd)) { WARN_ON(host->data_cmd); host->data_cmd = cmd; + sdhci_set_timeout(host, cmd); } - sdhci_prepare_data(host, cmd); + if (cmd->data) { + if (host->use_external_dma) + sdhci_external_dma_prepare_data(host, cmd); + else + sdhci_prepare_data(host, cmd); + } sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); @@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout += 10 * HZ; sdhci_mod_timer(host, cmd->mrq, timeout); + if (host->use_external_dma) + sdhci_external_dma_pre_transfer(host, cmd); + sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } EXPORT_SYMBOL_GPL(sdhci_send_command); @@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_led_activate(host); - /* - * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED - * requests if Auto-CMD12 is enabled. - */ - if (sdhci_auto_cmd12(host, mrq)) { - if (mrq->stop) { - mrq->data->stop = NULL; - mrq->stop = NULL; - } - } - if (!present || host->flags & SDHCI_DEVICE_DEAD) { mrq->cmd->error = -ENOMEDIUM; sdhci_finish_mrq(host, mrq); @@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host) if (host->flags & SDHCI_REQ_USE_DMA) { struct mmc_data *data = mrq->data; + if (host->use_external_dma && data && + (mrq->cmd->error || data->error)) { + struct dma_chan *chan = sdhci_external_dma_channel(host, data); + + host->mrqs_done[i] = NULL; + spin_unlock_irqrestore(&host->lock, flags); + dmaengine_terminate_sync(chan); + spin_lock_irqsave(&host->lock, flags); + sdhci_set_mrq_done(host, mrq); + } + if (data && data->host_cookie == COOKIE_MAPPED) { if (host->bounce_buffer) { /* @@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (sdhci_can_64bit_dma(host)) host->flags |= SDHCI_USE_64_BIT_DMA; + if (host->use_external_dma) { + ret = sdhci_external_dma_init(host); + if (ret == -EPROBE_DEFER) + goto unreg; + /* + * Fall back to use the DMA/PIO integrated in standard SDHCI + * instead of external DMA devices. 
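The reworked condition in sdhci_finish_data() above can be read as a predicate: a manual CMD12 is needed for open-ended multiblock transfers that used neither CMD23 nor auto-CMD12, and always after a data error. Restated as a hypothetical helper, not part of the driver:

#include <linux/mmc/core.h>

static bool need_manual_cmd12(struct mmc_data *data, bool auto_cmd12)
{
        return data->stop &&
               ((!data->mrq->sbc && !auto_cmd12) || data->error);
}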
+ */ + else if (ret) + sdhci_switch_external_dma(host, false); + /* Disable internal DMA sources */ + else + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->set_dma_mask) ret = host->ops->set_dma_mask(host); @@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host) dma_addr_t dma; void *buf; - if (host->flags & SDHCI_USE_64_BIT_DMA) { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_64_DESC_SZ(host); - host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); - } else { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_32_DESC_SZ; - host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; - } + if (!(host->flags & SDHCI_USE_64_BIT_DMA)) + host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; + else if (!host->alloc_desc_sz) + host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); + + host->desc_sz = host->alloc_desc_sz; + host->adma_table_sz = host->adma_table_cnt * host->desc_sz; host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; /* @@ -3913,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { - if (host->clk_mul) { - mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + if (host->clk_mul) max_clk = host->max_clk * host->clk_mul; - } else - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + /* + * Divided Clock Mode minimum clock rate is always less than + * Programmable Clock Mode minimum clock rate. + */ + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; @@ -4276,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, host->align_buffer, host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } @@ -4321,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host) pr_info("%s: SDHCI controller on %s [%s] using %s\n", mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), + host->use_external_dma ? "External DMA" : (host->flags & SDHCI_USE_ADMA) ? (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); @@ -4409,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) host->adma_table_sz, host->align_buffer, host->align_addr); + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index fe83ece6965b..a6a3ddcf97e7 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -487,6 +487,7 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + phys_addr_t mapbase; /* physical address base */ char *bounce_buffer; /* For packing SDMA reads/writes */ dma_addr_t bounce_addr; unsigned int bounce_buffer_size; @@ -535,6 +536,7 @@ struct sdhci_host { bool pending_reset; /* Cmd/data reset is pending */ bool irq_wake_enabled; /* IRQ wakeup is enabled */ bool v4_mode; /* Host Version 4 Enable */ + bool use_external_dma; /* Host selects to use external DMA */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ @@ -556,7 +558,8 @@ struct sdhci_host { dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ dma_addr_t align_addr; /* Mapped bounce buffer */ - unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int desc_sz; /* ADMA current descriptor size */ + unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */ struct workqueue_struct *complete_wq; /* Request completion wq */ struct work_struct complete_work; /* Request completion work */ @@ -564,6 +567,11 @@ struct sdhci_host { struct timer_list timer; /* Timer for timeouts */ struct timer_list data_timer; /* Timer for data timeouts */ +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +#endif + u32 caps; /* CAPABILITY_0 */ u32 caps1; /* CAPABILITY_1 */ bool read_caps; /* Capability flags have been read */ @@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host); void sdhci_reset_tuning(struct sdhci_host *host); void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_switch_external_dma(struct sdhci_host *host, bool en); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index b8e897e31e2e..3afea580fbea 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) writeb(val, host->ioaddr + reg); } +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = sdhci_execute_tuning(mmc, opcode); + + if (err) + return err; + /* + * Tuning data remains in the buffer after tuning. 
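With __sdhci_set_timeout() and sdhci_set_data_timeout_irq() now exported in sdhci.h above, a vendor driver's .set_timeout hook can special-case one opcode and defer everything else to the core, as the sdhci-omap hunk earlier in this series does for MMC_ERASE. A minimal sketch (my_set_timeout is a hypothetical name):

#include <linux/mmc/mmc.h>

#include "sdhci.h"

static void my_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        /* Erase can far exceed the HW data timeout; let SW handle it. */
        if (cmd->opcode == MMC_ERASE)
                sdhci_set_data_timeout_irq(host, false);

        __sdhci_set_timeout(host, cmd);
}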
+ * Do a command and data reset to get rid of it + */ + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + + return 0; +} + +static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = { .set_power = sdhci_am654_set_power, .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, + .irq = sdhci_am654_cqhci_irq, .reset = sdhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = { .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT, }; -static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) -{ - int cmd_error = 0; - int data_error = 0; - - if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) - return intmask; - - cqhci_irq(host->mmc, intmask, cmd_error, data_error); - - return 0; -} - static struct sdhci_ops sdhci_j721e_8bit_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev) struct sdhci_am654_data *sdhci_am654; const struct of_device_id *match; struct sdhci_host *host; - struct resource *res; struct clk *clk_xin; struct device *dev = &pdev->dev; void __iomem *base; @@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_disable; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(base)) { ret = PTR_ERR(base); goto pm_runtime_put; @@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_put; } + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; + ret = sdhci_am654_init(host); if (ret) goto pm_runtime_put; diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index fa0dfc657c22..4625cc071b61 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ctrl = 0, ret = 0; struct 
f_sdhost_priv *priv; u32 reg = 0; @@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->ops = &sdhci_f_sdh30_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 98c575de43c7..7e1fd557109c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) host->chan_rx = sh_mmcif_request_dma_pdata(host, pdata->slave_id_rx); } else { - host->chan_tx = dma_request_slave_channel(dev, "tx"); - host->chan_rx = dma_request_slave_channel(dev, "rx"); + host->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->chan_tx)) + host->chan_tx = NULL; + host->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->chan_rx)) + host->chan_rx = NULL; } dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, host->chan_rx); @@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev) struct sh_mmcif_host *host; struct device *dev = &pdev->dev; struct sh_mmcif_plat_data *pd = dev->platform_data; - struct resource *res; void __iomem *reg; const char *name; @@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) if (irq[0] < 0) return -ENXIO; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, res); + reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) return PTR_ERR(reg); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d577a6b0ceae..f87d7967457f 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, if (ret) return ret; - host->reg_base = devm_ioremap_resource(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); + host->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->reg_base)) return PTR_ERR(host->reg_base); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index c4a1d49fbea4..1e424bcdbd5f 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, { struct tmio_mmc_host *host; struct mmc_host *mmc; - struct resource *res; void __iomem *ctl; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctl = devm_ioremap_resource(&pdev->dev, res); + ctl = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctl)) return ERR_CAST(ctl); @@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) * Look for a card detect GPIO, if it fails with anything * else than a probe deferral, just live without it. 
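The card-detect conversions throughout this series use the five-argument mmc_gpiod_request_cd() (the old trailing gpio_invert pointer is gone), keeping the GPIO optional: only probe deferral is propagated, any other failure means the host simply lives without a CD GPIO. A sketch of that pattern (request_optional_cd is a hypothetical name):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int request_optional_cd(struct mmc_host *mmc)
{
        int ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);

        if (ret == -EPROBE_DEFER)
                return ret;     /* the GPIO may show up later */
        return 0;               /* otherwise fall back to other CD means */
}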
*/ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 0c72ec5546c3..a1683c49cb90 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -59,7 +59,6 @@ struct uniphier_sd_priv { struct tmio_mmc_data tmio_data; struct pinctrl *pinctrl; - struct pinctrl_state *pinstate_default; struct pinctrl_state *pinstate_uhs; struct clk *clk; struct reset_control *rst; @@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, { struct tmio_mmc_host *host = mmc_priv(mmc); struct uniphier_sd_priv *priv = uniphier_sd_priv(host); - struct pinctrl_state *pinstate; + struct pinctrl_state *pinstate = NULL; u32 val, tmp; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: val = UNIPHIER_SD_VOLT_330; - pinstate = priv->pinstate_default; break; case MMC_SIGNAL_VOLTAGE_180: val = UNIPHIER_SD_VOLT_180; @@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val); writel(tmp, host->ctl + UNIPHIER_SD_VOLT); - pinctrl_select_state(priv->pinctrl, pinstate); + if (pinstate) + pinctrl_select_state(priv->pinctrl, pinstate); + else + pinctrl_select_default_state(mmc_dev(mmc)); return 0; } @@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host, if (IS_ERR(priv->pinctrl)) return PTR_ERR(priv->pinctrl); - priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(priv->pinstate_default)) - return PTR_ERR(priv->pinstate_default); - priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs"); if (IS_ERR(priv->pinstate_uhs)) return PTR_ERR(priv->pinstate_uhs); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b11ac2314328..9a0b1e4e405d 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -199,7 +199,6 @@ struct usdhi6_host { /* Pin control */ struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_uhs; }; @@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) }; int ret; - host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, host->chan_tx); - if (!host->chan_tx) + if (IS_ERR(host->chan_tx)) { + host->chan_tx = NULL; return; + } cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = start + USDHI6_SD_BUF0; @@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) if (ret < 0) goto e_release_tx; - host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, host->chan_rx); - if (!host->chan_rx) + if (IS_ERR(host->chan_rx)) { + host->chan_rx = NULL; goto e_release_tx; + } cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr; @@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) host->pins_uhs); default: - return pinctrl_select_state(host->pinctrl, - host->pins_default); + return pinctrl_select_default_state(mmc_dev(host->mmc)); } } @@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev) } host->pins_uhs = 
pinctrl_lookup_state(host->pinctrl, "state_uhs"); - if (!IS_ERR(host->pins_uhs)) { - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - - if (IS_ERR(host->pins_default)) { - dev_err(dev, - "UHS pinctrl requires a default pin state.\n"); - ret = PTR_ERR(host->pins_default); - goto e_free_mmc; - } - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->base = devm_ioremap_resource(dev, res); diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index edf94ee54ec7..aa9368bf7a0c 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c @@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING || state == FL_PREPARING_ERASE || + if (state == FL_RESETTING || state == FL_PREPARING_ERASE || state == FL_VERIFYING_ERASE) { int i = 21; unsigned int intr_flags = ONENAND_INT_MASTER; switch (state) { - case FL_RESETING: + case FL_RESETTING: intr_flags |= ONENAND_INT_RESET; break; case FL_PREPARING_ERASE: @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); return -EIO; @@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; xtra = count & 3; @@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); @@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev) c->gpmc_cs, c->phys_base, c->onenand.base, c->dma_chan ? 
"DMA" : "PIO"); - if ((r = onenand_scan(&c->mtd, 1)) < 0) + r = onenand_scan(&c->mtd, 1); + if (r < 0) goto err_release_dma; freq = omap2_onenand_get_freq(c->onenand.version_id); diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 77bd32a683e1..85640ee11c86 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); status &= 0x60; @@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); } else { ops.mode = MTD_OPS_PLACE_OOB; ops.ooblen = len; @@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) this->boundary[die] = bdry & FLEXONENAND_PI_MASK; this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); printk(KERN_INFO "Die %d boundary: %d%s\n", die, this->boundary[die], locked ? 
"(Locked)" : "(Unlocked)"); @@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, ret = this->wait(mtd, FL_WRITING); out: this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); if (!ret) /* Recalculate device size on boundary change*/ flexonenand_get_size(mtd); @@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /* Reset OneNAND to read default register values */ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); /* Wait reset */ - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/samsung_mtd.c index 55e5536a5850..beb7987e4c2b 100644 --- a/drivers/mtd/nand/onenand/samsung_mtd.c +++ b/drivers/mtd/nand/onenand/samsung_mtd.c @@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ - memcpy(this->page_buf, p, mtd->writesize); - p = this->page_buf + offset; + memcpy_fromio(this->page_buf, p, mtd->writesize); + memcpy(buffer, this->page_buf + offset, count); + } else { + memcpy_fromio(buffer, p, count); } - memcpy(buffer, p, count); - return 0; } diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 3a36285a8d8a..f6c7102a1e32 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl) /* Prepare CDMA descriptor. 
*/ static void cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, - char nf_mem, u32 flash_ptr, char *mem_ptr, - char *ctrl_data_ptr, u16 ctype) + char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr, + dma_addr_t ctrl_data_ptr, u16 ctype) { struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc; @@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, cdma_desc->command_flags |= CDMA_CF_DMA_MASTER; cdma_desc->command_flags |= CDMA_CF_INT; - cdma_desc->memory_pointer = (uintptr_t)mem_ptr; + cdma_desc->memory_pointer = mem_ptr; cdma_desc->status = 0; cdma_desc->sync_flag_pointer = 0; cdma_desc->sync_arguments = 0; cdma_desc->command_type = ctype; - cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr; + cdma_desc->ctrl_data_ptr = ctrl_data_ptr; } static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl, @@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, } cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, - (void *)dma_buf, (void *)dma_ctrl_dat, - ctype); + dma_buf, dma_ctrl_dat, ctype); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); @@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page) cadence_nand_cdma_desc_prepare(cdns_ctrl, cdns_chip->cs[chip->cur_cs], - page, NULL, NULL, + page, 0, 0, CDMA_CT_ERASE); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); if (status) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 334fe3130285..b9d5d55a5edb 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this) struct resources *r = &this->resources; int ret; + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - return 0; err_out: + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev) return ret; } + /* Set flag to get timing setup restored for next exec_op */ + if (this->hw.clk_rate) + this->hw.must_apply_timings = true; + /* re-init the BCH registers */ ret = bch_set_geometry(this); if (ret) { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 9e63800f768a..3ba73f18841f 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_US 1000 #define FMC2_TIMEOUT_MS 1000 /* Timings */ @@ -53,6 +54,8 @@ #define FMC2_PMEM 0x88 #define FMC2_PATT 0x8c #define FMC2_HECCR 0x94 +#define FMC2_ISR 0x184 +#define FMC2_ICR 0x188 #define FMC2_CSQCR 0x200 #define FMC2_CSQCFGR1 0x204 #define FMC2_CSQCFGR2 0x208 @@ -118,6 +121,12 @@ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a +/* Register: FMC2_ISR */ +#define FMC2_ISR_IHLF BIT(1) + +/* Register: FMC2_ICR */ +#define FMC2_ICR_CIHLF BIT(1) + /* Register: FMC2_CSQCR */ #define FMC2_CSQCR_CSQSTART BIT(0) @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, stm32_fmc2_set_buswidth_16(fmc2, true); } 
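The gpmi-nand hunk above wraps hardware initialization in a runtime-PM reference, dropping it with autosuspend on the way out. The general pattern as a sketch (do_hw_init is a hypothetical name; on a failed pm_runtime_get_sync() the usage count it took is balanced with pm_runtime_put_noidle()):

#include <linux/pm_runtime.h>

static int do_hw_init(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... program hardware registers here ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}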
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_sdr_timings *timings; + u32 isr, sr; + + /* Check if there is no pending requests to the NAND flash */ + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + FMC2_TIMEOUT_US)) + dev_warn(fmc2->dev, "Waitrdy timeout\n"); + + /* Wait tWB before R/B# signal is low */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + + /* R/B# signal is low, clear high level flag */ + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + + /* Wait R/B# signal is high */ + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + isr, isr & FMC2_ISR_IHLF, + 5, 1000 * timeout_ms); +} + static int stm32_fmc2_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, break; case NAND_OP_WAITRDY_INSTR: - ret = nand_soft_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + ret = stm32_fmc2_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); break; } } diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 4744bf94ad9a..b9f272408c4d 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl, /* FTL can contain -1 entries that are by default filled with bits */ if (block == -1) { - memset(buffer, 0xFF, SM_SECTOR_SIZE); + if (buffer) + memset(buffer, 0xFF, SM_SECTOR_SIZE); return 0; } diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f4afe123e9dc..b0cd443dd758 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) return 0; + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); } @@ -4596,6 +4598,7 @@ static void sst_set_default_init(struct spi_nor *nor) static void st_micron_set_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; nor->params.quad_enable = NULL; nor->params.set_4byte = st_micron_set_4byte; } @@ -4768,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) static void spansion_post_sfdp_fixups(struct spi_nor *nor) { - struct mtd_info *mtd = &nor->mtd; - - if (mtd->size <= SZ_16M) + if (nor->params.size <= SZ_16M) return; nor->flags |= SNOR_F_4B_OPCODES; diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index 4e1789ea2bc3..eacd428e07e9 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -102,6 +102,7 @@ #define TCAN4X5X_MODE_NORMAL BIT(7) #define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) +#define TCAN4X5X_DISABLE_INH_MSK BIT(9) #define TCAN4X5X_SW_RESET BIT(2) @@ -166,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) } } +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + static int regmap_spi_gather_write(void *context, const void 
*reg, size_t reg_len, const void *val, size_t val_len) @@ -348,14 +371,23 @@ static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) TCAN4X5X_DISABLE_WAKE_MSK, 0x00); } +static int tcan4x5x_disable_state(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_INH_MSK, 0x01); +} + static int tcan4x5x_parse_config(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + int ret; tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", GPIOD_OUT_HIGH); if (IS_ERR(tcan4x5x->device_wake_gpio)) { - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) return -EPROBE_DEFER; tcan4x5x_disable_wake(cdev); @@ -366,18 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) if (IS_ERR(tcan4x5x->reset_gpio)) tcan4x5x->reset_gpio = NULL; - usleep_range(700, 1000); + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, "device-state", GPIOD_IN); - if (IS_ERR(tcan4x5x->device_state_gpio)) + if (IS_ERR(tcan4x5x->device_state_gpio)) { tcan4x5x->device_state_gpio = NULL; - - tcan4x5x->power = devm_regulator_get_optional(cdev->dev, - "vsup"); - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) - return -EPROBE_DEFER; + tcan4x5x_disable_state(cdev); + } return 0; } @@ -412,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi) if (!priv) return -ENOMEM; + priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); + if (PTR_ERR(priv->power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else + priv->power = NULL; + mcan_class->device_data = priv; m_can_class_get_clocks(mcan_class); @@ -451,11 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, &spi->dev, &tcan4x5x_regmap); - ret = tcan4x5x_parse_config(mcan_class); + ret = tcan4x5x_power_enable(priv->power, 1); if (ret) goto out_clk; - tcan4x5x_power_enable(priv->power, 1); + ret = tcan4x5x_parse_config(mcan_class); + if (ret) + goto out_power; + + ret = tcan4x5x_init(mcan_class); + if (ret) + goto out_power; ret = m_can_class_register(mcan_class); if (ret) diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 8caf7af0dee2..99101d7027a8 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; + int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; - while (npackets < quota) { + while (work_done < quota) { canrflg = in_8(®s->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; @@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) stats->rx_packets++; stats->rx_bytes += frame->can_dlc; - npackets++; + work_done++; netif_receive_skb(skb); } - if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(®s->canrier, priv->shadow_canrier); - ret = 0; + if (work_done < quota) { + if (likely(napi_complete_done(&priv->napi, work_done))) { + clear_bit(F_RX_PROGRESS, &priv->flags); + if (priv->can.state < CAN_STATE_BUS_OFF) + 
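/*
 * The mscan poll rework around this hunk follows the canonical NAPI
 * contract: the poll routine returns the number of packets it processed,
 * and may only complete NAPI (and re-enable device interrupts) when it
 * used less than its budget. napi_complete_done() returns false when the
 * instance must keep polling (e.g. busy polling), in which case the
 * interrupts stay masked. Sketch of the generic shape, with hypothetical
 * process_rx()/enable_device_irqs() helpers:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = process_rx(napi, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			enable_device_irqs();
 *		return work_done;
 *	}
 */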
out_8(®s->canrier, priv->shadow_canrier); + } } - return ret; + return work_done; } static irqreturn_t mscan_isr(int irq, void *dev_id) diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 2e57122f02fb..2f5c287eac95 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = tty->disc_data; + struct slcan *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } /* Send a can_frame to a TTY queue. */ @@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* Flush network side */ diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 2f74f6704c12..a4b4b742c80c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); @@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 5fc0be564274..7ab87a758754 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *ep; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index ae4c37e1bb75..1b9957f12459 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e43040c9f9ee..3e8635311d0d 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS); + reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G); core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 120a65d3e3ef..b016cc205f81 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ 
b/drivers/net/dsa/mv88e6xxx/global1.c @@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) { u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; + /* Use the default high priority for management frames sent to + * the CPU. + */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + return mv88e6390_g1_monitor_write(chip, ptr, port); } diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index bc5a6b2bb1e4..5324c6f4ae90 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -211,6 +211,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff /* Offset 0x1C: Global Control 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 7fe256c5739d..0b43c650e100 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) } static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, - phy_interface_t mode) + phy_interface_t mode, bool force) { u8 lane; u16 cmode; @@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } - /* cmode doesn't change, nothing to do for us */ - if (cmode == chip->ports[port].cmode) + /* cmode doesn't change, nothing to do for us unless forced */ + if (cmode == chip->ports[port].cmode && !force) return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); @@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (port != 9 && port != 10) return -EOPNOTSUPP; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, @@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, break; } - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, @@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, true); } int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 1da5ac111499..bb91f3d17cf2 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -582,7 +582,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device *dev = &priv->spidev->dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index a17a4da7bc15..c85e3e29012c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto 
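/*
 * The sja1105 hunk above swaps for_each_child_of_node() for
 * for_each_available_child_of_node(), which also skips children whose
 * device-tree status property is "disabled". Rough shape of the
 * iteration (from <linux/of.h>; the iterator drops the reference to the
 * previous child on each step, so only an early break needs a manual
 * of_node_put()):
 *
 *	struct device_node *child;
 *
 *	for_each_available_child_of_node(ports_node, child) {
 *		// visited only if status is "okay" or absent
 *		if (error) {
 *			of_node_put(child);
 *			break;
 *		}
 *	}
 */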
err_exit; + aq_nic_set_loopback(self); + err = self->aq_hw_ops->hw_start(self->aq_hw); if (err < 0) goto err_exit; @@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self) INIT_WORK(&self->service_task, aq_nic_service_task); - aq_nic_set_loopback(self); - timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); aq_nic_service_timer_cb(&self->service_timer); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 58e891af6e09..ec041f78d063 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .rx_extract_ts = hw_atl_b0_rx_extract_ts, .extract_hwts = hw_atl_b0_extract_hwts, .hw_set_offload = hw_atl_b0_hw_offload_set, - .hw_get_hw_stats = hw_atl_utils_get_hw_stats, - .hw_get_fw_version = hw_atl_utils_get_fw_version, - .hw_set_offload = hw_atl_b0_hw_offload_set, .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 8910b62e67ed..f547baa6c954 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) u32 speed; mpi_state = hw_atl_utils_mpi_get_state(self); - speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | - FW2X_RATE_10G); + speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT; if (!speed) { link_status->mbps = 0U; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 035dbb1b2c98..ec25fd81985d 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) int ethaddr_bytes = ETH_ALEN; memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); + for (j = 0; j < magicsync; j++) { + pmask[len >> 3] |= BIT(len & 7); + len++; + } for (j = 0; j < B44_MAX_PATTERNS; j++) { if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) @@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) for (k = 0; k< ethaddr_bytes; k++) { ppattern[offset + magicsync + (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); + pmask[len >> 3] |= BIT(len & 7); + len++; } } return len - 1; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 825af709708e..d6b1a153f9df 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; } @@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct 
notifier_block *nb, continue; ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; } return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c779f9cf8822..e6f18f6070ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11065,11 +11065,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@ -11361,7 +11373,7 @@ int bnxt_get_port_parent_id(struct net_device *dev, return -EOPNOTSUPP; /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; ppid->id_len = sizeof(bp->switch_id); @@ -11734,6 +11746,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) put_unaligned_le32(dw, &dsn[0]); pci_read_config_dword(pdev, pos + 4, &dw); put_unaligned_le32(dw, &dsn[4]); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; } @@ -11845,9 +11858,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_PF(bp)) { /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto init_err_pci_clean; + bnxt_pcie_dsn_get(bp, bp->switch_id); } /* MTU range: 60 - FW defined max */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 505af5cfb1bd..f14335433a64 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1532,6 +1532,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 @@ -1936,9 +1937,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type) case HWRM_CFA_ENCAP_RECORD_FREE: case HWRM_CFA_DECAP_FILTER_ALLOC: case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_ALLOC: - case HWRM_CFA_NTUPLE_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_CFG: case HWRM_CFA_EM_FLOW_ALLOC: case HWRM_CFA_EM_FLOW_FREE: case HWRM_CFA_EM_FLOW_CFG: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f9bf7d7250ab..b010b34cdaf8 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) struct net_device *dev; int rc, i; + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); if (!bp->vf_reps) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 120fa05a39ff..0a8624be44a9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index c5ee363ca5dc..f7d87c71aaa9 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,21 +611,24 @@ static const struct phylink_mac_ops macb_phylink_ops = { .mac_link_up = macb_mac_link_up, }; +static bool macb_phy_handle_exists(struct device_node *dn) +{ + dn = of_parse_phandle(dn, "phy-handle", 0); + of_node_put(dn); + return dn != NULL; +} + static int macb_phylink_connect(struct macb *bp) { + struct device_node *dn = bp->pdev->dev.of_node; struct net_device *dev = bp->dev; struct phy_device *phydev; int ret; - if (bp->pdev->dev.of_node && - of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) { - ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node, - 0); - if (ret) { - netdev_err(dev, "Could not attach PHY (%d)\n", ret); - return ret; - } - } else { + if (dn) + ret = phylink_of_phy_connect(bp->phylink, dn, 0); + + if (!dn || (ret && !macb_phy_handle_exists(dn))) { phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); @@ -634,10 +637,11 @@ static int macb_phylink_connect(struct macb *bp) /* attach the mac to the phy */ ret = phylink_connect_phy(bp->phylink, phydev); - if (ret) { - netdev_err(dev, "Could not attach to PHY (%d)\n", ret); - return ret; - } + } + + if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; } phylink_start(bp->phylink); @@ -4088,7 +4092,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->rate = 0; mgmt->hw.init = &init; - *tx_clk = clk_register(NULL, &mgmt->hw); + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); if (IS_ERR(*tx_clk)) return PTR_ERR(*tx_clk); @@ -4416,7 +4420,6 @@ err_out_free_netdev: err_disable_clocks: clk_disable_unprepare(tx_clk); - clk_unregister(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); @@ -4446,7 +4449,6 @@ static int macb_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { clk_disable_unprepare(bp->tx_clk); - clk_unregister(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 2824b5ed4ced..883cfa9c4b6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2448,6 +2448,8 @@ static int 
cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (!is_offload(adapter)) return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; if (!(adapter->flags & FULL_INIT_DONE)) return -EIO; /* need the memory controllers */ if (copy_from_user(&t, useraddr, sizeof(t))) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index aca9f7a20a2a..4144c230dc97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos) static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) { v = seq_tab_get_idx(seq->private, *pos + 1); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 12ff69b3ba91..0dedd3e9c31e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct ch_sched_queue qe = { 0 }; + struct ch_sched_params p = { 0 }; struct sched_class *e; - struct ch_sched_params p; - struct ch_sched_queue qe; u32 req_rate; int err = 0; @@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return -EINVAL; } + qe.queue = index; + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { + dev_err(adap->pdev_dev, + "Queue %u already bound to class %u of type: %u\n", + index, e->idx, e->info.u.params.level); + return -EBUSY; + } + /* Convert from Mbps to Kbps */ req_rate = rate * 1000; @@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return 0; /* Fetch any available unused or matching scheduling class */ - memset(&p, 0, sizeof(p)); p.type = SCHED_CLASS_TYPE_PACKET; p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; p.u.params.mode = SCHED_CLASS_MODE_CLASS; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 102b370fbd3e..6d485803ddbe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, struct flow_action *actions = &cls->rule->action; struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; + struct ch_sched_queue qe; + struct sched_class *e; u64 max_link_rate; u32 i, speed; int ret; @@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, } } + for (i = 0; i < pi->nqsets; i++) { + memset(&qe, 0, sizeof(qe)); + qe.queue = i; + + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { + NL_SET_ERR_MSG_MOD(extack, + "Some queues are already bound to different class"); + return -EBUSY; + } + } + return 0; } +static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + int ret; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = tc; + ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); + if (ret) + goto out_free; + } + + return 0; + +out_free: + while (i--) { + 
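/*
 * The "while (i--)" loop being added here is the usual partial-failure
 * unwind idiom: if binding queue i fails, queues 0..i-1 have already
 * been bound and must be unbound in reverse order before the error is
 * returned. Generic shape of the pattern (bind/unbind hypothetical):
 *
 *	for (i = 0; i < n; i++) {
 *		ret = bind(i);
 *		if (ret)
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	while (i--)
 *		unbind(i);
 *	return ret;
 */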
qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } + + return ret; +} + +static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } +} + static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { @@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; struct sched_class *e; + int ret; u32 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; @@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, return -ENOMEM; } + ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Could not bind queues to traffic class"); + goto out_free; + } + tc_port_matchall->egress.hwtc = e->idx; tc_port_matchall->egress.cookie = cls->cookie; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + +out_free: + cxgb4_sched_class_free(dev, e->idx); + return ret; } static void cxgb4_matchall_free_tc(struct net_device *dev) @@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_matchall_tc_unbind_queues(dev); cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index 8971dddcdb7a..ec3eb45ee3b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); u32 speed, qcount = 0, qoffset = 0; + u32 start_a, start_b, end_a, end_b; int ret; - u8 i; + u8 i, j; if (!mqprio->qopt.num_tc) return 0; @@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev, qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset); qcount += mqprio->qopt.count[i]; + start_a = mqprio->qopt.offset[i]; + end_a = start_a + mqprio->qopt.count[i] - 1; + for (j = i + 1; j < mqprio->qopt.num_tc; j++) { + start_b = mqprio->qopt.offset[j]; + end_b = start_b + mqprio->qopt.count[j] - 1; + + /* If queue count is 0, then the traffic + * belonging to this class will not use + * ETHOFLD queues. So, no need to validate + * further. 
+ */ + if (!mqprio->qopt.count[i]) + break; + + if (!mqprio->qopt.count[j]) + continue; + + if (max_t(u32, start_a, start_b) <= + min_t(u32, end_a, end_b)) { + netdev_err(dev, + "Queues can't overlap across tc\n"); + return -EINVAL; + } + } + /* Convert byte per second to bits per second */ min_rate += (mqprio->min_rate[i] * 8); max_rate += (mqprio->max_rate[i] * 8); diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index e9e45006632d..1a16449e9deb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = l2t_get_idx(seq, *pos); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 3e61bd5d0c29..cebe1412d960 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi, return found; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_queue_entry *qe = NULL; + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return NULL; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id); + return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL; +} + static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) { struct sched_queue_entry *qe = NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index e92ff68bdd0a..5cc74a5a1774 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id) return true; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p); int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type); int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 05c1899f6628..9294027e9d90 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return; regs->version = fec_enet_register_version; @@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev, off >>= 2; buf[off] = readl(&theregs[off]); } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } static int fec_enet_get_ts_info(struct net_device *ndev, diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 41c6fa200e74..e1901874c19f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -110,7 +110,7 @@ do { \ /* Interface Mode Register 
(IF_MODE) */ #define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ -#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */ #define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ #define IF_MODE_RGMII 0x00000004 #define IF_MODE_RGMII_AUTO 0x00008000 @@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, tmp = 0; switch (phy_if) { case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_XGMII; + tmp |= IF_MODE_10G; break; default: tmp |= IF_MODE_GMII; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index e03b30c60dcf..c82c85ef5fb3 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a011043; }; static u32 xgmac_read32(void __iomem *regs, @@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; /* Return all Fs if nothing was there */ - if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && + !priv->has_a011043) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); @@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, "little-endian"); + priv->has_a011043 = of_property_read_bool(pdev->dev.of_node, + "fsl,erratum-a011043"); + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index edec61dfc868..9f52e72ff641 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, rx->cnt = cnt; rx->fill_cnt += work_done; - /* restock desc ring slots */ - dma_wmb(); /* Ensure descs are visible before ringing doorbell */ gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index f4889431f9b7..d0244feb0301 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) * may have added descriptors without ringing the doorbell. */ - /* Ensure tx descs from a prior gve_tx are visible before - * ringing doorbell. 
- */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } @@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; - /* Ensure tx descs are visible before ringing doorbell */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 14ab20491fd0..eb69e5c81a4d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { - netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } @@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) container_of(napi, struct hns_nic_ring_data, napi); struct hnae_ring *ring = ring_data->ring; -try_again: clean_complete += ring_data->poll_one( ring_data, budget - clean_complete, ring_data->ex_process); @@ -1066,7 +1064,7 @@ try_again: napi_complete(napi); ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); } else { - goto try_again; + return budget; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 69545dd6c938..b3deb5e5ce29 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define HNS3_INNER_VLAN_TAG 1 #define HNS3_OUTER_VLAN_TAG 2 +#define HNS3_MIN_TX_LEN 33U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -1405,6 +1407,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) int bd_num = 0; int ret; + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + return NETDEV_TX_OK; + /* Prefetch the data used later */ prefetch(skb->data); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6c51b1bad8c4..37a2314d3e6b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -185,13 +185,12 @@ struct e1000_phy_regs { /* board specific private data structure */ struct e1000_adapter { + struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct timer_list blink_timer; struct work_struct reset_task; - struct delayed_work watchdog_task; - - struct workqueue_struct *e1000_workqueue; + struct work_struct watchdog_task; const struct e1000_info *ei; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fe7997c18a10..7c5b18d87b49 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when 
we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) napi_synchronize(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); @@ -5155,11 +5153,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) } } +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, - watchdog_task.work); + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -5407,9 +5419,8 @@ link_up: /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, - round_jiffies(2 * HZ)); + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -7449,21 +7460,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - e1000e_driver_name); - - if (!adapter->e1000_workqueue) { - err = -ENOMEM; - goto err_workqueue; - } - - INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); - queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, - 0); - + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@ -7557,9 +7558,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_register: - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); -err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@ -7604,17 +7602,15 @@ static void e1000_remove(struct pci_dev *pdev) * from being rescheduled. 
*/ set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); - cancel_delayed_work(&adapter->watchdog_task); - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); - if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9f0a4e92a231..37514a75f928 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; /* fall through */ default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index d4055037af89..45b90eb11adb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { - hw_dbg(hw, "Buffer to small for PBA data.\n"); + hw_dbg(hw, "Buffer too small for PBA data.\n"); return I40E_ERR_PARAM; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6a3f0fc56c3b..69523ac85639 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, } /** + * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL + * @vqs: virtchnl_queue_select structure containing bitmaps to validate + * + * Returns true if validation was successful, else false.
+ */ +static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) +{ + if ((!vqs->rx_queues && !vqs->tx_queues) || + vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || + vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) + return false; + + return true; +} + +/** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) || - vqs->rx_queues > I40E_MAX_VF_QUEUES || - vqs->tx_queues > I40E_MAX_VF_QUEUES) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 29de3ae96ef2..bd1b1ed323f4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 821987da5698..8e16be960e96 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, * * Returns ptr to the filter object or NULL when no memory available. **/ -static struct -iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, - const u8 *macaddr) +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { struct iavf_mac_filter *f; @@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work) struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; - struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@ -2181,6 +2180,16 @@ continue_reset: spin_lock_bh(&adapter->mac_vlan_list_lock); + /* Delete filter for the current MAC address; it could have + * been changed by the PF via administratively set MAC. + * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { + list_del(&f->list); + kfree(f); + } + } /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index c46770eba320..1ab9cb339acb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } + spin_lock_bh(&adapter->mac_vlan_list_lock); + iavf_add_filter(adapter, adapter->hw.mac.addr); + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8a6ef3514129..438b42ce2cd9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) dev_spec->module_plugged = true; if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; - } else if (eth_flags->e100_base_fx) { + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { dev_spec->sgmii_active = true; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { @@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* do not change link mode for 100BaseFX */ - if (dev_spec->eth_flags.e100_base_fx) - break; - /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; - if (hw->phy.media_type == e1000_media_type_copper) + if (dev_spec->sgmii_active) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 4690d6c87f39..445fbdce3e25 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev, advertising &= ~ADVERTISED_1000baseKX_Full; } } - if (eth_flags->e100_base_fx) { + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { supported |= SUPPORTED_100baseT_Full; advertising |= ADVERTISED_100baseT_Full; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 82a30b597cf9..a2b2ad1f60b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; - u64 action; + u8 queue; spin_lock(&adapter->fdir_perfect_lock); @@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - action = filter->action; - if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) - action = - (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = 
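/*
 * ethtool packs the destination of a flow-director rule into one 64-bit
 * ring_cookie: the low bits select the ring, and the VF index (0 meaning
 * the PF itself) sits at ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. The helpers
 * used here are the uapi <linux/ethtool.h> accessors:
 *
 *	ring = ethtool_get_flow_spec_ring(ring_cookie);
 *	vf   = ethtool_get_flow_spec_ring_vf(ring_cookie);
 *
 * The old code shifted the whole action by the VF offset and subtracted
 * one, so restored rules could be mapped onto the wrong queue; the
 * rewrite decodes ring and VF separately and validates both.
 */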
ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && (ring >= adapter->num_rx_queues)) { + e_err(drv, "FDIR restore failed without VF, ring: %u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[action]->reg_idx); + &filter->filter, filter->sw_idx, queue); } spin_unlock(&adapter->fdir_perfect_lock); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 076f2da36f27..64ec0e7c64b4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int count = 0; - if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 71a872d46bc4..67ad8b8b127d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2081,7 +2081,11 @@ static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp) { - u32 ret, act = bpf_prog_run_xdp(prog, xdp); + unsigned int len; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: @@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, if (err) { ret = MVNETA_XDP_DROPPED; __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); } else { ret = MVNETA_XDP_REDIR; } @@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_DROP: __page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + len, true); ret = MVNETA_XDP_DROPPED; break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c index eaf08f7ad128..64ed725aec28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev) crdump_enable_crspace_access(dev, cr_space); /* Get the available snapshot ID for the dumps */ - id = devlink_region_shapshot_id_get(devlink); + id = devlink_region_snapshot_id_get(devlink); /* Try to capture dumps */ mlx4_crdump_collect_crspace(dev, cr_space, id); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 68d593074f6c..d48292ccda29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -122,6 +122,22 @@ enum { #endif }; +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) + +#define MLX5E_INNER_TTC_NUM_GROUPS 3 +#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ + MLX5E_INNER_TTC_GROUP2_SIZE +\ + MLX5E_INNER_TTC_GROUP3_SIZE) + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 1d6b58860da6..3a975641f902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, struct mlx5e_err_ctx *err_ctx) { - if (!reporter) { - netdev_err(priv->netdev, err_str); + netdev_err(priv->netdev, err_str); + + if (!reporter) return err_ctx->recover(&err_ctx->ctx); - } + return devlink_health_report(reporter, err_str, err_ctx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 778dab1af8fc..f260dd96873b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct tx_sync_info { u64 rcd_sn; - s32 sync_len; + u32 sync_len; int nr_frags; skb_frag_t frags[MAX_SKB_FRAGS]; }; @@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval { static enum mlx5e_ktls_sync_retval tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, - u32 tcp_seq, struct tx_sync_info *info) + u32 tcp_seq, int datalen, struct tx_sync_info *info) { struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; struct tls_record_info *record; int remaining, i = 0; unsigned long flags; + bool ends_before; spin_lock_irqsave(&tx_ctx->lock, flags); record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); @@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, goto out; } - if (unlikely(tcp_seq < tls_record_start_seq(record))) { - ret = tls_record_is_start_marker(record) ? - MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + /* There are the following cases: + * 1. packet ends before start marker: bypass offload. + * 2. packet starts before start marker and ends after it: drop, + * not supported, breaks contract with kernel. + * 3. packet ends before tls record info starts: drop, + * this packet was already acknowledged and its record info + * was released. + */ + ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ? 
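/*
 * ends_before above is computed with before(), the standard wrap-safe
 * TCP sequence comparison from <net/tcp.h>:
 *
 *	static inline bool before(__u32 seq1, __u32 seq2)
 *	{
 *		return (__s32)(seq1 - seq2) < 0;
 *	}
 *
 * Subtracting in 32 bits and testing the sign keeps the comparison
 * correct across sequence-number wraparound, where a plain "<" would
 * give the wrong answer near 2^32.
 */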
MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + goto out; + } else if (ends_before) { + ret = MLX5E_KTLS_SYNC_FAIL; goto out; } @@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, u8 num_wqebbs; int i = 0; - ret = tx_sync_info_get(priv_tx, seq, &info); + ret = tx_sync_info_get(priv_tx, seq, datalen, &info); if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { stats->tls_skip_no_sync_data++; @@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, goto err_out; } - if (unlikely(info.sync_len < 0)) { - if (likely(datalen <= -info.sync_len)) - return MLX5E_KTLS_SYNC_DONE; - - stats->tls_drop_bypass_req++; - goto err_out; - } - stats->tls_ooo++; tx_post_resync_params(sq, priv_tx, info.rcd_sn); @@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, if (unlikely(contig_wqebbs_room < num_wqebbs)) mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); - tx_post_resync_params(sq, priv_tx, info.rcd_sn); - for (; i < info.nr_frags; i++) { unsigned int orig_fsz, frag_offset = 0, n = 0; skb_frag_t *f = &info.frags[i]; @@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, enum mlx5e_ktls_sync_retval ret = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); - if (likely(ret == MLX5E_KTLS_SYNC_DONE)) + switch (ret) { + case MLX5E_KTLS_SYNC_DONE: *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); - else if (ret == MLX5E_KTLS_SYNC_FAIL) + break; + case MLX5E_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + /* fall-through */ + default: /* MLX5E_KTLS_SYNC_FAIL */ goto err_out; - else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ - goto out; + } } priv_tx->expected_seq = seq + datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 15b7f0f1427c..73d3dc07331f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -904,22 +904,6 @@ del_rules: return err; } -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) - static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, bool use_ipv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9b32a9c0f497..7e32b9e3667c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; - ft_attr->max_fte = MLX5E_NUM_TT; + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = MLX5E_TC_PRIO; } @@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info return kmemdup(tun_info, tun_size, GFP_KERNEL); } +static bool 
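/*
 * The hairpin TTC fix above sizes the flow table from its flow-group
 * layout instead of MLX5E_NUM_TT: a table must have room for the sum of
 * its groups, and group 1 also carries the tunnel traffic types:
 *
 *	MLX5E_TTC_TABLE_SIZE = (BIT(3) + MLX5E_NUM_TUNNEL_TT)	// group 1
 *	                     + BIT(1)				// group 2
 *	                     + BIT(0)				// group 3
 *
 * (These are the macros moved into en/fs.h earlier in this diff; the
 * reading of the failure mode is an assumption from the diff alone.)
 */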
is_duplicated_encap_entry(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < out_index; i++) { + if (flow->encaps[i].e != e) + continue; + NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); + netdev_err(priv->netdev, "can't duplicate encap action\n"); + return true; + } + + return false; +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* must verify if encap is valid or not */ if (e) { + /* Check that entry was not already attached to this flow */ + if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { + err = -EOPNOTSUPP; + goto out_err; + } + mutex_unlock(&esw->offloads.encap_tbl_lock); wait_for_completion(&e->res_ready); @@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, same_hw_devs(priv, netdev_priv(out_dev)); } +static bool is_duplicated_output_device(struct net_device *dev, + struct net_device *out_dev, + int *ifindexes, int if_count, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < if_count; i++) { + if (ifindexes[i] == out_dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, + "can't duplicate output to same device"); + netdev_err(dev, "can't duplicate output to same device: %s\n", + out_dev->name); + return true; + } + } + + return false; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, @@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; const struct ip_tunnel_info *info = NULL; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; + int err, i, if_count = 0; bool encap = false; u32 action = 0; - int err, i; if (!flow_action_has_entries(flow_action)) return -EINVAL; @@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); struct net_device *uplink_upper; + if (is_duplicated_output_device(priv->netdev, + out_dev, + ifindexes, + if_count, + extack)) + return -EOPNOTSUPP; + + ifindexes[if_count] = out_dev->ifindex; + if_count++; + rcu_read_lock(); uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); @@ -3980,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, u32 rate_mbps; int err; + vport_num = rpriv->rep->vport; + if (vport_num >= MLX5_VPORT_ECPF) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress rate limit is supported only for Eswitch ports connected to VFs"); + return -EOPNOTSUPP; + } + esw = priv->mdev->priv.eswitch; /* rate is given in bytes/sec. * First convert to bits/sec and then round to the nearest mbit/secs. @@ -3988,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * 1 mbit/sec. */ rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; - vport_num = rpriv->rep->vport; - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2c965ad0d744..3df3604e8929 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) struct mlx5_vport *vport; int i; - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { memset(&vport->info, 0, sizeof(vport->info)); + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + } } /* Public E-Switch API */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 243a5440867e..3e6412783078 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -866,7 +866,7 @@ out: */ #define ESW_SIZE (16 * 1024 * 1024) const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, - 64 * 1024, 4 * 1024 }; + 64 * 1024, 128 }; static int get_sz_from_pool(struct mlx5_eswitch *esw) @@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return -EINVAL; } - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); if (err) { @@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type int esw_offloads_enable(struct mlx5_eswitch *esw) { - int err; + struct mlx5_vport *vport; + int err, i; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) @@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; + /* Representor will control the vport link state */ + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); if (err) goto err_vports; @@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9a48c4310887..8c5df6c7d7b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node) } } -static void del_sw_fte_rcu(struct rcu_head *head) -{ - struct fs_fte *fte = container_of(head, struct fs_fte, rcu); - struct mlx5_flow_steering *steering = get_steering(&fte->node); - - kmem_cache_free(steering->ftes_cache, fte); -} - static void del_sw_fte(struct fs_node *node) { + struct mlx5_flow_steering *steering = get_steering(node); struct mlx5_flow_group *fg; struct fs_fte *fte; int err; @@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); 
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); - - call_rcu(&fte->rcu, del_sw_fte_rcu); + kmem_cache_free(steering->ftes_cache, fte); } static void del_hw_flow_group(struct fs_node *node) @@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head) } static struct fs_fte * -lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value) +lookup_fte_locked(struct mlx5_flow_group *g, + const u32 *match_value, + bool take_write) { struct fs_fte *fte_tmp; - nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); - - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - fte_tmp = NULL; - goto out; - } - - if (!fte_tmp->node.active) { - tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; - } - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - -out: - up_write_ref_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) -{ - struct fs_fte *fte_tmp; - - if (!tree_get_node(&g->node)) - return NULL; - - rcu_read_lock(); - fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte); + if (take_write) + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + else + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, + rhash_fte); if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - rcu_read_unlock(); fte_tmp = NULL; goto out; } - rcu_read_unlock(); - if (!fte_tmp->node.active) { tree_put_node(&fte_tmp->node, false); fte_tmp = NULL; @@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) } nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - out: - tree_put_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) -{ - if (write) - return lookup_fte_for_write_locked(g, match_value); + if (take_write) + up_write_ref_node(&g->node, false); else - return lookup_fte_for_read_locked(g, match_value); + up_read_ref_node(&g->node); + return fte_tmp; } static struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index e8cd997f413e..c2621b911563 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -203,7 +203,6 @@ struct fs_fte { enum fs_fte_status status; struct mlx5_fc *counter; struct rhash_head hash; - struct rcu_head rcu; int modify_mask; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 173e2c12e1c7..f554cfddcf4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) if (err) goto err_load; + if (boot) { + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1210,6 +1216,9 @@ out: return err; err_reg_dev: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: mlx5_unload(dev); err_load: if (boot) @@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) request_module_nowait(MLX5_IB_MOD); - err = 
mlx5_devlink_register(devlink, &pdev->dev); - if (err) - goto clean_load; - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_save_state(pdev); return 0; -clean_load: - mlx5_unload_one(dev, true); - err_load_one: mlx5_pci_close(dev); pci_init_err: @@ -1561,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 32e94d2ee5e4..e4cff7abb348 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, /* We need to copy the refcount since this ste * may have been traversed several times */ - refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); + new_ste->refcount = cur_ste->refcount; /* Link old STEs rule_mem list to the new ste */ mlx5dr_rule_update_rule_member(cur_ste, new_ste); @@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, if (!rule_mem) return -ENOMEM; + INIT_LIST_HEAD(&rule_mem->list); + INIT_LIST_HEAD(&rule_mem->use_ste_list); + rule_mem->ste = ste; list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 51803eef13dd..c7f10d4f8f8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies. 
*/ +#include <linux/smp.h> #include "dr_types.h" #define QUEUE_SIZE 128 @@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = smp_processor_id() % mlx5_comp_vectors_count(mdev); + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); if (err) { kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index a5a266983dd3..c6c7d1defbd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) if (dst->next_htbl) dst->next_htbl->pointing_ste = dst; - refcount_set(&dst->refcount, refcount_read(&src->refcount)); + dst->refcount = src->refcount; INIT_LIST_HEAD(&dst->rule_list); list_splice_tail_init(&src->rule_list, &dst->rule_list); @@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste) bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) { - return !refcount_read(&ste->refcount); + return !ste->refcount; } /* Init one ste as a pattern for ste data array */ @@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, htbl->ste_arr = chunk->ste_arr; htbl->hw_ste_arr = chunk->hw_ste_arr; htbl->miss_list = chunk->miss_list; - refcount_set(&htbl->refcount, 0); + htbl->refcount = 0; for (i = 0; i < chunk->num_of_entries; i++) { struct mlx5dr_ste *ste = &htbl->ste_arr[i]; ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; ste->htbl = htbl; - refcount_set(&ste->refcount, 0); + ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); INIT_LIST_HEAD(&ste->rule_list); @@ -713,7 +713,7 @@ out_free_htbl: int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) { - if (refcount_read(&htbl->refcount)) + if (htbl->refcount) return -EBUSY; mlx5dr_icm_free_chunk(htbl->chunk); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 290fe61c33d0..3fdf4a5eb031 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste { u8 *hw_ste; /* refcount: indicates the num of rules that using this ste */ - refcount_t refcount; + u32 refcount; /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; @@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl { struct mlx5dr_ste_htbl { u8 lu_type; u16 byte_mask; - refcount_t refcount; + u32 refcount; struct mlx5dr_icm_chunk *chunk; struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) { - if (refcount_dec_and_test(&htbl->refcount)) + htbl->refcount--; + if (!htbl->refcount) mlx5dr_ste_htbl_free(htbl); } static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) { - refcount_inc(&htbl->refcount); + htbl->refcount++; } /* STE utils */ @@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste, struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher) { - if (refcount_dec_and_test(&ste->refcount)) + ste->refcount--; + if (!ste->refcount) mlx5dr_ste_free(ste, matcher, nic_matcher); } /* 
initial as 0, increased only when ste appears in a new rule */ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) { - refcount_inc(&ste->refcount); + ste->refcount++; } void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3d587d0bdbbe..1e32e2443f73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; - u32 id; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { err = -ENOSPC; goto free_actions; } - switch (type) { - case MLX5_FLOW_DESTINATION_TYPE_COUNTER: - id = dst->dest_attr.counter_id; + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; - tmp_action = - mlx5dr_action_create_flow_counter(id); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - break; + switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: tmp_action = create_ft_action(dev, dst); if (!tmp_action) { @@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + list_for_each_entry(dst, &fte->node.children, node.list) { + u32 id; + + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -ENOSPC; + goto free_actions; + } + + id = dst->dest_attr.counter_id; + tmp_action = + mlx5dr_action_create_flow_counter(id); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + } + params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f7fd5e8fbf96..8ed15199eb4f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -860,23 +860,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } - if (eth_skb_pad(skb)) { this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; @@ -1215,6 +1209,9 @@ static void update_stats_cache(struct work_struct *work) periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes down. 
+ */ goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@ -4324,6 +4321,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; } +static void +mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -4345,6 +4351,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } } @@ -5132,6 +5139,27 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); } +static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; + mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -5634,7 +5662,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = { static struct mlxsw_driver mlxsw_sp3_driver = { .kind = mlxsw_sp3_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp2_init, + .init = mlxsw_sp3_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 150b3a144b83..3d3cca596116 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <linux/mutex.h> #include <net/net_namespace.h> #include <net/tc_act/tc_vlan.h> @@ -25,6 +26,7 @@ struct mlxsw_sp_acl { struct mlxsw_sp_fid *dummy_fid; struct rhashtable ruleset_ht; struct list_head rules; + struct mutex rules_lock; /* Protects rules list */ struct { struct delayed_work dw; unsigned long interval; /* ms */ @@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, goto err_ruleset_block_bind; } + mutex_lock(&mlxsw_sp->acl->rules_lock); list_add_tail(&rule->list, &mlxsw_sp->acl->rules); + mutex_unlock(&mlxsw_sp->acl->rules_lock); block->rule_count++; block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; @@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; + mutex_lock(&mlxsw_sp->acl->rules_lock); list_del(&rule->list); + mutex_unlock(&mlxsw_sp->acl->rules_lock); if (!ruleset->ht_key.chain_index && mlxsw_sp_acl_ruleset_is_singular(ruleset)) 
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, @@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_rule *rule; int err; - /* Protect internal structures from changes */ - rtnl_lock(); + mutex_lock(&acl->rules_lock); list_for_each_entry(rule, &acl->rules, list) { err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, rule); if (err) goto err_rule_update; } - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return 0; err_rule_update: - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return err; } @@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); + mutex_init(&acl->rules_lock); err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; @@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) return 0; err_acl_ops_init: + mutex_destroy(&acl->rules_lock); mlxsw_sp_fid_put(fid); err_fid_get: rhashtable_destroy(&acl->ruleset_ht); @@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); + mutex_destroy(&acl->rules_lock); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 68cc6737d45c..0124bfe1963b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static u64 +mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; +} + +static u64 +mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; +} + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@ -370,7 +384,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; @@ -403,9 +418,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num); _bstats_update(stats_ptr->bstats, tx_bytes, 
tx_packets); stats_ptr->qstats->overlimits += overlimits; @@ -576,9 +592,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, tx_packets = stats->tx_packets - stats_base->tx_packets; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops; @@ -614,7 +630,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; } @@ -651,6 +667,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) return 0; + if (!p->child_handle) { + /* This is an invisible FIFO replacing the original Qdisc. + * Ignore it--the original Qdisc's destroy will follow. + */ + return 0; + } + /* See if the grafted qdisc is already offloaded on any tclass. If so, * unoffload it. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index de6cb22f68b1..f0e98ec8f1ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its * bytes as being sent. diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index b339125b2f09..05e760444a92 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); + spin_lock_init(&lp->lock); + for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (skb == NULL) { @@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev) return 0; } +/* Wait for the SONIC to become idle. */ +static void sonic_quiesce(struct net_device *dev, u16 mask) +{ + struct sonic_local * __maybe_unused lp = netdev_priv(dev); + int i; + u16 bits; + + for (i = 0; i < 1000; ++i) { + bits = SONIC_READ(SONIC_CMD) & mask; + if (!bits) + return; + if (irqs_disabled() || in_interrupt()) + udelay(20); + else + usleep_range(100, 200); + } + WARN_ONCE(1, "command deadline expired! 
0x%04x\n", bits); +} /* * Close the SONIC device @@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev) /* * stop the SONIC, disable interrupts */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev) * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev) * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. - * Until some mutual exclusion is added, this code will not work with SMP. However, - * MIPS Jazz machines and m68k Macs were all uni-processor machines. */ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; - int entry = lp->next_tx; + int entry; + unsigned long flags; netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); @@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->next_tx; + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ @@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); - /* - * Must set tx_skb[entry] only after clearing status, and - * before clearing EOL and before stopping queue - */ wmb(); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; @@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; } @@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; + unsigned long flags; + + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() + * with sonic_send_packet() so that the two functions can share state. + * Secondly, it makes sonic_interrupt() re-entrant, as that is required + * by macsonic which must use two IRQs with different priority levels. 
+ */ + spin_lock_irqsave(&lp->lock, flags); + + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + if (!status) { + spin_unlock_irqrestore(&lp->lock, flags); - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; + } do { + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ + if (status & SONIC_INT_PKTRX) { netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { @@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) int td_status; int freed_some = 0; - /* At this point, cur_tx is the index of a TD that is one of: - * unallocated/freed (status set & tx_skb[entry] clear) - * allocated and sent (status set & tx_skb[entry] set ) - * allocated and not yet sent (status clear & tx_skb[entry] set ) - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + /* The state of a Transmit Descriptor may be inferred + * from { tx_skb[entry], td_status } as follows. + * { clear, clear } => the TD has never been used + * { set, clear } => the TD was handed to SONIC + * { set, set } => the TD was handed back + * { clear, set } => the TD is available for re-use */ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); @@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; - if (td_status & 0x0001) { + if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { - lp->stats.tx_errors++; - if (td_status & 0x0642) + if (td_status & (SONIC_TCR_EXD | + SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; - if (td_status & 0x0180) + if (td_status & + (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; - if (td_status & 0x0020) + if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; - if (td_status & 0x0004) + if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } @@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* @@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (status & SONIC_INT_RFO) { netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", __func__); - lp->stats.rx_fifo_errors++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } /* counter overruns; all counters are 16bit wide */ - if (status & SONIC_INT_FAE) { + if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ - } - if (status & SONIC_INT_CRC) { + if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ - } - if (status & SONIC_INT_MP) { + if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; - SONIC_WRITE(SONIC_ISR, 
SONIC_INT_MP); /* clear the interrupt */ - } /* transmit error */ if (status & SONIC_INT_TXER) { - if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) - netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + u16 tcr = SONIC_READ(SONIC_TCR); + + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", + __func__, tcr); + + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | + SONIC_TCR_FU | SONIC_TCR_BCM)) { + /* Aborted transmission. Try again. */ + netif_stop_queue(dev); + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + } } /* bus retry */ @@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } - /* load CAM done */ - if (status & SONIC_INT_LCD) - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + } while (status); + + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_HANDLED; } +/* Return the array index corresponding to a given Receive Buffer pointer. */ +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, + unsigned int last) +{ + unsigned int i = last; + + do { + i = (i + 1) & SONIC_RRS_MASK; + if (addr == lp->rx_laddr[i]) + return i; + } while (i != last); + + return -ENOENT; +} + +/* Allocate and map a new skb to be used as a receive buffer. */ +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, + struct sk_buff **new_skb, dma_addr_t *new_addr) +{ + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (!*new_skb) + return false; + + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(*new_skb, 2); + + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!*new_addr) { + dev_kfree_skb(*new_skb); + *new_skb = NULL; + return false; + } + + return true; +} + +/* Place a new receive resource in the Receive Resource Area and update RWP. */ +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, + dma_addr_t old_addr, dma_addr_t new_addr) +{ + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); + u32 buf; + + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop + * scans the other resources in the RRA, those in the range [RWP, RRP). + */ + do { + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); + + if (buf == old_addr) + break; + + entry = (entry + 1) & SONIC_RRS_MASK; + } while (entry != end); + + WARN_ONCE(buf != old_addr, "failed to find resource!\n"); + + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); + + entry = (entry + 1) & SONIC_RRS_MASK; + + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); +} + /* * We have a good packet(s), pass it/them up the network stack. 
*/ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); - int status; int entry = lp->cur_rx; + int prev_entry = lp->eol_rx; + bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { - struct sk_buff *used_skb; - struct sk_buff *new_skb; - dma_addr_t new_laddr; - u16 bufadr_l; - u16 bufadr_h; - int pkt_len; - - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - if (status & SONIC_RCR_PRX) { - /* Malloc up new buffer. */ - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); - if (new_skb == NULL) { - lp->stats.rx_dropped++; + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); + + /* If the RD has LPKT set, the chip has finished with the RB */ + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u32 addr = (sonic_rda_get(dev, entry, + SONIC_RD_PKTPTR_H) << 16) | + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); + int i = index_from_addr(lp, addr, entry); + + if (i < 0) { + WARN_ONCE(1, "failed to find buffer!\n"); break; } - /* provide 16 byte IP header alignment unless DMA requires otherwise */ - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) - skb_reserve(new_skb, 2); - - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), - SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!new_laddr) { - dev_kfree_skb(new_skb); - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); + + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { + struct sk_buff *used_skb = lp->rx_skb[i]; + int pkt_len; + + /* Pass the used buffer up the stack */ + dma_unmap_single(lp->device, addr, SONIC_RBSIZE, + DMA_FROM_DEVICE); + + pkt_len = sonic_rda_get(dev, entry, + SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, + dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + lp->rx_skb[i] = new_skb; + lp->rx_laddr[i] = new_laddr; + } else { + /* Failed to obtain a new buffer so re-use it */ + new_laddr = addr; lp->stats.rx_dropped++; - break; } - - /* now we have a new skb to replace it, pass the used one up the stack */ - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); - used_skb = lp->rx_skb[entry]; - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); - skb_trim(used_skb, pkt_len); - used_skb->protocol = eth_type_trans(used_skb, dev); - netif_rx(used_skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; - - /* and insert the new skb */ - lp->rx_laddr[entry] = new_laddr; - lp->rx_skb[entry] = new_skb; - - bufadr_l = (unsigned long)new_laddr & 0xffff; - bufadr_h = (unsigned long)new_laddr >> 16; - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); - } else { - /* This should only happen, if we enable accepting broken packets. */ - lp->stats.rx_errors++; - if (status & SONIC_RCR_FAER) - lp->stats.rx_frame_errors++; - if (status & SONIC_RCR_CRCR) - lp->stats.rx_crc_errors++; - } - if (status & SONIC_RCR_LPKT) { - /* - * this was the last packet out of the current receive buffer - * give the buffer back to the SONIC + /* If RBE is already asserted when RWP advances then + * it's safe to clear RBE after processing this packet. 
*/ - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ - } - } else - printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n", - dev->name); + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; + sonic_update_rra(dev, lp, addr, new_laddr); + } /* * give back the descriptor */ - sonic_rda_put(dev, entry, SONIC_RD_LINK, - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); - lp->eol_rx = entry; - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + + prev_entry = entry; + entry = (entry + 1) & SONIC_RDS_MASK; + } + + lp->cur_rx = entry; + + if (prev_entry != lp->eol_rx) { + /* Advance the EOL flag to put descriptors back into service */ + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); + lp->eol_rx = prev_entry; } + + if (rbe) + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* * If any worth-while packets have been received, netif_rx() * has done a mark_bh(NET_BH) for us and will work on them @@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { + unsigned long flags; + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ @@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev) i++; } SONIC_WRITE(SONIC_CDC, 16); - /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + + /* LCAM and TXP commands can't be used simultaneously */ + spin_lock_irqsave(&lp->lock, flags); + sonic_quiesce(dev, SONIC_CR_TXP); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + sonic_quiesce(dev, SONIC_CR_LCAM); + spin_unlock_irqrestore(&lp->lock, flags); } } @@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev) */ static int sonic_init(struct net_device *dev) { - unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; @@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + /* While in reset mode, clear CAM Enable register */ + SONIC_WRITE(SONIC_CE, 0); + /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); + sonic_quiesce(dev, SONIC_CR_ALL); /* * initialize the receive resource area @@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev) } /* initialize all RRA registers */ - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_REA, lp->rra_end); - 
SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); @@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) - break; - } - - netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), i); + sonic_quiesce(dev, SONIC_CR_RRRA); /* * Initialize the receive descriptors so that they @@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev) * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); - - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) - break; - } - netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); + sonic_quiesce(dev, SONIC_CR_LCAM); /* * enable receiver, disable loopback * and enable all interrupts */ - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); - - cmd = SONIC_READ(SONIC_CMD); - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, SONIC_READ(SONIC_CMD)); diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h index 2b27f7049acb..1df6d2f06cc4 100644 --- a/drivers/net/ethernet/natsemi/sonic.h +++ b/drivers/net/ethernet/natsemi/sonic.h @@ -110,6 +110,9 @@ #define SONIC_CR_TXP 0x0002 #define SONIC_CR_HTX 0x0001 +#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \ + SONIC_CR_RXEN | SONIC_CR_TXP) + /* * SONIC data configuration bits */ @@ -175,6 +178,7 @@ #define SONIC_TCR_NCRS 0x0100 #define SONIC_TCR_CRLS 0x0080 #define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_OWC 0x0020 #define SONIC_TCR_PMB 0x0008 #define SONIC_TCR_FU 0x0004 #define SONIC_TCR_BCM 0x0002 @@ -274,8 +278,9 @@ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */ -#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) -#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) +#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1) +#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1) +#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1) #define SONIC_RBSIZE 1520 /* size of one resource buffer */ @@ -312,8 +317,6 @@ struct sonic_local { u32 rda_laddr; /* logical DMA address of RDA */ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ - unsigned int rra_end; - unsigned int cur_rwp; unsigned int cur_rx; unsigned int cur_tx; /* first unacked transmit packet */ unsigned int eol_rx; @@ -322,6 +325,7 @@ struct sonic_local { int msg_enable; struct device *device; /* generic device */ struct net_device_stats stats; + spinlock_t lock; }; #define TX_TIMEOUT (3 * HZ) @@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev); as far as we can 
tell. */ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() is a much better name. */ -static inline void sonic_buf_put(void* base, int bitmode, +static inline void sonic_buf_put(u16 *base, int bitmode, int offset, __u16 val) { if (bitmode) #ifdef __BIG_ENDIAN - ((__u16 *) base + (offset*2))[1] = val; + __raw_writew(val, base + (offset * 2) + 1); #else - ((__u16 *) base + (offset*2))[0] = val; + __raw_writew(val, base + (offset * 2) + 0); #endif else - ((__u16 *) base)[offset] = val; + __raw_writew(val, base + (offset * 1) + 0); } -static inline __u16 sonic_buf_get(void* base, int bitmode, +static inline __u16 sonic_buf_get(u16 *base, int bitmode, int offset) { if (bitmode) #ifdef __BIG_ENDIAN - return ((volatile __u16 *) base + (offset*2))[1]; + return __raw_readw(base + (offset * 2) + 1); #else - return ((volatile __u16 *) base + (offset*2))[0]; + return __raw_readw(base + (offset * 2) + 0); #endif else - return ((volatile __u16 *) base)[offset]; + return __raw_readw(base + (offset * 1) + 0); } /* Inlines that you should actually use for reading/writing DMA buffers */ @@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry, (entry * SIZEOF_SONIC_RR) + offset); } +static inline u16 sonic_rr_addr(struct net_device *dev, int entry) +{ + struct sonic_local *lp = netdev_priv(dev); + + return lp->rra_laddr + + entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); +} + +static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr) +{ + struct sonic_local *lp = netdev_priv(dev); + + return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)); +} + static const char version[] = "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a496390b8632..07f9067affc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, break; } entry += p_hdr->size; + cond_resched(); } p_dev->ahw->reset.seq_index = index; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index afa10a163da1..f34ae8c75bc5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, addr += 16; reg_read -= 16; ret += 16; + cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); @@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; + cond_resched(); } fw_dump->clr = 1; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e19b49c4013e..3591285250e1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) if (cd->tsu) { add_tsu_reg(ARSTR); add_tsu_reg(TSU_CTRST); - add_tsu_reg(TSU_FWEN0); - add_tsu_reg(TSU_FWEN1); - add_tsu_reg(TSU_FCM); - add_tsu_reg(TSU_BSYSL0); - add_tsu_reg(TSU_BSYSL1); - add_tsu_reg(TSU_PRISL0); - add_tsu_reg(TSU_PRISL1); - add_tsu_reg(TSU_FWSL0); - add_tsu_reg(TSU_FWSL1); + if (cd->dual_port) { + 
add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAGM0); - add_tsu_reg(TSU_QTAGM1); - add_tsu_reg(TSU_FWSR); - add_tsu_reg(TSU_FWINMK); - add_tsu_reg(TSU_ADQT0); - add_tsu_reg(TSU_ADQT1); - add_tsu_reg(TSU_VTAG0); - add_tsu_reg(TSU_VTAG1); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } add_tsu_reg(TSU_ADSBSY); add_tsu_reg(TSU_TEN); add_tsu_reg(TSU_POST1); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c56fcbb37066..52ed111d98f4 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt); -MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver"); +MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver"); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value"); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7e927ad67fa..b7032422393f 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev, phy_ethtool_get_wol(ndev->phydev, wol); } -static int ave_ethtool_set_wol(struct net_device *ndev, - struct ethtool_wolinfo *wol) +static int __ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) { - int ret; - if (!ndev->phydev || (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) return -EOPNOTSUPP; - ret = phy_ethtool_set_wol(ndev->phydev, wol); + return phy_ethtool_set_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + ret = __ave_ethtool_set_wol(ndev, wol); if (!ret) device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); @@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev) /* set wol initial state disabled */ wol.wolopts = 0; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); @@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev) ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (ndev->phydev) { ret = phy_resume(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 1c8d84ed8410..01b484cb177e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) /* default */ break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; break; case PHY_INTERFACE_MODE_RMII: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c 
index 26353ef616b8..7d40760e9ba8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) * rate, which then uses the auto-reparenting feature of the * clock driver, and enabling/disabling the clock. */ - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(gmac->interface)) { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); clk_prepare_enable(gmac->tx_clk); gmac->clk_enabled = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f51a265459d..80d59b775907 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif @@ -4256,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); +/* Use network device events to rename debugfs file entries. + */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4270,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev) /* Entry to report the DMA HW features */ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); + + register_netdevice_notifier(&stmmac_notifier); } static void stmmac_exit_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cc8d7e7bf9ac..d10ac54bf385 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -320,7 +320,7 @@ out: static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, struct device_node *np, struct device *dev) { - bool mdio = false; + bool mdio = !of_phy_is_fixed_link(np); static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, {}, @@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) *mac = NULL; } - rc = of_get_phy_mode(np, &plat->phy_interface); - if (rc) - return ERR_PTR(rc); + plat->phy_interface = device_get_phy_mode(&pdev->dev); + if (plat->phy_interface < 0) + return ERR_PTR(plat->phy_interface); plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 13227909287c..450d7dac3ea6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -80,7 +80,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, if (attr->max_size && (attr->max_size > size)) size = attr->max_size; - skb = netdev_alloc_skb_ip_align(priv->dev, size); + skb = netdev_alloc_skb(priv->dev, size); if (!skb) return NULL; @@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + unsigned char *src = tpriv->packet->src; + unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, goto out; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (tpriv->packet->dst) { - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (dst) { + if (!ether_addr_equal_unaligned(ehdr->h_dest, dst)) goto out; } if (tpriv->packet->sarc) { - if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest)) goto out; - } else if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) + } else if (src) { + if (!ether_addr_equal_unaligned(ehdr->h_source, src)) goto out; } @@ -714,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb, struct ethhdr *ehdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr)) goto out; if (ehdr->h_proto != htons(ETH_P_PAUSE)) goto out; @@ -851,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb, if (tpriv->vlan_id) { if (skb->vlan_proto != htons(proto)) goto out; - if (skb->vlan_tci != tpriv->vlan_id) + if (skb->vlan_tci != tpriv->vlan_id) { + /* Means filter did not work. 
*/ + tpriv->ok = false; + complete(&tpriv->comp); goto out; + } } ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst)) goto out; ihdr = ip_hdr(skb); @@ -965,6 +971,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_vlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1057,6 +1066,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_dvlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1323,16 +1335,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1399,7 +1414,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1444,16 +1460,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1525,7 +1544,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1578,7 +1598,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb, struct arphdr *ahdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) goto out; ahdr = arp_hdr(skb); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 7d972e0fd2b0..9ffae12a2122 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, { int ret = 0; + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + switch (cls->command) { case FLOW_CLS_REPLACE: ret = tc_add_flow(priv, cls); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index fca471e27f39..9b3ba98726d7 100644 
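/*
 * Illustrative sketch, not part of the patches: the stmmac_selftests hunks
 * above move from ether_addr_equal() to ether_addr_equal_unaligned() because
 * the former requires both operands to be u16-aligned, while these addresses
 * live in packet-attribute buffers with no such guarantee. Where unaligned
 * loads are not cheap, the _unaligned variant is effectively a byte-wise
 * compare:
 */
#include <linux/etherdevice.h>
#include <linux/string.h>

static bool mac_addr_matches(const u8 *hdr_addr, const u8 *want)
{
	/* memcmp() makes no alignment assumptions about either pointer. */
	return memcmp(hdr_addr, want, ETH_ALEN) == 0;
}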
--- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, return NULL; } - if (sock->sk->sk_protocol != IPPROTO_UDP) { + sk = sock->sk; + if (sk->sk_protocol != IPPROTO_UDP || + sk->sk_type != SOCK_DGRAM || + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } - lock_sock(sock->sk); - if (sock->sk->sk_user_data) { + lock_sock(sk); + if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); - goto out_sock; + goto out_rel_sock; } - sk = sock->sk; sock_hold(sk); tuncfg.sk_user_data = gtp; @@ -826,8 +828,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); -out_sock: +out_rel_sock: release_sock(sock->sk); +out_sock: sockfd_put(sock); return sk; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 857c4bea451c..e66d77dc28c8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1443,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(net_dev, rndis_dev); - net_dev->extension = NULL; - netvsc_device_remove(dev); } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05631d97eeb4..c5bf61565726 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 059711edfc61..4b39aba2e9c4 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file, get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE); - id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev)); + id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev)); err = devlink_region_snapshot_create(nsim_dev->dummy_region, dummy_data, id, kfree); if (err) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 5848219005d7..8dc461f7574b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -340,14 +340,14 @@ config DAVICOM_PHY Currently supports dm9161e and dm9131 config DP83822_PHY - tristate "Texas Instruments DP83822 PHY" + tristate "Texas Instruments DP83822/825 PHYs" ---help--- - Supports the DP83822 PHY. + Supports the DP83822 and DP83825I PHYs. config DP83TC811_PHY - tristate "Texas Instruments DP83TC822 PHY" + tristate "Texas Instruments DP83TC811 PHY" ---help--- - Supports the DP83TC822 PHY. + Supports the DP83TC811 PHY. 
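/*
 * Illustrative sketch, not part of the patches (hypothetical foo_* names):
 * the slip.c hunk below fixes a use-after-free race between the tty wakeup
 * callback and slip_close() with the standard RCU retire sequence - readers
 * dereference the pointer inside an RCU read-side section, and teardown
 * unpublishes it and waits out the readers before flushing the queued work:
 */
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct tx_work;
};

static struct foo __rcu *foo_ptr;

/* Reader, e.g. an atomic-context wakeup callback. */
static void foo_wakeup(void)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(foo_ptr);
	if (f)
		schedule_work(&f->tx_work);
	rcu_read_unlock();
}

/* Teardown: unpublish, wait out readers, then flush the work. */
static void foo_close(struct foo *f)
{
	rcu_assign_pointer(foo_ptr, NULL);
	synchronize_rcu();		/* no reader still sees 'f' ... */
	flush_work(&f->tx_work);	/* ... so nothing can requeue now */
}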
config DP83848_PHY tristate "Texas Instruments DP83848 PHY" diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 9cd9dcee4eb2..01cf71358359 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -97,6 +97,7 @@ #define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03 #define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14) #define DP83867_PHYCR_RESERVED_MASK BIT(11) +#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf @@ -599,7 +600,12 @@ static int dp83867_phy_reset(struct phy_device *phydev) usleep_range(10, 20); - return 0; + /* After reset FORCE_LINK_GOOD bit is set. Although the + * default value should be unset. Disable FORCE_LINK_GOOD + * for the phy to work properly. + */ + return phy_modify(phydev, MII_DP83867_PHYCTRL, + DP83867_PHYCR_FORCE_LINK_GOOD, 0); } static struct phy_driver dp83867_driver[] = { diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 1585eebb73fe..ee7a718662c6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -566,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl, struct sfp_bus *bus; int ret; + if (!fwnode) + return 0; + bus = sfp_bus_find_fwnode(fwnode); if (IS_ERR(bus)) { ret = PTR_ERR(bus); diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 2a91c192659f..61d7e0d1d77d 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) */ static void slip_write_wakeup(struct tty_struct *tty) { - struct slip *sl = tty->disc_data; + struct slip *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev) @@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 683d371e6e82..35e884a8242d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1936,6 +1936,10 @@ drop: if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); + if (frags) { + tfile->napi.skb = NULL; + mutex_unlock(&tfile->napi_mutex); + } return total_len; } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f940dc6485e5..c2a58f05b9a1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -20,6 +20,7 @@ #include <linux/mdio.h> #include <linux/phy.h> #include <net/ip6_checksum.h> +#include <net/vxlan.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> @@ -2724,11 +2725,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static int lan78xx_linearize(struct sk_buff *skb) -{ - return skb_linearize(skb); -} - static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { @@ -2740,8 +2736,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, return NULL; } - if (lan78xx_linearize(skb) < 0) + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); return NULL; + } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; @@ -3671,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net) tasklet_schedule(&dev->bh); } +static netdev_features_t lan78xx_features_check(struct sk_buff *skb, + 
struct net_device *netdev, + netdev_features_t features) +{ + if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) + features &= ~NETIF_F_GSO_MASK; + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, @@ -3684,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, + .ndo_features_check = lan78xx_features_check, }; static void lan78xx_stat_monitor(struct timer_list *t) @@ -3753,6 +3765,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4196c0e32740..9485c8d1de8a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c5ebf35d2488..3f425f974d03 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -31,7 +31,7 @@ #define NETNEXT_VERSION "11" /* Information for net */ -#define NET_VERSION "10" +#define NET_VERSION "11" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -68,6 +68,7 @@ #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 +#define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEEP_CR 0xe080 @@ -95,6 +96,7 @@ #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 +#define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -107,6 +109,7 @@ #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e +#define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 @@ -300,6 +303,9 @@ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 +/* PLA_CONFIG6 */ +#define LANWAKE_CLR_EN BIT(0) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -312,6 +318,7 @@ /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 +#define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 @@ -324,6 +331,7 @@ #define MAC_CLK_SPDWN_EN BIT(15) /* PLA_MAC_PWR_CTRL3 */ +#define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 @@ -354,6 +362,9 @@ /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 +/* PLA_LWAKE_CTRL_REG */ +#define LANWAKE_PIN BIT(7) + /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) @@ -365,13 +376,18 @@ #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ +#define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) +#define POLL_LINK_CHG BIT(0) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 +/* USB_SSPHYLINK1 */ +#define DELAY_PHY_PWR_CHG BIT(1) + /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) @@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); + if (tp->version == RTL_VER_09) { + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + } + return rtl_enable(tp); } @@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } } @@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) r8153_aldps_en(tp, true); r8152b_enable_fc(tp); - r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } @@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp) static void rtl8153_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp) r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + + ocp_data = ocp_read_word(tp, 
MCU_TYPE_USB, USB_SSPHYLINK1); + ocp_data &= ~DELAY_PHY_PWR_CHG; + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); + r8153_aldps_en(tp, true); switch (tp->version) { @@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp) static void rtl8153_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data &= ~LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); @@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp) static void rtl8153b_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp) r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153_aldps_en(tp, true); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); @@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + + r8153_queue_wake(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); r8153_u1u2en(tp, true); r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); @@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153b_u1u2en(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ @@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp) ocp_data |= MAC_CLK_SPDWN_EN; 
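/*
 * Illustrative sketch, not part of the patches: the r8152 hunks here repeat
 * one idiom - read an OCP register, set or clear a bit, write it back. The
 * ocp_read_word()/ocp_write_word() prototypes below are assumptions inferred
 * from the surrounding context (they are driver-internal helpers); a generic
 * read-modify-write wrapper in the same spirit would be:
 */
#include <linux/types.h>

struct r8152;

u32 ocp_read_word(struct r8152 *tp, u16 type, u16 index);
void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data);

static void ocp_modify_word(struct r8152 *tp, u16 type, u16 index,
			    u32 clear, u32 set)
{
	u32 ocp_data = ocp_read_word(tp, type, index);

	ocp_data &= ~clear;
	ocp_data |= set;
	ocp_write_word(tp, type, index, ocp_data);
}

/*
 * With it, a toggle such as the PLA_MAC_PWR_CTRL3 updates nearby collapses to
 *	ocp_modify_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
 *			PLA_MCU_SPDWN_EN, 0);
 */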
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (tp->version == RTL_VER_09) { + /* Disable Test IO for 32QFN */ + if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TEST_IO_OFF; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + } + } + set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ @@ -6597,6 +6699,9 @@ static int rtl8152_probe(struct usb_interface *intf, return -ENODEV; } + if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { @@ -6704,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf, intf->needs_remote_wakeup = 1; + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + else + tp->saved_wolopts = __rtl_get_wol(tp); + tp->rtl_ops.init(tp); #if IS_BUILTIN(CONFIG_USB_RTL8152) /* Retry in case request_firmware() is not ready yet. */ @@ -6721,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } - if (!rtl_can_wakeup(tp)) - __rtl_set_wol(tp, 0); - - tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3ec6b506033d..1c5159dcc720 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? 
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index ca0f3be2b6bf..aef7de225783 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = { }, }; -static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; +static struct ucc_tdm_info utdm_info[UCC_MAX_NUM]; static int uhdlc_init(struct ucc_hdlc_private *priv) { diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0f1217b506ad..e30d91a38cfb 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) { struct lapbethdev *lapbeth; - list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) { + list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) { if (lapbeth->ethdev == dev) return lapbeth; } diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index e2e679a01b65..77ccf3672ede 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb, spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); - pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); + pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); SDLA_WINDOW(dev, addr); pbuf->opp_flag = 1; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f43c06569ea1..c4c8f1b62e1e 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; - case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; - case AIROGWEPKNV: ridcode = RID_WEP_PERM; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; + case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break; + case AIROGWEPKNV: ridcode = RID_WEP_PERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; @@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { return -EINVAL; } - if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) + if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) { + /* Only super-user can read WEP keys */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + } + + if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index cd73fc5cfcbb..fd454836adbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_station_priv *sta_priv = NULL; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; __le16 fc; u8 hdr_len; @@ 
-348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, if (unlikely(!dev_cmd)) goto drop_unlock_priv; - memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = REPLY_TX; tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 40fe2d667622..48d375a86d86 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; - int i, n_profiles, tbl_rev; - int ret = 0; + int i, n_profiles, tbl_rev, pos; + int ret = 0; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) @@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - for (i = 0; i < n_profiles; i++) { - /* the tables start at element 3 */ - int pos = 3; + /* the tables start at element 3 */ + pos = 3; + for (i = 0; i < n_profiles; i++) { /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index ed90dd104366..4c60f9959f7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, { int ret = 0; - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in changing the recording state - */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || - (!fwrt->trans->dbg.dest_tlv && - fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return 0; if (fw_has_capa(&fwrt->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 92d9898ab7c2..c2f7252ae4e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -379,7 +379,7 @@ enum { /* CSR GIO */ -#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) +#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index f266647dc08c..ce8f248c33ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt, if (!frag || frag->size || !pages) return -EIO; - while (pages) { + /* + * We try to allocate as many pages as we can, starting with + * the requested amount and going down until we can allocate + * something. Because of DIV_ROUND_UP(), pages will never go + * down to 0 and stop the loop, so stop when pages reaches 1, + * which is too small anyway. 
+ */ + while (pages > 1) { block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE, &physical, GFP_KERNEL | __GFP_NOWARN); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4096ccf58b07..bc8c959588ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling, module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); -module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444); -MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); - module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index ebea3f308b5d..82e5cac23d8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -115,7 +115,6 @@ enum iwl_uapsd_disable { * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT - * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. @@ -136,7 +135,6 @@ struct iwl_mod_params { int antenna_coupling; char *nvm_file; u32 uapsd_disable; - bool lar_disable; bool fw_monitor; bool disable_11ac; /** diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 1e240a2a8329..d4f834b52f50 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags { NVM_CHANNEL_DC_HIGH = BIT(12), }; +/** + * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * 2.4Ghz band is allowed. + * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * 5Ghz band is allowed. + * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. 
+ */ +enum iwl_reg_capa_flags { + REG_CAPA_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_160MHZ_ALLOWED = BIT(2), + REG_CAPA_80MHZ_ALLOWED = BIT(3), + REG_CAPA_MCS_8_ALLOWED = BIT(4), + REG_CAPA_MCS_9_ALLOWED = BIT(5), + REG_CAPA_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_DC_HIGH_ENABLED = BIT(9), +}; + static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { @@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported) + u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; bool lar_enabled; @@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } - if (lar_fw_supported && lar_enabled) + if (lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw)) @@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, + u16 cap_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; + /* + * cap_flags is per regulatory domain so apply it for every channel + */ + if (ch_idx >= NUM_2GHZ_CHANNELS) { + if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN) + flags |= NL80211_RRF_NO_HT40; + + if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_80MHZ; + + if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_160MHZ; + } + return flags; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info) + u16 geo_info, u16 cap) { int ch_idx; u16 ch_flags; @@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cfg); + ch_flags, cap, + cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || @@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; @@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + if (le32_to_cpu(rsp->regulatory.lar_enabled) && + fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index b7e1ddf8f177..fb0b385d10fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags { */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported); + u8 tx_chains, u8 rx_chains); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW @@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info); + u16 geo_info, u16 cap); /** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 28bdc9a9617e..f91197e4ae40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -66,7 +66,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops) + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd), - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); + cmd_pool_size, cmd_pool_align, + SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 8cadad7364ac..e33df5ad00e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -193,6 +193,18 @@ struct iwl_device_cmd { }; } __packed; +/** + * struct iwl_device_tx_cmd - buffer for TX command + * @hdr: the header + * @payload: the payload placeholder + * + * The actual structure is sized dynamically according to need. 
+ */ +struct iwl_device_tx_cmd { + struct iwl_cmd_header hdr; + u8 payload[]; +} __packed; + #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) /* @@ -544,7 +556,7 @@ struct iwl_trans_ops { int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue); + struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); @@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) return trans->ops->dump_data(trans, dump_mask); } -static inline struct iwl_device_cmd * +static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { - return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, - struct iwl_device_cmd *dev_cmd) + struct iwl_device_tx_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue) + struct iwl_device_tx_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; @@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops); + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 60aff2ecec12..58df25e2fb32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -154,5 +154,6 @@ #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT false #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_USE_NSSN_SYNC 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dd685f7eb410..c09624d8d7ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) return 0; } + if (!mvm->fwrt.ppag_table.enabled) { + IWL_DEBUG_RADIO(mvm, + "PPAG not enabled, command not sent.\n"); + return 0; + } + IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); - IWL_DEBUG_RADIO(mvm, "PPAG is %s\n", - mvm->fwrt.ppag_table.enabled ? 
"enabled" : "disabled"); for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 32dc9d6f0fb6..6717f25c46b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), - __le16_to_cpu(resp->geo_info)); + __le16_to_cpu(resp->geo_info), + __le16_to_cpu(resp->cap)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return ret; } +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + if (likely(sta)) { + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) + return; + } else { + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) + return; + } + + ieee80211_free_txskb(mvm->hw, skb); +} + static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } } - if (sta) { - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); @@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) break; } - if (!txq->sta) - iwl_mvm_tx_skb_non_sta(mvm, skb); - else - iwl_mvm_tx_skb(mvm, skb, txq->sta); + iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); @@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, return ret; } +static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) +{ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + rinfo->bw = RATE_INFO_BW_20; + break; + case RATE_MCS_CHAN_WIDTH_40: + rinfo->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rinfo->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rinfo->bw = RATE_INFO_BW_160; + break; + } + + if (rate_n_flags & RATE_MCS_HT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_HT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + u32 gi_ltf = u32_get_bits(rate_n_flags, + RATE_MCS_HE_GI_LTF_MSK); + + rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + + if (rate_n_flags & RATE_MCS_HE_106T_MSK) { + rinfo->bw = RATE_INFO_BW_HE_RU; + rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + if (gi_ltf == 0 || 
gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_MU: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_TRIG: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) + rinfo->he_dcm = 1; + } else { + switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) { + case IWL_RATE_1M_PLCP: + rinfo->legacy = 10; + break; + case IWL_RATE_2M_PLCP: + rinfo->legacy = 20; + break; + case IWL_RATE_5M_PLCP: + rinfo->legacy = 55; + break; + case IWL_RATE_11M_PLCP: + rinfo->legacy = 110; + break; + case IWL_RATE_6M_PLCP: + rinfo->legacy = 60; + break; + case IWL_RATE_9M_PLCP: + rinfo->legacy = 90; + break; + case IWL_RATE_12M_PLCP: + rinfo->legacy = 120; + break; + case IWL_RATE_18M_PLCP: + rinfo->legacy = 180; + break; + case IWL_RATE_24M_PLCP: + rinfo->legacy = 240; + break; + case IWL_RATE_36M_PLCP: + rinfo->legacy = 360; + break; + case IWL_RATE_48M_PLCP: + rinfo->legacy = 480; + break; + case IWL_RATE_54M_PLCP: + rinfo->legacy = 540; + break; + } + } +} + static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } + if (iwl_mvm_has_tlc_offload(mvm)) { + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + + iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 3ec8de00f3aa..67ab7e7e9c9d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - if (iwlwifi_mod_params.lar_disable) - return false; - /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM @@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 945c1ea5cda8..46128a2a9c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -277,11 +277,10 @@ 
iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; - bool lar_enabled; int regulatory_type; /* Checking for required sections */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { + if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); @@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; - lar_enabled = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled); + mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index ef99c49247b7..c15f7dbc9516 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, - }; - - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); + if (IWL_MVM_USE_NSSN_SYNC) { + struct iwl_mvm_rss_sync_notif notif = { + .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, + .metadata.sync = 0, + .nssn_sync.baid = baid, + .nssn_sync.nssn = nssn, + }; + + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, + sizeof(notif)); + } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a046ac9fa852..a5af8f4128b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); - cmd_size += num_channels; + cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dc5c02fbc65a..ddfc9a668036 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ -static struct iwl_device_cmd * +static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); @@ -504,11 +504,6 @@ 
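/*
 * Illustrative sketch, not part of the patches: the iwl_mvm_set_tx_params()
 * hunk just below can drop its memset() because the iwl-trans.h hunk earlier
 * switched iwl_trans_alloc_tx_cmd() from kmem_cache_alloc() to
 * kmem_cache_zalloc(). Zeroing at the allocator is the safer idiom once the
 * object becomes dynamically sized, since a per-caller memset() is easy to
 * under-size. Minimal form (hypothetical foo_cmd type):
 */
#include <linux/slab.h>

struct foo_cmd {
	u32 hdr;
	u8 payload[];		/* sized by the cache, not the struct */
};

static struct foo_cmd *foo_alloc_cmd(struct kmem_cache *pool)
{
	/* Every object leaves the pool fully zeroed, whatever its size. */
	return kmem_cache_zalloc(pool, GFP_ATOMIC);
}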
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (unlikely(!dev_cmd)) return NULL; - /* Make sure we zero enough of dev_cmd */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); - - memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { @@ -597,7 +592,7 @@ out: } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, - struct iwl_device_cmd *cmd) + struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); @@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; @@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; @@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); - return 0; + return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1206,8 +1201,8 @@ drop: return -1; } -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index d38cefbb779e..e249e3fd14c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -57,6 +57,42 @@ #include "internal.h" #include "iwl-prph.h" +static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys, + int depth) +{ + void *result; + + if (WARN(depth > 2, + "failed to allocate DMA memory not crossing 2^32 boundary")) + return NULL; + + result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL); + + if (!result) + return NULL; + + if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) { + void *old = result; + dma_addr_t oldphys = *phys; + + result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, + phys, + depth + 1); + dma_free_coherent(trans->dev, size, old, oldphys); + } + + return result; +} + +static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) +{ + return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); +} + void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; @@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; u32 control_flags = 0, rb_size; + dma_addr_t phys; int ret; - ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), - &trans_pcie->ctxt_info_dma_addr, - GFP_KERNEL); + ctxt_info = 
iwl_pcie_ctxt_info_dma_alloc_coherent(trans, + sizeof(*ctxt_info), + &phys); if (!ctxt_info) return -ENOMEM; + trans_pcie->ctxt_info_dma_addr = phys; + ctxt_info->version.version = 0; ctxt_info->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a091690f6c79..f14bcef3495e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -305,7 +305,7 @@ struct iwl_cmd_meta { #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { - struct iwl_device_cmd *cmd; + void *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; @@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. + */ +static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + int iwl_pcie_tx_init(struct iwl_trans *trans); int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size); @@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); @@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans); void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb); #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb); #endif /* common functions that are used by gen3 transport */ @@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 452da44a21e0..f0b8ff67a1bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1529,13 +1529,13 @@ out: napi = &rxq->napi; if (napi->poll) { + napi_gro_flush(napi, false); + if (napi->rx_count) { netif_receive_skb_list(&napi->rx_list); INIT_LIST_HEAD(&napi->rx_list); napi->rx_count = 0; } - - napi_gro_flush(napi, false); } iwl_pcie_rxq_restock(trans, rxq); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index a0677131634d..f60d66f1e55b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -79,6 +79,7 @@ #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" +#include "fw/api/tx.h" #include "internal.h" #include "iwl-fh.h" @@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans) u16 cap; /* - * HW bug W/A for instability in PCIe bus L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. + * L0S states have been found to be unstable with our devices + * and in newer hardware they are not officially supported at + * all, so we must always set the L0S_DISABLED bit. */ + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) - iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - else - iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); @@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - int ret, addr_size; + int ret, addr_size, txcmd_size, txcmd_align; + const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; + + if (!cfg_trans->gen2) { + ops = &trans_ops_pcie; + txcmd_size = sizeof(struct iwl_tx_cmd); + txcmd_align = sizeof(void *); + } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + txcmd_align = 64; + } else { + txcmd_size = sizeof(struct iwl_tx_cmd_gen3); + txcmd_align = 128; + } + + txcmd_size += sizeof(struct iwl_cmd_header); + txcmd_size += 36; /* biggest possible 802.11 header */ + + /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ + if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) + return ERR_PTR(-EINVAL); ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); - if (cfg_trans->gen2) - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie_gen2); - else - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie); - + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + txcmd_size, txcmd_align); if (!trans) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 8ca0250de99e..bfb984b2e00c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; + /* + * Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... 
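+ *
+ * As a concrete illustration of what the WARN below catches: a TB of
+ * len 0x1000 mapped at addr 0xfffff800 ends at 0x100000800, so
+ * upper_32_bits() of the start and of the end differ and the device
+ * would mis-address the tail of the buffer.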
+ */ + WARN(iwl_pcie_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; @@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, return idx; } +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct page **page_ptr; + struct page *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + /* set the chaining pointer to the previous page if there */ + *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. + */ +static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) { + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail. 
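+ *
+ * The copy lands in a freshly allocated page. Pages are naturally
+ * aligned, the copied length is capped at PAGE_SIZE - sizeof(void *)
+ * below, and 2^32 is a multiple of PAGE_SIZE, so a single page can
+ * never straddle the boundary and the new mapping is safe.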
+ */ + + if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + IWL_WARN(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, (unsigned long long)phys); + + ret = 0; +unmap: + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, struct iwl_device_cmd *dev_cmd) + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; @@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), @@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; /* * Pull the ieee80211 header to be able to use TSO core, @@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, dev_kfree_skb(csum_skb); goto out_err; } + /* + * No need for _with_wa, this is from the TSO page and + * we leave some space at the end of it so can't hit + * the buggy scenario. 
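+ *
+ * (The space in question is the page-chaining pointer that
+ * get_page_hdr() now reserves at the tail of every TSO page, so
+ * mapped data always stops at least sizeof(void *) bytes short of
+ * the page boundary.)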
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); @@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, /* put the payload */ while (data_left) { + int ret; + tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, + tb_phys, tso.data, + tb_len, NULL); + if (ret) { dev_kfree_skb(csum_skb); goto out_err; } - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, - tb_phys, tb_len); data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -372,7 +487,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, @@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; - int tb_idx; + unsigned int fragsz = skb_frag_size(frag); + int ret; - if (!skb_frag_size(frag)) + if (!fragsz) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - return -ENOMEM; - tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, - skb_frag_size(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), - tb_phys, skb_frag_size(frag)); - if (tb_idx < 0) - return tb_idx; - - out_meta->tbs |= BIT(tb_idx); + fragsz, DMA_TO_DEVICE); + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta); + if (ret) + return ret; } return 0; @@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). 
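+ *
+ * (A 64-byte aligned buffer no longer than 64 bytes lies entirely
+ * within one 64-byte line, and PAGE_SIZE and 2^32 are both multiples
+ * of 64, so no such line can straddle either boundary.)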
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { + int ret; + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, - tb_phys, tb2_len); } if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { + int ret; + tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data, - tb_phys, skb_headlen(frag)); if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } @@ -538,7 +670,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { @@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; @@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index f21f16ab2a97..b0eb52b4951b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->write_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, int read_ptr = txq->read_ptr; u8 
sta_id = 0; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[read_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); @@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb) { struct page **page_ptr; + struct page *next; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + next = *page_ptr; + *page_ptr = NULL; - if (*page_ptr) { - __free_page(*page_ptr); - *page_ptr = NULL; + while (next) { + struct page *tmp = next; + + next = *(void **)(page_address(next) + PAGE_SIZE - + sizeof(void *)); + __free_page(tmp); } } @@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_cmd *dev_cmd_ptr; + struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); @@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); + struct page **page_ptr; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; if (!p->page) goto alloc; - /* enough room on this page */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) - return p; + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - + sizeof(void *)) + goto out; /* We don't have enough room on this page, get a new one. 
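* Dropping our reference with __free_page() below is safe: every skb
* that placed headers on this page took its own reference via the
* get_page() at the out label, and those references are released in
* iwl_pcie_free_tso_page() by walking the page chain.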
*/ __free_page(p->page); @@ -2072,6 +2095,11 @@ alloc: if (!p->page) return NULL; p->pos = page_address(p->page); + /* set the chaining pointer to NULL */ + *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; +out: + *page_ptr = p->page; + get_page(p->page); return p; } @@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; @@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; @@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); @@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; @@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 57edfada0665..c9401c121a14 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) int hw, ap, ap_max = ie[1]; u8 hw_rate; + if (ap_max > MAX_RATES) { + lbs_deb_assoc("invalid rates\n"); + return tlv; + } /* Advance past IE header */ ie += 2; @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; + int hw, i; + u8 rates_max; + u8 *rates; /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, preamble, 1); @@ -1775,9 +1782,12 @@ static int 
lbs_ibss_join_existing(struct lbs_private *priv, if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { - int hw, i; - u8 rates_max = rates_eid[1]; - u8 *rates = cmd.bss.rates; + rates_max = rates_eid[1]; + if (rates_max > MAX_RATES) { + lbs_deb_join("invalid rates"); + goto out; + } + rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c index 55116f395f9a..a4a785467748 100644 --- a/drivers/net/wireless/mediatek/mt76/airtime.c +++ b/drivers/net/wireless/mediatek/mt76/airtime.c @@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status, return 0; sband = dev->hw->wiphy->bands[status->band]; - if (!sband || status->rate_idx > sband->n_bitrates) + if (!sband || status->rate_idx >= sband->n_bitrates) return 0; rate = &sband->bitrates[status->rate_idx]; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index b9f2a401041a..96018fd65779 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; - mt76_led_cleanup(dev); + if (IS_ENABLED(CONFIG_MT76_LEDS)) + mt76_led_cleanup(dev); mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); } diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 4590fbf82dc2..f5bb7ace2ff5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), - &transferred, 0); + &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index c6439638a419..b9358db83e96 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 667f18f465be..5dc32b72e7fa 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_CAP_EXCEEDED: return BLK_STS_NOSPC; case NVME_SC_LBA_RANGE: + case NVME_SC_CMD_INTERRUPTED: + case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_ONCS_NOT_SUPPORTED: diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 56c21b501185..72a7e41f3018 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) +{ + switch (cdw10 & 0xff) { + case NVME_FEAT_HOST_ID: + return sizeof(req->sq->ctrl->hostid); + default: + return 0; + } +} + u64 nvmet_get_log_page_offset(struct nvme_command *cmd) { return le64_to_cpu(cmd->get_log_page.lpo); @@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; - if (!nvmet_check_data_len(req, 0)) 
+ if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10))) return; switch (cdw10 & 0xff) { diff --git a/drivers/opp/core.c b/drivers/opp/core.c index be7a7d332332..ba43e6a3dc0a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); - kref_init(&opp_table->list_kref); /* Secure the device table modification */ list_add(&opp_table->node, &opp_tables); @@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref) mutex_unlock(&opp_table_lock); } -void _opp_remove_all_static(struct opp_table *opp_table) -{ - struct dev_pm_opp *opp, *tmp; - - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { - if (!opp->dynamic) - dev_pm_opp_put(opp); - } - - opp_table->parsed_static_opps = false; -} - -static void _opp_table_list_kref_release(struct kref *kref) -{ - struct opp_table *opp_table = container_of(kref, struct opp_table, - list_kref); - - _opp_remove_all_static(opp_table); - mutex_unlock(&opp_table_lock); -} - -void _put_opp_list_kref(struct opp_table *opp_table) -{ - kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, - &opp_table_lock); -} - void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); +void _opp_remove_all_static(struct opp_table *opp_table) +{ + struct dev_pm_opp *opp, *tmp; + + mutex_lock(&opp_table->lock); + + if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) + goto unlock; + + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { + if (!opp->dynamic) + dev_pm_opp_put_unlocked(opp); + } + +unlock: + mutex_unlock(&opp_table->lock); +} + /** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation @@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) return; } - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); /* Drop reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1cbb58240b80..9cd8f0adacae 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ + mutex_lock(&opp_table->lock); if (opp_table->parsed_static_opps) { - kref_get(&opp_table->list_kref); + opp_table->parsed_static_opps++; + mutex_unlock(&opp_table->lock); return 0; } - /* - * Re-initialize list_kref every time we add static OPPs to the OPP - * table as the reference count may be 0 after the last tie static OPPs - * were removed. 
- */ - kref_init(&opp_table->list_kref); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); of_node_put(np); - return ret; + goto remove_static_opp; } else if (opp) { count++; } } /* There should be one of more OPP defined */ - if (WARN_ON(!count)) - return -ENOENT; + if (WARN_ON(!count)) { + ret = -ENOENT; + goto remove_static_opp; + } list_for_each_entry(opp, &opp_table->opp_list, node) pstate_count += !!opp->pstate; @@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) if (pstate_count && pstate_count != count) { dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); - return -ENOENT; + ret = -ENOENT; + goto remove_static_opp; } if (pstate_count) opp_table->genpd_performance_state = true; - opp_table->parsed_static_opps = true; - return 0; + +remove_static_opp: + _opp_remove_all_static(opp_table); + + return ret; } /* Initializes OPP tables based on old-deprecated bindings */ @@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); + _opp_remove_all_static(opp_table); return ret; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 01a500e2c40a..d14e27102730 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -127,11 +127,10 @@ enum opp_table_access { * @dev_list: list of devices that share these OPPs * @opp_list: table of opps * @kref: for reference count of the table. - * @list_kref: for reference count of the OPP list. * @lock: mutex protecting the opp_list and dev_list. * @np: struct device_node pointer for opp's DT node. * @clock_latency_ns_max: Max clock latency in nanoseconds. - * @parsed_static_opps: True if OPPs are initialized from DT. + * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. @@ -167,7 +166,6 @@ struct opp_table { struct list_head dev_list; struct list_head opp_list; struct kref kref; - struct kref list_kref; struct mutex lock; struct device_node *np; @@ -176,7 +174,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1; - bool parsed_static_opps; + unsigned int parsed_static_opps; enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0d92374b0a11..a3a1a0ea64f4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* - * Some devices have a broken ATS implementation causing IOMMU stalls. - * Don't use ATS for those devices. + * Some devices require additional driver setup to enable ATS. Don't use + * ATS for those devices as ATS will be enabled before the driver has had a + * chance to load and configure the device. 
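+ *
+ * For the Navi14 dGPU (device ID 0x7340) only revision 0xc5 is
+ * affected, so the fixup below leaves ATS enabled on all other
+ * revisions of that device.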
*/ -static void quirk_no_ats(struct pci_dev *pdev) +static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - pci_info(pdev, "disabling ATS (broken on this device)\n"); + if (pdev->device == 0x7340 && pdev->revision != 0xc5) + return; + + pci_info(pdev, "disabling ATS\n"); pdev->ats_cap = 0; } /* AMD Stoney platform GPU */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); +/* AMD Iceland dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi14 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 55083c67b2bb..95dca2cb5265 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); - goto ddr_perf_err; + goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } /* Request irq */ irq = of_irq_get(np, 0); @@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev) return 0; ddr_perf_err: - if (pmu->cpuhp_state) - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); - + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: ida_simple_remove(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; @@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev) struct ddr_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 96183e31b96a..584de8f807cc 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu) hisi_pmu->ops->stop_counters(hisi_pmu); } + /* - * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1. - * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu - * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID - * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID - * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID - * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1]. 
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] */ -static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) { u64 mpidr = read_cpuid_mpidr(); - - if (mpidr & MPIDR_MT_BITMASK) { - if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) { - int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (sccl_id) - *sccl_id = aff2 >> 3; - if (ccl_id) - *ccl_id = aff2 & 0x7; - } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - } + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + sccl = aff2; + ccl = aff1; } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; } /* diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index ead06c6c2601..12e71a315a2c 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -115,7 +115,7 @@ struct cpcap_usb_ints_state { enum cpcap_gpio_mode { CPCAP_DM_DP, CPCAP_MDM_RX_TX, - CPCAP_UNKNOWN, + CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */ CPCAP_OTG_DM_DP, }; @@ -134,6 +134,8 @@ struct cpcap_phy_ddata { struct iio_channel *id; struct regulator *vusb; atomic_t active; + unsigned int vbus_provider:1; + unsigned int docked:1; }; static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata) @@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata, static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata); static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata); +static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata, + enum musb_vbus_id_status status) +{ + int error; + + error = musb_mailbox(status); + if (!error) + return; + + dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n", + __func__, error); +} + static void cpcap_usb_detect(struct work_struct *work) { struct cpcap_phy_ddata *ddata; @@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work) if (error) return; - if (s.id_ground) { - dev_dbg(ddata->dev, "id ground, USB host mode\n"); + vbus = cpcap_usb_vbus_valid(ddata); + + /* We need to kick the VBUS as USB A-host */ + if (s.id_ground && ddata->vbus_provider) { + dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n"); + + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, + CPCAP_BIT_VBUSSTBY_EN | + CPCAP_BIT_VBUSEN_SPI, + CPCAP_BIT_VBUSEN_SPI); + if (error) + goto out_err; + + return; + } + + if (vbus && s.id_ground && ddata->docked) { + dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n"); + + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + return; + } + + /* No VBUS needed with docks */ + if (vbus && s.id_ground && !ddata->vbus_provider) { + dev_dbg(ddata->dev, 
"connected to a dock\n"); + + ddata->docked = true; + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + /* + * Force check state again after musb has reoriented, + * otherwise devices won't enumerate after loading PHY + * driver. + */ + schedule_delayed_work(&ddata->detect_work, + msecs_to_jiffies(1000)); + + return; + } + + if (s.id_ground && !ddata->docked) { + dev_dbg(ddata->dev, "id ground, USB host mode\n"); + + ddata->vbus_provider = true; + + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_VBUSSTBY_EN | CPCAP_BIT_VBUSEN_SPI, @@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work) vbus = cpcap_usb_vbus_valid(ddata); + /* Otherwise assume we're connected to a USB host */ if (vbus) { - /* Are we connected to a docking station with vbus? */ - if (s.id_ground) { - dev_dbg(ddata->dev, "connected to a dock\n"); - - /* No VBUS needed with docks */ - error = cpcap_usb_set_usb_mode(ddata); - if (error) - goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); - if (error) - goto out_err; - - return; - } - - /* Otherwise assume we're connected to a USB host */ dev_dbg(ddata->dev, "connected to USB host\n"); error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_VALID); - if (error) - goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID); return; } + ddata->vbus_provider = false; + ddata->docked = false; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); + /* Default to debug UART mode */ error = cpcap_usb_set_uart_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - goto out_err; - dev_dbg(ddata->dev, "set UART mode\n"); return; @@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) goto out_err; @@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable UART mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) return error; @@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2, - CPCAP_BIT_USBXCVREN, - CPCAP_BIT_USBXCVREN); - if (error) - goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_PU_SPI | CPCAP_BIT_DMPD_SPI | @@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable USB mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev) if (error) dev_err(ddata->dev, "could not set UART mode\n"); - error = musb_mailbox(MUSB_VBUS_OFF); - if 
(error) - dev_err(ddata->dev, "could not set mailbox\n"); + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); usb_remove_phy(&ddata->phy); cancel_delayed_work_sync(&ddata->detect_work); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index ee184d5607bd..f20524f0c21d 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work) struct phy_mdm6600 *ddata; struct device *dev; DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES); - int error, i, val = 0; + int error; ddata = container_of(work, struct phy_mdm6600, status_work.work); dev = ddata->dev; @@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work) if (error) return; - for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { - val |= test_bit(i, values) << i; - dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", - __func__, i, test_bit(i, values), val); - } - ddata->status = values[0]; + ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1); dev_info(dev, "modem status: %i %s\n", ddata->status, - phy_mdm6600_status_name[ddata->status & 7]); + phy_mdm6600_status_name[ddata->status]); complete(&ddata->ack); } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 091e20303a14..66f91726b8b2 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -66,7 +66,7 @@ /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ -#define PHY_INIT_COMPLETE_TIMEOUT 1000 +#define PHY_INIT_COMPLETE_TIMEOUT 10000 #define POWER_DOWN_DELAY_US_MIN 10 #define POWER_DOWN_DELAY_US_MAX 11 diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 2b97fb1185a0..9ca20c947283 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate && !cfg->fracdiv) break; @@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate) break; diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig index f1806fd781a0..530426a74f75 100644 --- a/drivers/pinctrl/cirrus/Kconfig +++ b/drivers/pinctrl/cirrus/Kconfig @@ -2,6 +2,7 @@ config PINCTRL_LOCHNAGAR tristate "Cirrus Logic Lochnagar pinctrl driver" depends on MFD_LOCHNAGAR + select GPIOLIB select PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..46600d9380ea 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev) return ret; } -#ifdef CONFIG_PM - -/** - * pinctrl_pm_select_state() - select pinctrl state for PM - * @dev: device to select default state for - * @state: state to set - */ -static int pinctrl_pm_select_state(struct device *dev, - struct pinctrl_state *state) +static int pinctrl_select_bound_state(struct device *dev, + struct pinctrl_state *state) { struct dev_pin_info *pins = dev->pins; int ret; @@ -1558,15 +1551,27 @@ 
static int pinctrl_pm_select_state(struct device *dev, } /** - * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * pinctrl_select_default_state() - select default pinctrl state * @dev: device to select default state for */ -int pinctrl_pm_select_default_state(struct device *dev) +int pinctrl_select_default_state(struct device *dev) { if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->default_state); + return pinctrl_select_bound_state(dev, dev->pins->default_state); +} +EXPORT_SYMBOL_GPL(pinctrl_select_default_state); + +#ifdef CONFIG_PM + +/** + * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * @dev: device to select default state for + */ +int pinctrl_pm_select_default_state(struct device *dev) +{ + return pinctrl_select_default_state(dev); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state); @@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->sleep_state); + return pinctrl_select_bound_state(dev, dev->pins->sleep_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state); @@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->idle_state); + return pinctrl_select_bound_state(dev, dev->pins->idle_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state); #endif diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 44d7f50bbc82..d936e7aa74c4 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -49,6 +49,7 @@ .padown_offset = SPT_PAD_OWN, \ .padcfglock_offset = SPT_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 3c80828a5e50..bbc919bef2bf 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, ®, &bit); + bit = bit << 1; ret = regmap_read(pc->reg_ds, reg, &val); if (ret) diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c index bb0edf51dfda..5731d1b60e28 100644 --- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c +++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c @@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec, return ret; } - if (response->status) { - dev_err(ec->dev, - "EC reported failure sending keyboard LEDs command: %d", - response->status); - return -EIO; - } - return 0; } @@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) { struct wilco_keyboard_leds_msg request; struct wilco_keyboard_leds_msg response; + int ret; memset(&request, 0, sizeof(request)); request.command = WILCO_EC_COMMAND_KBBL; @@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) request.mode = WILCO_KBBL_MODE_FLAG_PWM; request.percent = brightness; - return send_kbbl_msg(ec, &request, &response); + ret = send_kbbl_msg(ec, &request, &response); + if (ret < 0) + return ret; + + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + 
response.status); + return -EIO; + } + + return 0; } static int kbbl_exist(struct wilco_ec_device *ec, bool *exists) @@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec) if (ret < 0) return ret; + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + response.status); + return -EIO; + } + if (response.mode & WILCO_KBBL_MODE_FLAG_PWM) return response.percent; diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 9a5c9fd2dbc6..5739a9669b29 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info { * @work: work struct for deferred process * @timer: background timer * @vring: Tx/Rx ring - * @spin_lock: spin lock + * @spin_lock: Tx/Rx spin lock * @is_ready: ready flag */ struct mlxbf_tmfifo { @@ -164,7 +164,7 @@ struct mlxbf_tmfifo { struct work_struct work; struct timer_list timer; struct mlxbf_tmfifo_vring *vring[2]; - spinlock_t spin_lock; /* spin lock */ + spinlock_t spin_lock[2]; /* spin lock */ bool is_ready; }; @@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); /* Use spin-lock to protect the 'cons->tx_buf'. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); while (size > 0) { addr = cons->tx_buf.buf + cons->tx_buf.tail; @@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) } } - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } /* Rx/Tx one word in the descriptor buffer. */ @@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, fifo->vring[is_rx] = NULL; /* Notify upper layer that packet is done. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); vring_interrupt(0, vring->vq); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); } mlxbf_tmfifo_desc_done: @@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) * worker handler. 
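*
* (spin_lock[0] serializes the Tx/console path and spin_lock[1] the
* Rx completion path, so the two directions do not contend on a
* single lock.)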
*/ if (vring->vdev_id == VIRTIO_ID_CONSOLE) { - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; mlxbf_tmfifo_console_output(tm_vdev, vring); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events)) { return true; @@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) if (!fifo) return -ENOMEM; - spin_lock_init(&fifo->spin_lock); + spin_lock_init(&fifo->spin_lock[0]); + spin_lock_init(&fifo->spin_lock[1]); INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); mutex_init(&fifo->lock); diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 706207d192ae..77be37a1fbcf 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) item = pdata->items; for (i = 0; i < pdata->counter; i++, item++) { + if (item->capability) { + /* + * Read group capability register to get actual number + * of interrupt capable components and set group mask + * accordingly. + */ + ret = regmap_read(priv->regmap, item->capability, + ®val); + if (ret) + goto out; + + item->mask = GENMASK((regval & item->mask) - 1, 0); + } + /* Clear group presense event. */ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 0); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 27d5b40fb717..587403c44598 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -997,7 +997,6 @@ config INTEL_SCU_IPC config INTEL_SCU_IPC_UTIL tristate "Intel SCU IPC utility driver" depends on INTEL_SCU_IPC - default y ---help--- The IPC Util driver provides an interface with the SCU enabling low level access for debug work and updating the firmware. Say @@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM depends on PCI && IOSF_MBI && PM help Power-management driver for Intel's Image Signal Processor found on - Bay and Cherry Trail devices. This dummy driver's sole purpose is to - turn the ISP off (put it in D3) to save power and to allow entering - of S0ix modes. + Bay Trail and Cherry Trail devices. This dummy driver's sole purpose + is to turn the ISP off (put it in D3) to save power and to allow + entering of S0ix modes. To compile this driver as a module, choose M here: the module will be called intel_atomisp2_pm. @@ -1337,6 +1336,17 @@ config PCENGINES_APU2 To compile this driver as a module, choose M here: the module will be called pcengines-apuv2. +config INTEL_UNCORE_FREQ_CONTROL + tristate "Intel Uncore frequency control driver" + depends on X86_64 + help + This driver allows control of uncore frequency limits on + supported server platforms. + Uncore frequency controls RING/LLC (last-level cache) clocks. + + To compile this driver as a module, choose M here: the module + will be called intel-uncore-frequency. 
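+
+	  The resulting limits are exposed through sysfs, typically under
+	  /sys/devices/system/cpu/intel_uncore_frequency/.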
+ source "drivers/platform/x86/intel_speed_select_if/Kconfig" config SYSTEM76_ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 42d85a00be4e..3747b1f07cf1 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o +obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index b361c73636a4..6f12747a359a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */ { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, { KE_IGNORE, 0x6E, }, /* Low Battery notification */ + { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 821b08e01635..43bb15e05529 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define NOTIFY_KBD_BRTDWN 0xc5 #define NOTIFY_KBD_BRTTOGGLE 0xc7 #define NOTIFY_KBD_FBM 0x99 +#define NOTIFY_KBD_TTP 0xae #define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0) @@ -81,6 +82,10 @@ MODULE_LICENSE("GPL"); #define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02 #define ASUS_FAN_BOOST_MODES_MASK 0x03 +#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0 +#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1 +#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2 + #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -198,6 +203,9 @@ struct asus_wmi { u8 fan_boost_mode_mask; u8 fan_boost_mode; + bool throttle_thermal_policy_available; + u8 throttle_thermal_policy_mode; + // The RSOC controls the maximum charging percentage. 
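	// (RSOC = Relative State Of Charge; when present it is exposed
	// through the battery's standard charge_control_end_threshold
	// attribute.)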
bool battery_rsoc_available; @@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus) { int ctrl_param = 0; - /* - * bits 0-2: level - * bit 7: light on/off - */ - if (asus->kbd_led_wk > 0) - ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); - + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); } @@ -1724,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev, // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent static DEVICE_ATTR_RW(fan_boost_mode); +/* Throttle thermal policy ****************************************************/ + +static int throttle_thermal_policy_check_present(struct asus_wmi *asus) +{ + u32 result; + int err; + + asus->throttle_thermal_policy_available = false; + + err = asus_wmi_get_devstate(asus, + ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + &result); + if (err) { + if (err == -ENODEV) + return 0; + return err; + } + + if (result & ASUS_WMI_DSTS_PRESENCE_BIT) + asus->throttle_thermal_policy_available = true; + + return 0; +} + +static int throttle_thermal_policy_write(struct asus_wmi *asus) +{ + int err; + u8 value; + u32 retval; + + value = asus->throttle_thermal_policy_mode; + + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + value, &retval); + if (err) { + pr_warn("Failed to set throttle thermal policy: %d\n", err); + return err; + } + + if (retval != 1) { + pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n", + retval); + return -EIO; + } + + return 0; +} + +static int throttle_thermal_policy_set_default(struct asus_wmi *asus) +{ + if (!asus->throttle_thermal_policy_available) + return 0; + + asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + return throttle_thermal_policy_write(asus); +} + +static int throttle_thermal_policy_switch_next(struct asus_wmi *asus) +{ + u8 new_mode = asus->throttle_thermal_policy_mode + 1; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + + asus->throttle_thermal_policy_mode = new_mode; + return throttle_thermal_policy_write(asus); +} + +static ssize_t throttle_thermal_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + u8 mode = asus->throttle_thermal_policy_mode; + + return scnprintf(buf, PAGE_SIZE, "%d\n", mode); +} + +static ssize_t throttle_thermal_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int result; + u8 new_mode; + struct asus_wmi *asus = dev_get_drvdata(dev); + + result = kstrtou8(buf, 10, &new_mode); + if (result < 0) + return result; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + return -EINVAL; + + asus->throttle_thermal_policy_mode = new_mode; + throttle_thermal_policy_write(asus); + + return count; +} + +// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent +static DEVICE_ATTR_RW(throttle_thermal_policy); + /* Backlight ******************************************************************/ static int read_backlight_power(struct asus_wmi *asus) @@ -2005,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) return; } + if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) { + throttle_thermal_policy_switch_next(asus); + return; + } + if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle) return; @@ -2155,6 +2263,7 @@ static struct attribute *platform_attributes[] = { 
&dev_attr_lid_resume.attr, &dev_attr_als_enable.attr, &dev_attr_fan_boost_mode.attr, + &dev_attr_throttle_thermal_policy.attr, NULL }; @@ -2178,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_ALS_ENABLE; else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; + else if (attr == &dev_attr_throttle_thermal_policy.attr) + ok = asus->throttle_thermal_policy_available; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -2437,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_fan_boost_mode; + err = throttle_thermal_policy_check_present(asus); + if (err) + goto fail_throttle_thermal_policy; + else + throttle_thermal_policy_set_default(asus); + err = asus_wmi_sysfs_init(asus->platform_device); if (err) goto fail_sysfs; @@ -2521,6 +2638,7 @@ fail_hwmon: fail_input: asus_wmi_sysfs_exit(asus->platform_device); fail_sysfs: +fail_throttle_thermal_policy: fail_fan_boost_mode: fail_platform: kfree(asus); diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index be85ed966bf3..b471b86c28fe 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -16,17 +16,27 @@ #define MAX_SPEED 3 -static int temp_limits[3] = { 55000, 60000, 65000 }; +#define TEMP_LIMIT0_DEFAULT 55000 +#define TEMP_LIMIT1_DEFAULT 60000 +#define TEMP_LIMIT2_DEFAULT 65000 + +#define HYSTERESIS_DEFAULT 3000 + +#define SPEED_ON_AC_DEFAULT 2 + +static int temp_limits[3] = { + TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT, +}; module_param_array(temp_limits, int, NULL, 0444); MODULE_PARM_DESC(temp_limits, "Millicelsius values above which the fan speed increases"); -static int hysteresis = 3000; +static int hysteresis = HYSTERESIS_DEFAULT; module_param(hysteresis, int, 0444); MODULE_PARM_DESC(hysteresis, "Hysteresis in millicelsius before lowering the fan speed"); -static int speed_on_ac = 2; +static int speed_on_ac = SPEED_ON_AC_DEFAULT; module_param(speed_on_ac, int, 0444); MODULE_PARM_DESC(speed_on_ac, "minimum fan speed to allow when system is powered by AC"); @@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { - if (temp_limits[i] < 40000 || temp_limits[i] > 70000) { + if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n", temp_limits[i]); - return -EINVAL; + temp_limits[0] = TEMP_LIMIT0_DEFAULT; + temp_limits[1] = TEMP_LIMIT1_DEFAULT; + temp_limits[2] = TEMP_LIMIT2_DEFAULT; + break; } } if (hysteresis < 1000 || hysteresis > 10000) { dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n", hysteresis); - return -EINVAL; + hysteresis = HYSTERESIS_DEFAULT; } if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) { dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n", speed_on_ac); - return -EINVAL; + speed_on_ac = SPEED_ON_AC_DEFAULT; } fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ef6d4bd77b1a..43d590250228 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -19,6 +19,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { + {"INT1051", 0}, {"INT33D5", 0}, {"", 0}, }; diff --git a/drivers/platform/x86/intel-uncore-frequency.c
b/drivers/platform/x86/intel-uncore-frequency.c new file mode 100644 index 000000000000..2b1a0734c3f8 --- /dev/null +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Uncore Frequency Setting + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online, + * one control CPU is identified per die to read/write the limit. The control + * CPU is reassigned if it goes offline. When the last CPU in a die goes + * offline, the sysfs object for that die is removed. + * The majority of the code deals with creating the sysfs attributes and + * handling their reads/writes. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + */ + +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/suspend.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define MSR_UNCORE_RATIO_LIMIT 0x620 +#define UNCORE_FREQ_KHZ_MULTIPLIER 100000 + +/** + * struct uncore_data - Encapsulate all uncore data + * @kobj: Embedded kobject backing the per-die sysfs directory + * @stored_uncore_data: Last user changed MSR 0x620 value, which will be restored + * on system resume. + * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init + * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init + * @control_cpu: Designated CPU for a die to read/write + * @valid: Mark the data valid/invalid + * + * This structure is used to encapsulate all data related to uncore sysfs + * settings for a die/package. + */ +struct uncore_data { + struct kobject kobj; + u64 stored_uncore_data; + u32 initial_min_freq_khz; + u32 initial_max_freq_khz; + int control_cpu; + bool valid; +}; + +#define to_uncore_data(a) container_of(a, struct uncore_data, kobj) + +/* Max instances for uncore data, one for each die */ +static int uncore_max_entries __read_mostly; +/* Storage for uncore data for all instances */ +static struct uncore_data *uncore_instances; +/* Root of all the uncore sysfs kobjs */ +struct kobject uncore_root_kobj; +/* Stores the CPU mask of the target CPUs to use during uncore read/write */ +static cpumask_t uncore_cpu_mask; +/* CPU online callback register instance */ +static enum cpuhp_state uncore_hp_state __read_mostly; +/* Mutex to serialize all uncore accesses */ +static DEFINE_MUTEX(uncore_lock); + +struct uncore_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_uncore_ro(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_uncore_rw(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define show_uncore_data(member_name) \ + static ssize_t show_##member_name(struct kobject *kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct uncore_data *data = to_uncore_data(kobj); \ + return scnprintf(buf, PAGE_SIZE, "%u\n", \ + data->member_name); \ + } \ + define_one_uncore_ro(member_name) + +show_uncore_data(initial_min_freq_khz); +show_uncore_data(initial_max_freq_khz); + +/* Common function to read MSR 0x620 and decode the min/max limits */ +static int uncore_read_ratio(struct uncore_data *data, unsigned int *min, + unsigned int *max) +{ + u64 cap; + int ret; + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + return ret; + + *max = 
(cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + + return 0; +} + +/* Common function to set min/max ratios to be used by sysfs callbacks */ +static int uncore_write_ratio(struct uncore_data *data, unsigned int input, + int set_max) +{ + int ret; + u64 cap; + + mutex_lock(&uncore_lock); + + input /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (!input || input > 0x7F) { + ret = -EINVAL; + goto finish_write; + } + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + goto finish_write; + + if (set_max) { + cap &= ~0x7F; + cap |= input; + } else { + cap &= ~GENMASK(14, 8); + cap |= (input << 8); + } + + ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + if (ret) + goto finish_write; + + data->stored_uncore_data = cap; + +finish_write: + mutex_unlock(&uncore_lock); + + return ret; +} + +static ssize_t store_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + const char *buf, ssize_t count, + int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int input; + + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + + uncore_write_ratio(data, input, min_max); + + return count; +} + +static ssize_t show_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + char *buf, int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int min, max; + int ret; + + mutex_lock(&uncore_lock); + ret = uncore_read_ratio(data, &min, &max); + mutex_unlock(&uncore_lock); + if (ret) + return ret; + + if (min_max) + return sprintf(buf, "%u\n", max); + + return sprintf(buf, "%u\n", min); +} + +#define store_uncore_min_max(name, min_max) \ + static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buf, ssize_t count) \ + { \ + \ + return store_min_max_freq_khz(kobj, attr, buf, count, \ + min_max); \ + } + +#define show_uncore_min_max(name, min_max) \ + static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ + { \ + \ + return show_min_max_freq_khz(kobj, attr, buf, min_max); \ + } + +store_uncore_min_max(min_freq_khz, 0); +store_uncore_min_max(max_freq_khz, 1); + +show_uncore_min_max(min_freq_khz, 0); +show_uncore_min_max(max_freq_khz, 1); + +define_one_uncore_rw(min_freq_khz); +define_one_uncore_rw(max_freq_khz); + +static struct attribute *uncore_attrs[] = { + &initial_min_freq_khz.attr, + &initial_max_freq_khz.attr, + &max_freq_khz.attr, + &min_freq_khz.attr, + NULL +}; + +static struct kobj_type uncore_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = uncore_attrs, +}; + +static struct kobj_type uncore_root_ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +/* Caller provides protection */ +static struct uncore_data *uncore_get_instance(unsigned int cpu) +{ + int id = topology_logical_die_id(cpu); + + if (id >= 0 && id < uncore_max_entries) + return &uncore_instances[id]; + + return NULL; +} + +static void uncore_add_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (!data) { + mutex_unlock(&uncore_lock); + return; + } + + if (data->valid) { + /* control cpu changed */ + data->control_cpu = cpu; + } else { + char str[64]; + int ret; + + memset(data, 0, sizeof(*data)); + sprintf(str, "package_%02d_die_%02d", + topology_physical_package_id(cpu), + topology_die_id(cpu)); + + uncore_read_ratio(data, &data->initial_min_freq_khz, + &data->initial_max_freq_khz); + + ret = kobject_init_and_add(&data->kobj, 
&uncore_ktype, + &uncore_root_kobj, str); + if (!ret) { + data->control_cpu = cpu; + data->valid = true; + } + } + mutex_unlock(&uncore_lock); +} + +/* Last CPU in this die is offline, so remove sysfs entries */ +static void uncore_remove_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (data) { + kobject_put(&data->kobj); + data->control_cpu = -1; + data->valid = false; + } + mutex_unlock(&uncore_lock); +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int target; + + /* Check if there is an online cpu in the package for uncore MSR */ + target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + uncore_add_die_entry(cpu); + + return 0; +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if existing cpu is used for uncore MSRs */ + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + return 0; + + /* Find a new cpu to set uncore MSR */ + target = cpumask_any_but(topology_die_cpumask(cpu), cpu); + + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &uncore_cpu_mask); + uncore_add_die_entry(target); + } else { + uncore_remove_die_entry(cpu); + } + + return 0; +} + +static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, + void *_unused) +{ + int cpu; + + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + for_each_cpu(cpu, &uncore_cpu_mask) { + struct uncore_data *data; + int ret; + + data = uncore_get_instance(cpu); + if (!data || !data->valid || !data->stored_uncore_data) + continue; + + ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT, + data->stored_uncore_data); + if (ret) + return ret; + } + break; + default: + break; + } + return 0; +} + +static struct notifier_block uncore_pm_nb = { + .notifier_call = uncore_pm_notify, +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id intel_uncore_cpu_ids[] = { + ICPU(INTEL_FAM6_BROADWELL_G), + ICPU(INTEL_FAM6_BROADWELL_X), + ICPU(INTEL_FAM6_BROADWELL_D), + ICPU(INTEL_FAM6_SKYLAKE_X), + ICPU(INTEL_FAM6_ICELAKE_X), + ICPU(INTEL_FAM6_ICELAKE_D), + {} +}; + +static int __init intel_uncore_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + id = x86_match_cpu(intel_uncore_cpu_ids); + if (!id) + return -ENODEV; + + uncore_max_entries = topology_max_packages() * + topology_max_die_per_package(); + uncore_instances = kcalloc(uncore_max_entries, + sizeof(*uncore_instances), GFP_KERNEL); + if (!uncore_instances) + return -ENOMEM; + + ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype, + &cpu_subsys.dev_root->kobj, + "intel_uncore_frequency"); + if (ret) + goto err_free; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/uncore-freq:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret < 0) + goto err_rem_kobj; + + uncore_hp_state = ret; + + ret = register_pm_notifier(&uncore_pm_nb); + if (ret) + goto err_rem_state; + + return 0; + +err_rem_state: + cpuhp_remove_state(uncore_hp_state); +err_rem_kobj: + kobject_put(&uncore_root_kobj); +err_free: + kfree(uncore_instances); + + return ret; +} +module_init(intel_uncore_init) + +static void __exit intel_uncore_exit(void) +{ + int i; + + unregister_pm_notifier(&uncore_pm_nb); + cpuhp_remove_state(uncore_hp_state); + for (i = 0; i < uncore_max_entries; ++i) { + if 
(uncore_instances[i].valid) + kobject_put(&uncore_instances[i].kobj); + } + kobject_put(&uncore_root_kobj); + kfree(uncore_instances); +} +module_exit(intel_uncore_exit) + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver"); diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c index b0f421fea2a5..805fc0d8515c 100644 --- a/drivers/platform/x86/intel_atomisp2_pm.c +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry - * Trail devices. The sole purpose of this driver is to allow the ISP to - * be put in D3. + * Dummy driver for Intel's Image Signal Processor found on Bay Trail + * and Cherry Trail devices. The sole purpose of this driver is to allow + * the ISP to be put in D3. * * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com> * @@ -36,8 +36,7 @@ static int isp_set_power(struct pci_dev *dev, bool enable) { unsigned long timeout; - u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : - ISPSSPM0_IUNIT_POWER_OFF; + u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF; /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, @@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable) /* * There should be no IUNIT access while power-down is - * in progress HW sighting: 4567865 + * in progress. HW sighting: 4567865. * Wait up to 50 ms for the IUNIT to shut down. * And we do the same for power on. */ timeout = jiffies + msecs_to_jiffies(50); - while (1) { + do { u32 tmp; /* Wait until ISPSSPM0 bit[25:24] shows the right value */ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp); tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; if (tmp == val) - break; + return 0; - if (time_after(jiffies, timeout)) { - dev_err(&dev->dev, "IUNIT power-%s timeout.\n", - enable ? "on" : "off"); - return -EBUSY; - } usleep_range(1000, 2000); - } + } while (time_before(jiffies, timeout)); - return 0; + dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off"); + return -EBUSY; } static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c index 2d097fc2dd46..04138215956b 100644 --- a/drivers/platform/x86/intel_cht_int33fe_typec.c +++ b/drivers/platform/x86/intel_cht_int33fe_typec.c @@ -36,30 +36,6 @@ enum { INT33FE_NODE_MAX, }; -static const struct software_node nodes[]; - -static const struct software_node_ref_args pi3usb30532_ref = { - &nodes[INT33FE_NODE_PI3USB30532] -}; - -static const struct software_node_ref_args dp_ref = { - &nodes[INT33FE_NODE_DISPLAYPORT] -}; - -static struct software_node_ref_args mux_ref; - -static const struct software_node_reference usb_connector_refs[] = { - { "orientation-switch", 1, &pi3usb30532_ref}, - { "mode-switch", 1, &pi3usb30532_ref}, - { "displayport", 1, &dp_ref}, - { } -}; - -static const struct software_node_reference fusb302_refs[] = { - { "usb-role-switch", 1, &mux_ref}, - { } -}; - /* * Grrr I severely dislike buggy BIOS-es. 
At least one BIOS enumerates * the max17047 both through the INT33FE ACPI device (it is right there @@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = { { } }; +/* + * We are not using inline property here because those are constant, + * and we need to adjust this one at runtime to point to the real + * software node. + */ +static struct software_node_ref_args fusb302_mux_refs[] = { + { .node = NULL }, +}; + static const struct property_entry fusb302_props[] = { PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"), + PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs), { } }; @@ -112,6 +98,8 @@ static const u32 snk_pdo[] = { PDO_VAR(5000, 12000, 3000), }; +static const struct software_node nodes[]; + static const struct property_entry usb_connector_props[] = { PROPERTY_ENTRY_STRING("data-role", "dual"), PROPERTY_ENTRY_STRING("power-role", "dual"), @@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = { PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo), PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo), PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000), + PROPERTY_ENTRY_REF("orientation-switch", + &nodes[INT33FE_NODE_PI3USB30532]), + PROPERTY_ENTRY_REF("mode-switch", + &nodes[INT33FE_NODE_PI3USB30532]), + PROPERTY_ENTRY_REF("displayport", + &nodes[INT33FE_NODE_DISPLAYPORT]), { } }; static const struct software_node nodes[] = { - { "fusb302", NULL, fusb302_props, fusb302_refs }, + { "fusb302", NULL, fusb302_props }, { "max17047", NULL, max17047_props }, { "pi3usb30532" }, { "displayport" }, - { "connector", &nodes[0], usb_connector_props, usb_connector_refs }, + { "connector", &nodes[0], usb_connector_props }, { } }; @@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) { software_node_unregister_nodes(nodes); - if (mux_ref.node) { - fwnode_handle_put(software_node_fwnode(mux_ref.node)); - mux_ref.node = NULL; + if (fusb302_mux_refs[0].node) { + fwnode_handle_put( + software_node_fwnode(fusb302_mux_refs[0].node)); + fusb302_mux_refs[0].node = NULL; } if (data->dp) { @@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) { + const struct software_node *mux_ref_node; int ret; - ret = software_node_register_nodes(nodes); - if (ret) - return ret; - - /* The devices that are not created in this driver need extra steps. */ - /* * There is no ACPI device node for the USB role mux, so we need to wait * until the mux driver has created the software node for the mux device. * It means we depend on the mux driver. This function will return * -EPROBE_DEFER until the mux device is registered. */ - mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); - if (!mux_ref.node) { - ret = -EPROBE_DEFER; - goto err_remove_nodes; - } + mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); + if (!mux_ref_node) + return -EPROBE_DEFER; + + /* + * Update node used in "usb-role-switch" property. Note that we + * rely on software_node_register_nodes() to use the original + * instance of properties instead of copying them. + */ + fusb302_mux_refs[0].node = mux_ref_node; + + ret = software_node_register_nodes(nodes); + if (ret) + return ret; + + /* The devices that are not created in this driver need extra steps. */ /* * The DP connector does have an ACPI device node. 
In this case we can just diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h index 512ad234ad0d..35ed9711c7b9 100644 --- a/drivers/platform/x86/intel_ips.h +++ b/drivers/platform/x86/intel_ips.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010 Intel Corporation */ diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 292bace83f1e..6f436836fe50 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - ddata = (struct mid_pb_ddata *)id->driver_data; + ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data, + sizeof(*ddata), GFP_KERNEL); if (!ddata) - return -ENODATA; + return -ENOMEM; ddata->dev = &pdev->dev; ddata->irq = irq; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 571b4754477c..144faa8bad3d 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = { {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3}, - {}, + {} }; static const struct pmc_bit_map spt_mphy_map[] = { @@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = { {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13}, {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14}, {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15}, - {}, + {} }; static const struct pmc_bit_map spt_pfear_map[] = { @@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = { {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, {"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, - {}, + {} +}; + +static const struct pmc_bit_map *ext_spt_pfear_map[] = { + spt_pfear_map, + NULL }; static const struct pmc_bit_map spt_ltr_show_map[] = { @@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = { }; static const struct pmc_reg_map spt_reg_map = { - .pfear_sts = spt_pfear_map, + .pfear_sts = ext_spt_pfear_map, .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, .ltr_show_sts = spt_ltr_show_map, @@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"SDX", BIT(4)}, {"SPE", BIT(5)}, {"Fuse", BIT(6)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake. + */ {"SBR8", BIT(7)}, {"CSME_FSC", BIT(0)}, @@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"HDA_PGD4", BIT(2)}, {"HDA_PGD5", BIT(3)}, {"HDA_PGD6", BIT(4)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake. 
+ */ {"PSF6", BIT(5)}, {"PSF7", BIT(6)}, {"PSF8", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_cnp_pfear_map[] = { + cnp_pfear_map, + NULL +}; +static const struct pmc_bit_map icl_pfear_map[] = { /* Ice Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, @@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {} }; +static const struct pmc_bit_map *ext_icl_pfear_map[] = { + cnp_pfear_map, + icl_pfear_map, + NULL +}; + +static const struct pmc_bit_map tgl_pfear_map[] = { + /* Tiger Lake and Elkhart Lake generation onwards only */ + {"PSF9", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, + {"RES_68", BIT(3)}, + {"RES_69", BIT(4)}, + {"RES_70", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +static const struct pmc_bit_map *ext_tgl_pfear_map[] = { + cnp_pfear_map, + tgl_pfear_map, + NULL +}; + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {"AUDIO_D3", BIT(0)}, {"OTG_D3", BIT(1)}, @@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { cnp_slps0_dbg0_map, cnp_slps0_dbg1_map, cnp_slps0_dbg2_map, - NULL, + NULL }; static const struct pmc_bit_map cnp_ltr_show_map[] = { @@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { }; static const struct pmc_reg_map cnp_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_cnp_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = { }; static const struct pmc_reg_map icl_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_icl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = { .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, }; -static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) -{ - return readb(pmcdev->regbase + offset); -} +static const struct pmc_reg_map tgl_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, +}; static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) { return readl(pmcdev->regbase + reg_offset); } -static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int - reg_offset, u32 val) +static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, + u32 val) { writel(val, pmcdev->regbase + reg_offset); } @@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void) #if IS_ENABLED(CONFIG_DEBUG_FS) static bool slps0_dbg_latch; -static void pmc_core_display_map(struct seq_file *s, int index, - u8 pf_reg, const struct pmc_bit_map *pf_map) +static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +{ + return readb(pmcdev->regbase + offset); +} + +static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, + u8 pf_reg, const struct pmc_bit_map **pf_map) { seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", - index, 
pf_map[index].name, - pf_map[index].bit_mask & pf_reg ? "Off" : "On"); + ip, pf_map[idx][index].name, + pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On"); } static int pmc_core_ppfear_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->pfear_sts; + const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; - int index, iter; + int index, iter, idx, ip = 0; iter = pmcdev->map->ppfear0_offset; @@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); - for (index = 0; map[index].name && - index < pmcdev->map->ppfear_buckets * 8; index++) - pmc_core_display_map(s, index, pf_regs[index / 8], map); + for (idx = 0; maps[idx]; idx++) { + for (index = 0; maps[idx][index].name && + index < pmcdev->map->ppfear_buckets * 8; ip++, index++) + pmc_core_display_map(s, index, idx, ip, + pf_regs[index / 8], maps); + } return 0; } @@ -561,21 +623,22 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user -*userbuf, size_t count, loff_t *ppos) +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; u32 val, buf_size, fd; - int err = 0; + int err; buf_size = count < 64 ? count : 64; - mutex_lock(&pmcdev->lock); - if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) { - err = -EFAULT; - goto out_unlock; - } + err = kstrtou32_from_user(userbuf, buf_size, 10, &val); + if (err) + return err; + + mutex_lock(&pmcdev->lock); if (val > map->ltr_ignore_max) { err = -EINVAL; @@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, &pmc_core_dev_state); - debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev, - &pmc_core_ppfear_fops); + if (pmcdev->map->pfear_sts) + debugfs_create_file("pch_ip_power_gating_status", 0444, dir, + pmcdev, &pmc_core_ppfear_fops); debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, &pmc_core_ltr_ignore_ops); @@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map), INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map), + INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), + INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), + INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0}, - { 0, }, + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, + { } }; /* * This quirk can be used on those platforms where - * the platform BIOS enforces 24Mhx Crystal to shutdown + * the platform BIOS enforces 24Mhz crystal to shutdown * before PMC can assert SLP_S0#. 
*/ static int quirk_xtal_ignore(const struct dmi_system_id *id) diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index fdee5772e532..f1a0792b3f91 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel Core SoC Power Management Controller Header File * @@ -186,6 +186,8 @@ enum ppfear_regs { #define ICL_NUM_IP_IGN_ALLOWED 20 #define ICL_PMC_LTR_WIGIG 0x1BFC +#define TGL_NUM_IP_IGN_ALLOWED 22 + struct pmc_bit_map { const char *name; u32 bit_mask; @@ -213,7 +215,7 @@ struct pmc_bit_map { * captures them to have a common implementation. */ struct pmc_reg_map { - const struct pmc_bit_map *pfear_sts; + const struct pmc_bit_map **pfear_sts; const struct pmc_bit_map *mphy_sts; const struct pmc_bit_map *pll_sts; const struct pmc_bit_map **slps0_dbg_maps; diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 6fe829f30997..e1266f5c6359 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 5c1da2bb1435..2433bf73f1ed 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -12,23 +12,13 @@ */ #include <linux/acpi.h> -#include <linux/atomic.h> -#include <linux/bitops.h> #include <linux/delay.h> -#include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/notifier.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/pm.h> -#include <linux/pm_qos.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/suspend.h> #include <asm/intel_pmc_ipc.h> @@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset); } -static inline u8 __maybe_unused ipc_data_readb(u32 offset) -{ - return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); -} - static inline u32 ipc_data_readl(u32 offset) { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); @@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset) } /** - * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: data pointer for storing the register output - * - * Reads the 32-bit PMC GCR register at given offset. - * - * Return: negative value on error or 0 on success. 
- */ -int intel_pmc_gcr_read(u32 offset, u32 *data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - *data = readl(ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_read); - -/** * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register * @offset: offset of GCR register from GCR address base * @data: data pointer for storing the register output @@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data) EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64); /** - * intel_pmc_gcr_write() - Write PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: register update value - * - * Writes the PMC GCR register of given offset with given - * value. - * - * Return: negative value on error or 0 on success. - */ -int intel_pmc_gcr_write(u32 offset, u32 data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - writel(data, ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); - -/** * intel_pmc_gcr_update() - Update PMC GCR register bits * @offset: offset of GCR register from GCR address base * @mask: bit mask for update operation @@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); * * Return: negative value on error or 0 on success. */ -int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) +static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) { u32 new_val; int ret = 0; @@ -339,7 +265,6 @@ gcr_ipc_unlock: spin_unlock(&ipcdev.gcr_lock); return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); static int update_no_reboot_bit(void *priv, bool set) { @@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void) * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_simple_command(int cmd, int sub) +static int intel_pmc_ipc_simple_command(int cmd, int sub) { int ret; @@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); /** * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers @@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); * * Return: an IPC error code or 0 on success. 
*/ -int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, - u32 outlen, u32 dptr, u32 sptr) +static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, + u32 outlen, u32 dptr, u32 sptr) { u32 wbuf[4] = { 0 }; int ret; @@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); /** * intel_pmc_ipc_command() - IPC command with input/output data @@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, } return (ssize_t)count; } +static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store); static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, struct device_attribute *attr, @@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, int subcmd; int ret; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; if (val) subcmd = 1; @@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } - -static DEVICE_ATTR(simplecmd, S_IWUSR, - NULL, intel_pmc_ipc_simple_cmd_store); -static DEVICE_ATTR(northpeak, S_IWUSR, - NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, @@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = { .attrs = intel_ipc_attrs, }; +static const struct attribute_group *intel_ipc_groups[] = { + &intel_ipc_group, + NULL +}; + static struct resource punit_res_array[] = { /* Punit BIOS */ { @@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev) goto err_irq; } - ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group); - if (ret) { - dev_err(&pdev->dev, "Failed to create sysfs group %d\n", - ret); - goto err_sys; - } - ipcdev.has_gcr_regs = true; return 0; -err_sys: - devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); + err_irq: platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -980,7 +898,6 @@ err_irq: static int ipc_plat_remove(struct platform_device *pdev) { - sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = { .driver = { .name = "pmc-ipc-plat", .acpi_match_table = ACPI_PTR(ipc_acpi_ids), + .dev_groups = intel_ipc_groups, }, }; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 5ea5245b8b15..3d7da5266136 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -26,11 +26,7 @@ #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ -#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ -#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ -#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ -#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ -#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ +#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ @@ -58,56 +54,29 @@ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ #define IPC_IOC 0x100 /* IPC command register IOC bit */ -#define 
PCI_DEVICE_ID_LINCROFT 0x082a -#define PCI_DEVICE_ID_PENWELL 0x080e -#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea -#define PCI_DEVICE_ID_TANGIER 0x11a0 - -/* intel scu ipc driver data */ -struct intel_scu_ipc_pdata_t { - u32 i2c_base; - u32 i2c_len; - u8 irq_mode; -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - -/* Penwell and Cloverview */ -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 1, -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { - .i2c_base = 0xff00d000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - struct intel_scu_ipc_dev { struct device *dev; void __iomem *ipc_base; - void __iomem *i2c_base; struct completion cmd_complete; u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ +#define IPC_STATUS 0x04 +#define IPC_STATUS_IRQ BIT(2) +#define IPC_STATUS_ERR BIT(1) +#define IPC_STATUS_BUSY BIT(0) + /* - * IPC Read Buffer (Read Only): - * 16 byte buffer for receiving data from SCU, if IPC command - * processing results in response data + * IPC Write/Read Buffers: + * 16 byte buffers for sending data to and receiving data from the SCU. */ +#define IPC_WRITE_BUFFER 0x80 #define IPC_READ_BUFFER 0x90 -#define IPC_I2C_CNTRL_ADDR 0 -#define I2C_DATA_ADDR 0x04 +/* Timeout in jiffies */ +#define IPC_TIMEOUT (3 * HZ) static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */ @@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */ */ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) { - if (scu->irq_mode) { - reinit_completion(&scu->cmd_complete); - writel(cmd | IPC_IOC, scu->ipc_base); - } - writel(cmd, scu->ipc_base); + reinit_completion(&scu->cmd_complete); + writel(cmd | IPC_IOC, scu->ipc_base); } /* @@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) */ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset) { - writel(data, scu->ipc_base + 0x80 + offset); + writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset); } /* @@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 */ static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu) { - return __raw_readl(scu->ipc_base + 0x04); + return __raw_readl(scu->ipc_base + IPC_STATUS); } /* Read ipc byte data */ @@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset) /* Wait until the SCU status is no longer busy */ static inline int busy_loop(struct intel_scu_ipc_dev *scu) { - u32 status = ipc_read_status(scu); - u32 loop_count = 100000; + unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT); - /* break if scu doesn't reset busy bit after huge retry */ - while ((status & BIT(0)) && --loop_count) { - udelay(1); /* scu processing time is in few u secods */ - status = ipc_read_status(scu); - } + do { + u32 status; - if (status & BIT(0)) { - dev_err(scu->dev, "IPC timed out"); - return -ETIMEDOUT; - } + status = ipc_read_status(scu); + if (!(status & IPC_STATUS_BUSY)) + return (status & IPC_STATUS_ERR) ? 
-EIO : 0; - if (status & BIT(1)) - return -EIO; + usleep_range(50, 100); + } while (time_before(jiffies, end)); - return 0; + dev_err(scu->dev, "IPC timed out"); + return -ETIMEDOUT; } /* Wait till ipc ioc interrupt is received or timeout in 3 seconds */ @@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; - if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) { + if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) { dev_err(scu->dev, "IPC timed out\n"); return -ETIMEDOUT; } status = ipc_read_status(scu); - if (status & BIT(1)) + if (status & IPC_STATUS_ERR) return -EIO; return 0; @@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) } /** - * intel_scu_ipc_ioread8 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read byte + * intel_scu_ipc_ioread8 - read a byte via the SCU + * @addr: Register on SCU + * @data: Return pointer for read byte * - * Read a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Read a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { @@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data) EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** - * intel_scu_ipc_ioread16 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read word - * - * Read a register pair. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread16(u16 addr, u16 *data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread16); - -/** - * intel_scu_ipc_ioread32 - read a dword via the SCU - * @addr: register on SCU - * @data: return pointer for read dword - * - * Read four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread32(u16 addr, u32 *data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread32); - -/** - * intel_scu_ipc_iowrite8 - write a byte via the SCU - * @addr: register on SCU - * @data: byte to write + * intel_scu_ipc_iowrite8 - write a byte via the SCU + * @addr: Register on SCU + * @data: Byte to write * - * Write a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Write a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { @@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data) EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** - * intel_scu_ipc_iowrite16 - write a word via the SCU - * @addr: register on SCU - * @data: word to write - * - * Write two registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. 
+ * intel_scu_ipc_readv - read a set of registers + * @addr: Register list + * @data: Bytes to return + * @len: Length of array * - * This function may sleep. - */ -int intel_scu_ipc_iowrite16(u16 addr, u16 data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite16); - -/** - * intel_scu_ipc_iowrite32 - write a dword via the SCU - * @addr: register on SCU - * @data: dword to write + * Read registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * Write four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * The largest array length permitted by the hardware is 5 items. * - * This function may sleep. - */ -int intel_scu_ipc_iowrite32(u16 addr, u32 data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite32); - -/** - * intel_scu_ipc_readvv - read a set of registers - * @addr: register list - * @data: bytes to return - * @len: length of array - * - * Read registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * The largest array length permitted by the hardware is 5 items. - * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) { @@ -379,18 +273,17 @@ EXPORT_SYMBOL(intel_scu_ipc_readv); /** - * intel_scu_ipc_writev - write a set of registers - * @addr: register list - * @data: bytes to write - * @len: length of array - * - * Write registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * intel_scu_ipc_writev - write a set of registers + * @addr: Register list + * @data: Bytes to write + * @len: Length of array + * - * The largest array length permitted by the hardware is 5 items. + * Write registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * This function may sleep. + * The largest array length permitted by the hardware is 5 items. * + * This function may sleep. */ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) { @@ -399,19 +292,18 @@ EXPORT_SYMBOL(intel_scu_ipc_writev); /** - * intel_scu_ipc_update_register - r/m/w a register - * @addr: register address - * @bits: bits to update - * @mask: mask of bits to update - * - * Read-modify-write power control unit register. The first data argument - * must be register value and second is mask value - * mask is a bitmap that indicates which bits to update. - * 0 = masked. Don't modify this bit, 1 = modify this bit. - * returns 0 on success or an error code. - * - * This function may sleep. Locking between SCU accesses is handled - * for the caller. + * intel_scu_ipc_update_register - r/m/w a register + * @addr: Register address + * @bits: Bits to update + * @mask: Mask of bits to update + * + * Read-modify-write power control unit register. The first data argument + * must be the register value and the second is the mask value; the mask is a + * bitmap that indicates which bits to update: %0 = masked (don't modify this + * bit), %1 = modify this bit. Returns %0 on success or an error code. + * + * This function may sleep. 
Locking between SCU accesses is handled + * for the caller. */ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { @@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) EXPORT_SYMBOL(intel_scu_ipc_update_register); /** - * intel_scu_ipc_simple_command - send a simple command - * @cmd: command - * @sub: sub type + * intel_scu_ipc_simple_command - send a simple command + * @cmd: Command + * @sub: Sub type * - * Issue a simple command to the SCU. Do not use this interface if - * you must then access data as any data values may be overwritten - * by another SCU access by the time this function returns. + * Issue a simple command to the SCU. Do not use this interface if you must + * then access data as any data values may be overwritten by another SCU + * access by the time this function returns. * - * This function may sleep. Locking for SCU accesses is handled for - * the caller. + * This function may sleep. Locking for SCU accesses is handled for the + * caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { @@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub) EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** - * intel_scu_ipc_command - command with data - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in dwords - * @out: output data - * @outlein: output length in dwords - * - * Issue a command to the SCU which involves data transfers. Do the - * data copies under the lock but leave it for the caller to interpret + * intel_scu_ipc_command - command with data + * @cmd: Command + * @sub: Sub type + * @in: Input data + * @inlen: Input length in dwords + * @out: Output data + * @outlen: Output length in dwords + * + * Issue a command to the SCU which involves data transfers. Do the + * data copies under the lock but leave it for the caller to interpret. */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) @@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, } EXPORT_SYMBOL(intel_scu_ipc_command); -#define IPC_SPTR 0x08 -#define IPC_DPTR 0x0C - -/** - * intel_scu_ipc_raw_command() - IPC command with data and pointers - * @cmd: IPC command code. - * @sub: IPC command sub type. - * @in: input data of this IPC command. - * @inlen: input data length in dwords. - * @out: output data of this IPC command. - * @outlen: output data length in dwords. - * @sptr: data writing to SPTR register. - * @dptr: data writing to DPTR register. - * - * Send an IPC command to SCU with input/output data and source/dest pointers. - * - * Return: an IPC error code or 0 on success. - */ -int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen, - u32 *out, int outlen, u32 dptr, u32 sptr) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - int inbuflen = DIV_ROUND_UP(inlen, 4); - u32 inbuf[4]; - int i, err; - - /* Up to 16 bytes */ - if (inbuflen > 4) - return -EINVAL; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - - writel(dptr, scu->ipc_base + IPC_DPTR); - writel(sptr, scu->ipc_base + IPC_SPTR); - - /* - * SRAM controller doesn't support 8-bit writes, it only - * supports 32-bit writes, so we have to copy input data into - * the temporary buffer, and SCU FW will use the inlen to - * determine the actual input data length in the temporary - * buffer. 
- */ - memcpy(inbuf, in, inlen); - - for (i = 0; i < inbuflen; i++) - ipc_data_writel(scu, inbuf[i], 4 * i); - - ipc_command(scu, (inlen << 16) | (sub << 12) | cmd); - err = intel_scu_ipc_check_status(scu); - if (!err) { - for (i = 0; i < outlen; i++) - *out++ = ipc_data_readl(scu, 4 * i); - } - - mutex_unlock(&ipclock); - return err; -} -EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command); - -/* I2C commands */ -#define IPC_I2C_WRITE 1 /* I2C Write command */ -#define IPC_I2C_READ 2 /* I2C Read command */ - -/** - * intel_scu_ipc_i2c_cntrl - I2C read/write operations - * @addr: I2C address + command bits - * @data: data to read/write - * - * Perform an an I2C read/write operation via the SCU. All locking is - * handled for the caller. This function may sleep. - * - * Returns an error code or 0 on success. - * - * This has to be in the IPC driver for the locking. - */ -int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - u32 cmd = 0; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - cmd = (addr >> 24) & 0xFF; - if (cmd == IPC_I2C_READ) { - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - /* Write not getting updated without delay */ - usleep_range(1000, 2000); - *data = readl(scu->i2c_base + I2C_DATA_ADDR); - } else if (cmd == IPC_I2C_WRITE) { - writel(*data, scu->i2c_base + I2C_DATA_ADDR); - usleep_range(1000, 2000); - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - } else { - dev_err(scu->dev, - "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); - - mutex_unlock(&ipclock); - return -EIO; - } - mutex_unlock(&ipclock); - return 0; -} -EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); - /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called @@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); static irqreturn_t ioc(int irq, void *dev_id) { struct intel_scu_ipc_dev *scu = dev_id; + int status = ipc_read_status(scu); - if (scu->irq_mode) - complete(&scu->cmd_complete); + writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS); + complete(&scu->cmd_complete); return IRQ_HANDLED; } @@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_dev *scu = &ipcdev; - struct intel_scu_ipc_pdata_t *pdata; if (scu->dev) /* We support only one SCU */ return -EBUSY; - pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data; - if (!pdata) - return -ENODEV; - - scu->irq_mode = pdata->irq_mode; - err = pcim_enable_device(pdev); if (err) return err; @@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) scu->ipc_base = pcim_iomap_table(pdev)[0]; - scu->i2c_base = ioremap(pdata->i2c_base, pdata->i2c_len); - if (!scu->i2c_base) - return -ENOMEM; - err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc", scu); if (err) @@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; } -#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata} - static const struct pci_device_id pci_ids[] = { - SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata), - SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata), + { PCI_VDEVICE(INTEL, 0x080e) }, + { PCI_VDEVICE(INTEL, 0x08ea) }, + { 
PCI_VDEVICE(INTEL, 0x11a0) }, {} }; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 3de5a3c66529..0c2aa22c7a12 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0x7F, 0x00, 0x0B}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, }; static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { @@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { {0xD0, 0x03, 0x08}, {0x7F, 0x02, 0x00}, {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, }; struct isst_cmd { diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index e84d3e983e0c..8e3fb55ac1ae 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity); if (err) { pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; @@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity); if (err) { pr_err("Changing IOSS Trace Verbosity Failed. 
Error %d\n", err); - count = err; + return err; } return count; diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index ebfcfead23b4..c4c742bb23cf 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = { static int telemetry_pltdrv_probe(struct platform_device *pdev) { - struct resource *res0 = NULL, *res1 = NULL; const struct x86_cpu_id *id; - int size, ret = -ENOMEM; + void __iomem *mem; + int ret; id = x86_match_cpu(telemetry_cpu_ids); if (!id) @@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) telm_conf = (struct telemetry_plt_config *)id->driver_data; - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res0) { - ret = -EINVAL; - goto out; - } - size = resource_size(res0); - if (!devm_request_mem_region(&pdev->dev, res0->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } - telm_conf->pss_config.ssram_base_addr = res0->start; - telm_conf->pss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res1) { - ret = -EINVAL; - goto out; - } - size = resource_size(res1); - if (!devm_request_mem_region(&pdev->dev, res1->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } + telm_conf->pss_config.regmap = mem; - telm_conf->ioss_config.ssram_base_addr = res1->start; - telm_conf->ioss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mem)) + return PTR_ERR(mem); - telm_conf->pss_config.regmap = ioremap( - telm_conf->pss_config.ssram_base_addr, - telm_conf->pss_config.ssram_size); - if (!telm_conf->pss_config.regmap) { - ret = -ENOMEM; - goto out; - } - - telm_conf->ioss_config.regmap = ioremap( - telm_conf->ioss_config.ssram_base_addr, - telm_conf->ioss_config.ssram_size); - if (!telm_conf->ioss_config.regmap) { - ret = -ENOMEM; - goto out; - } + telm_conf->ioss_config.regmap = mem; mutex_init(&telm_conf->telem_lock); mutex_init(&telm_conf->telem_trace_lock); @@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) return 0; out: - if (res0) - release_mem_region(res0->start, resource_size(res0)); - if (res1) - release_mem_region(res1->start, resource_size(res1)); - if (telm_conf->pss_config.regmap) - iounmap(telm_conf->pss_config.regmap); - if (telm_conf->ioss_config.regmap) - iounmap(telm_conf->ioss_config.regmap); dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n"); return ret; @@ -1204,9 +1163,6 @@ out: static int telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - iounmap(telm_conf->pss_config.regmap); - iounmap(telm_conf->ioss_config.regmap); - return 0; } diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8fe51e43f1bc..c27548fd386a 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -35,6 +35,8 @@ #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a +#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b +#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32 @@ -46,6 +48,8 @@ #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41 
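/*
 * Editorial note, not part of the patch: the AGGR* offsets come in
 * status/mask pairs.  The mlxreg-hotplug core reads the status
 * register named by the platform data "cell" field to learn which
 * event group fired, and writes the companion *_MASK register to
 * unmask or silence that group; the AGGRCX pair added at 0x44/0x45
 * just below extends the same scheme to the new comex carriers.
 */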
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52 @@ -68,6 +72,7 @@ #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3 +#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5 @@ -85,9 +90,13 @@ #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8 +#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9 +#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb +#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda +#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc #define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL #define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ @@ -96,6 +105,9 @@ #define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \ MLXPLAT_CPLD_LPC_PIO_OFFSET) +#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ + MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \ + MLXPLAT_CPLD_LPC_PIO_OFFSET) /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */ #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04 @@ -112,17 +124,29 @@ #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6) #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0) #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4) #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4) +#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04 +#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT) + +/* Masks for aggregation for comex carriers */ +#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1) +#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \ + MLXPLAT_CPLD_AGGR_MASK_CARRIER) +#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1 /* Default I2C parent bus number */ #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1 /* Maximum number of possible physical buses equipped on system */ #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16 +#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24 /* Number of channels in group */ #define MLXPLAT_CPLD_GRP_CHNL_NUM 8 @@ -130,14 +154,16 @@ /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 #define MLXPLAT_CPLD_CH2 10 +#define MLXPLAT_CPLD_CH3 18 /* Number of LPC attached MUX platform devices */ -#define MLXPLAT_CPLD_LPC_MUX_DEVS 2 +#define MLXPLAT_CPLD_LPC_MUX_DEVS 3 /* Hotplug devices adapter numbers */ #define MLXPLAT_CPLD_NR_NONE -1 #define MLXPLAT_CPLD_PSU_DEFAULT_NR 10 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4 +#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 @@ -187,8 +213,24 @@ 
static const struct resource mlxplat_lpc_resources[] = { IORESOURCE_IO), }; +/* Platform i2c next generation systems data */ +static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = { + { + .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .mask = MLXPLAT_CPLD_I2C_CAP_MASK, + .bit = MLXPLAT_CPLD_I2C_CAP_BIT, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = { + { + .data = mlxplat_mlxcpld_i2c_ng_items_data, + }, +}; + /* Platform next generation systems i2c data */ static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = { + .items = mlxplat_mlxcpld_i2c_ng_items, .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX, .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET, @@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = { static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* Platform mux data */ -static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { +static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = { { .parent = 1, .base_nr = MLXPLAT_CPLD_CH1, @@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; +/* Platform mux configuration variables */ +static int mlxplat_max_adap_num; +static int mlxplat_mux_num; +static struct i2c_mux_reg_platform_data *mlxplat_mux_data; + +/* Platform extended mux data */ +static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = { + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH1, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH2, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH3, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2, + .reg_size = 1, + .idle_in_use = 1, + }, + +}; + /* Platform hotplug devices */ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { { @@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = { }, }; +/* Platform hotplug comex carrier system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + /* Platform hotplug default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { { @@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { }, }; +static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + { + .data = mlxplat_mlxcpld_comex_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + 
.inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .items = mlxplat_mlxcpld_default_items, @@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = { + .items = mlxplat_mlxcpld_comex_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK, +}; + static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = { { .label = "pwr1", @@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +/* Platform hotplug extended system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu3", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(2), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu4", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr3", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(2), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, + { + .label = "pwr4", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(3), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = { + { + .data = mlxplat_mlxcpld_ext_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_ext_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_NG_MASK, + .count = 
ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = { + .items = mlxplat_mlxcpld_ext_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + /* Platform led default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = { { @@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = { .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data), }; +/* Platform led for Comex based 100GbE systems */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = { + { + .label = "status:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "status:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK + }, + { + .label = "psu:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "psu:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan1:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan1:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan2:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan2:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan3:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan3:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan4:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan4:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "uid:blue", + .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, +}; + +static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = { + .data = mlxplat_mlxcpld_comex_100G_led_data, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data), +}; + /* Platform register access default */ static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = { { @@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = { .mode = 0200, }, { + .label = "select_iio", + .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0644, + }, + { .label = "asic_health", .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, .mask = MLXPLAT_CPLD_ASIC_MASK, @@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_platform", + .reg = 
MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_soc", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { .label = "reset_comex_wd", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(6), @@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_sw_pwr_off", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { .label = "reset_comex_thermal", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, .mask = GENMASK(7, 0) & ~BIT(3), @@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_ac_pwr_fail", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { .label = "psu1_on", .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(0), @@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = GENMASK(7, 0), .mode = 0444, }, + { + .label = "voltreg_update_status", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET, + .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK, + .bit = 5, + .mode = 0444, + }, + { + .label = "vpd_wp", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0644, + }, + { + .label = "pcie_asic_reset_dis", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0644, + }, + { + .label = "config1", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "config2", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "ufm_version", + .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, }; static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = { @@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET: @@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case 
MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET: @@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = { { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 }, }; +static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = { + { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET, + MLXPLAT_CPLD_LOW_AGGRCX_MASK }, + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, +}; + +static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = { + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 }, +}; + struct mlxplat_mlxcpld_regmap_context { void __iomem *base; }; @@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = { .reg_write = mlxplat_mlxcpld_reg_write, }; +static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_comex_default, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + +static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = 
mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_ng400, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"), }; @@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_default_channels[i]; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); @@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1914,7 +2380,57 @@ static int __init 
mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng; return 1; -}; +} + +static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data); + mlxplat_mux_data = mlxplat_extended_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_comex_data; + mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_led = &mlxplat_comex_100G_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex; + + return 1; +} + +static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_ext_data; + mlxplat_hotplug->deferred_nr = + mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; + mlxplat_led = &mlxplat_default_ng_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400; + + return 1; +} static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { @@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { }, }, { + .callback = mlxplat_dmi_comex_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"), + }, + }, + { + .callback = mlxplat_dmi_ng400_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"), + }, + }, + { .callback = mlxplat_dmi_msn274x_matched, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), @@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) /* Scan adapters from expected id to verify it is free. */ *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i < - MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) { + mlxplat_max_adap_num; i++) { search_adap = i2c_get_adapter(i); if (search_adap) { i2c_put_adapter(search_adap); @@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) } /* Return with error if free id for adapter is not found. */ - if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) + if (i == mlxplat_max_adap_num) return -ENODEV; /* Shift adapter ids, since expected parent adapter is not free. 
*/ *nr = i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { shift = *nr - mlxplat_mux_data[i].parent; mlxplat_mux_data[i].parent = *nr; mlxplat_mux_data[i].base_nr += shift; @@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void) if (nr < 0) goto fail_alloc; - nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr; + nr = (nr == mlxplat_max_adap_num) ? -1 : nr; if (mlxplat_i2c) mlxplat_i2c->regmap = priv->regmap; priv->pdev_i2c = platform_device_register_resndata( @@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void) goto fail_alloc; } - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { priv->pdev_mux[i] = platform_device_register_resndata( &priv->pdev_i2c->dev, "i2c-mux-reg", i, NULL, @@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void) platform_device_unregister(priv->pdev_led); platform_device_unregister(priv->pdev_hotplug); - for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--) + for (i = mlxplat_mux_num - 1; i >= 0 ; i--) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 72205771d03d..93177e6e5ecd 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-digma_citi_e200.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 640), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-gp-electronic-t701.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"), { } }; @@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1698), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v80-plus-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-onda-v820w-32g.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-onda-v891w-v1.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), 
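/*
 * Editorial note, not part of the patch: each property array in this
 * file is attached to the matching I2C client at boot -- the
 * touchscreen-min/size entries describe the coordinate range the
 * controller reports, "firmware-name" selects the per-board Silead
 * GSL firmware blob, and "silead,max-fingers" caps the number of
 * reported touch points.
 */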
PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1625), PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v891w-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 880), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-pipo-w2s.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"), { } }; @@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = { .properties = pipo_w2s_props, }; +static const struct property_entry pipo_w11_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 15), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data pipo_w11_data = { + .acpi_name = "MSSL1680:00", + .properties = pipo_w11_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), PROPERTY_ENTRY_U32("touchscreen-size-x", 1692), PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-pov-mobii-wintab-p800w-v20.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1794), PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-schneider-sct101ctm.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), 
PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-teclast_x98plus2.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1970), PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c11.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = { static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c13.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = { static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primetab-t13b.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), @@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1900), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3670-surftab-twin-10-1-st10432-8.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-surftab-wintron70-st70416-6.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Pipo W11 */ + .driver_data = (void *)&pipo_w11_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PIPO"), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + /* Above matches are too generic, add bios-ver match */ + DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"), + }, + }, + { /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */ .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { @@ -1032,8 +1041,7 @@ static const struct dmi_system_id 
touchscreen_dmi_table[] = {
 		.driver_data = (void *)&trekstor_surftab_wintron70_data,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
-			DMI_MATCH(DMI_PRODUCT_NAME,
-				  "SurfTab wintron 7.0 ST70416-6"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
 			/* Exact match, different versions need different fw */
 			DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
 		},
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
 }
 
 static int ts_dmi_notifier_call(struct notifier_block *nb,
-				unsigned long action, void *data)
+			       unsigned long action, void *data)
 {
 	struct device *dev = data;
 	struct i2c_client *client;
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 179b737280e1..c43d8ad02529 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -747,34 +747,12 @@ __skip:
 }
 
 /*
- * Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
-	int i, j;
-	unsigned char checksum = 0x6a, bit, b;
-
-	for (i = 0; i < 8; i++) {
-		b = data[i];
-		for (j = 0; j < 8; j++) {
-			bit = 0;
-			if (b & (1 << j))
-				bit = 1;
-			checksum =
-			    ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
-			    | (checksum >> 1);
-		}
-	}
-	return checksum;
-}
-
-/*
  * Build device list for all present ISA PnP devices.
  */
 static int __init isapnp_build_device_list(void)
 {
 	int csn;
-	unsigned char header[9], checksum;
+	unsigned char header[9];
 	struct pnp_card *card;
 	u32 eisa_id;
 	char id[8];
@@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void)
 	for (csn = 1; csn <= isapnp_csn_count; csn++) {
 		isapnp_wake(csn);
 		isapnp_peek(header, 9);
-		checksum = isapnp_checksum(header);
 		eisa_id = header[0] | header[1] << 8 |
 		    header[2] << 16 | header[3] << 24;
 		pnp_eisa_id_to_string(eisa_id, id);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 089b6244b716..b8fe166cd0d9 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -12,6 +12,22 @@ menuconfig POWER_AVS
 	  Say Y here to enable Adaptive Voltage Scaling class support.
 
+config QCOM_CPR
+	tristate "QCOM Core Power Reduction (CPR) support"
+	depends on POWER_AVS
+	select PM_OPP
+	select REGMAP
+	help
+	  Say Y here to enable support for the CPR hardware found on Qualcomm
+	  SoCs like QCS404.
+
+	  This driver populates CPU OPP tables and makes adjustments to the
+	  tables based on feedback from the CPR hardware. If you want to do
+	  CPU frequency scaling, say Y here.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qcom-cpr.
+
 config ROCKCHIP_IODOMAIN
 	tristate "Rockchip IO domain support"
 	depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index a1b8cd453f19..9007d05853e2 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_POWER_AVS_OMAP)	+= smartreflex.o
+obj-$(CONFIG_QCOM_CPR)		+= qcom-cpr.o
 obj-$(CONFIG_ROCKCHIP_IODOMAIN)	+= rockchip-io-domain.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..9192fb747653
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited + */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/debugfs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#include <linux/nvmem-consumer.h> + +/* Register Offsets for RB-CPR and Bit Definitions */ + +/* RBCPR Version Register */ +#define REG_RBCPR_VERSION 0 +#define RBCPR_VER_2 0x02 +#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) + +/* RBCPR Gate Count and Target Registers */ +#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n)) + +#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 +#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) +#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 +#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) + +/* RBCPR Timer Control */ +#define REG_RBCPR_TIMER_INTERVAL 0x44 +#define REG_RBIF_TIMER_ADJUST 0x4c + +#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 +#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 +#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) +#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 + +/* RBCPR Config Register */ +#define REG_RBIF_LIMIT 0x48 +#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) +#define RBIF_LIMIT_CEILING_SHIFT 6 +#define RBIF_LIMIT_FLOOR_BITS 6 +#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) + +#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK +#define RBIF_LIMIT_FLOOR_DEFAULT 0 + +#define REG_RBIF_SW_VLEVEL 0x94 +#define RBIF_SW_VLEVEL_DEFAULT 0x20 + +#define REG_RBCPR_STEP_QUOT 0x80 +#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 + +/* RBCPR Control Register */ +#define REG_RBCPR_CTL 0x90 + +#define RBCPR_CTL_LOOP_EN BIT(0) +#define RBCPR_CTL_TIMER_EN BIT(3) +#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) +#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) +#define RBCPR_CTL_COUNT_MODE BIT(10) +#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 +#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 + +/* RBCPR Ack/Nack Response */ +#define REG_RBIF_CONT_ACK_CMD 0x98 +#define REG_RBIF_CONT_NACK_CMD 0x9c + +/* RBCPR Result status Register */ +#define REG_RBCPR_RESULT_0 0xa0 + +#define RBCPR_RESULT0_BUSY_SHIFT 19 +#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) +#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 +#define RBCPR_RESULT0_ERROR_SHIFT 6 +#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) +#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 +#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) +#define RBCPR_RESULT0_STEP_UP_SHIFT 1 + +/* RBCPR Interrupt Control Register */ +#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n)) +#define REG_RBIF_IRQ_CLEAR 0x110 +#define REG_RBIF_IRQ_STATUS 0x114 + +#define CPR_INT_DONE BIT(0) +#define CPR_INT_MIN BIT(1) +#define CPR_INT_DOWN BIT(2) +#define CPR_INT_MID BIT(3) +#define CPR_INT_UP BIT(4) +#define CPR_INT_MAX BIT(5) +#define CPR_INT_CLAMP BIT(6) +#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ + CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) 
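+/*
+ * Editorial sketch, not part of the patch: the RBCPR registers pack
+ * their fields with the shift/mask pairs defined above.  A
+ * REG_RBCPR_GCNT_TARGET(n) value, for instance, carries a 12-bit
+ * target quotient in bits 11:0 and a 10-bit gate count in bits 21:12,
+ * so it could be assembled by a hypothetical helper like this:
+ */
+static inline u32 cpr_gcnt_target_val(u32 gcnt, u32 target)
+{
+	return ((gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+		RBCPR_GCNT_TARGET_GCNT_SHIFT) |
+	       ((target & RBCPR_GCNT_TARGET_TARGET_MASK) <<
+		RBCPR_GCNT_TARGET_TARGET_SHIFT);
+}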
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) + +#define CPR_NUM_RING_OSC 8 + +/* CPR eFuse parameters */ +#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) + +#define CPR_FUSE_MIN_QUOT_DIFF 50 + +#define FUSE_REVISION_UNKNOWN (-1) + +enum voltage_change_dir { + NO_CHANGE, + DOWN, + UP, +}; + +struct cpr_fuse { + char *ring_osc; + char *init_voltage; + char *quotient; + char *quotient_offset; +}; + +struct fuse_corner_data { + int ref_uV; + int max_uV; + int min_uV; + int max_volt_scale; + int max_quot_scale; + /* fuse quot */ + int quot_offset; + int quot_scale; + int quot_adjust; + /* fuse quot_offset */ + int quot_offset_scale; + int quot_offset_adjust; +}; + +struct cpr_fuses { + int init_voltage_step; + int init_voltage_width; + struct fuse_corner_data *fuse_corner_data; +}; + +struct corner_data { + unsigned int fuse_corner; + unsigned long freq; +}; + +struct cpr_desc { + unsigned int num_fuse_corners; + int min_diff_quot; + int *step_quot; + + unsigned int timer_delay_us; + unsigned int timer_cons_up; + unsigned int timer_cons_down; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int idle_clocks; + unsigned int gcnt_us; + unsigned int vdd_apc_step_up_limit; + unsigned int vdd_apc_step_down_limit; + unsigned int clamp_timer_interval; + + struct cpr_fuses cpr_fuses; + bool reduce_to_fuse_uV; + bool reduce_to_corner_uV; +}; + +struct acc_desc { + unsigned int enable_reg; + u32 enable_mask; + + struct reg_sequence *config; + struct reg_sequence *settings; + int num_regs_per_fuse; +}; + +struct cpr_acc_desc { + const struct cpr_desc *cpr_desc; + const struct acc_desc *acc_desc; +}; + +struct fuse_corner { + int min_uV; + int max_uV; + int uV; + int quot; + int step_quot; + const struct reg_sequence *accs; + int num_accs; + unsigned long max_freq; + u8 ring_osc_idx; +}; + +struct corner { + int min_uV; + int max_uV; + int uV; + int last_uV; + int quot_adjust; + u32 save_ctl; + u32 save_irq; + unsigned long freq; + struct fuse_corner *fuse_corner; +}; + +struct cpr_drv { + unsigned int num_corners; + unsigned int ref_clk_khz; + + struct generic_pm_domain pd; + struct device *dev; + struct device *attached_cpu_dev; + struct mutex lock; + void __iomem *base; + struct corner *corner; + struct regulator *vdd_apc; + struct clk *cpu_clk; + struct regmap *tcsr; + bool loop_disabled; + u32 gcnt; + unsigned long flags; + + struct fuse_corner *fuse_corners; + struct corner *corners; + + const struct cpr_desc *desc; + const struct acc_desc *acc_desc; + const struct cpr_fuse *cpr_fuses; + + struct dentry *debugfs; +}; + +static bool cpr_is_allowed(struct cpr_drv *drv) +{ + return !drv->loop_disabled; +} + +static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) +{ + writel_relaxed(value, drv->base + offset); +} + +static u32 cpr_read(struct cpr_drv *drv, u32 offset) +{ + return readl_relaxed(drv->base + offset); +} + +static void +cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(drv->base + offset); + val &= ~mask; + val |= value & mask; + writel_relaxed(val, drv->base + offset); +} + +static void cpr_irq_clr(struct cpr_drv *drv) +{ + cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); +} + +static void cpr_irq_clr_nack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); +} + +static void cpr_irq_clr_ack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); +} + +static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) +{ + 
cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); +} + +static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) +{ + cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); +} + +static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) +{ + u32 val, mask; + const struct cpr_desc *desc = drv->desc; + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); + cpr_masked_write(drv, REG_RBCPR_CTL, + RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, + corner->save_ctl); + cpr_irq_set(drv, corner->save_irq); + + if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV) + val = RBCPR_CTL_LOOP_EN; + else + val = 0; + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); +} + +static void cpr_ctl_disable(struct cpr_drv *drv) +{ + cpr_irq_set(drv, 0); + cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, + RBIF_TIMER_ADJ_CONS_UP_MASK | + RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); +} + +static bool cpr_ctl_is_enabled(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_CTL); + return reg_val & RBCPR_CTL_LOOP_EN; +} + +static bool cpr_ctl_is_busy(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); + return reg_val & RBCPR_RESULT0_BUSY_MASK; +} + +static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) +{ + corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); + corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); +} + +static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) +{ + u32 gcnt, ctl, irq, ro_sel, step_quot; + struct fuse_corner *fuse = corner->fuse_corner; + const struct cpr_desc *desc = drv->desc; + int i; + + ro_sel = fuse->ring_osc_idx; + gcnt = drv->gcnt; + gcnt |= fuse->quot - corner->quot_adjust; + + /* Program the step quotient and idle clocks */ + step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; + step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK; + cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); + ctl = corner->save_ctl; + cpr_write(drv, REG_RBCPR_CTL, ctl); + irq = corner->save_irq; + cpr_irq_set(drv, irq); + dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt, + ctl, irq); +} + +static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, + struct fuse_corner *end) +{ + if (f == end) + return; + + if (f < end) { + for (f += 1; f <= end; f++) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } else { + for (f -= 1; f >= end; f--) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } +} + +static int cpr_pre_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == DOWN) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_post_voltage(struct cpr_drv *drv, + struct 
fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == UP) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner, + int new_uV, enum voltage_change_dir dir) +{ + int ret; + struct fuse_corner *fuse_corner = corner->fuse_corner; + + ret = cpr_pre_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV); + if (ret) { + dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n", + new_uV); + return ret; + } + + ret = cpr_post_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + return 0; +} + +static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv) +{ + return drv->corner ? drv->corner - drv->corners + 1 : 0; +} + +static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) +{ + u32 val, error_steps, reg_mask; + int last_uV, new_uV, step_uV, ret; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + if (dir != UP && dir != DOWN) + return 0; + + step_uV = regulator_get_linear_step(drv->vdd_apc); + if (!step_uV) + return -EINVAL; + + corner = drv->corner; + + val = cpr_read(drv, REG_RBCPR_RESULT_0); + + error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; + error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; + last_uV = corner->last_uV; + + if (dir == UP) { + if (desc->clamp_timer_interval && + error_steps < desc->up_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. + */ + error_steps = max(desc->up_threshold, + desc->vdd_apc_step_up_limit); + } + + if (last_uV >= corner->max_uV) { + cpr_irq_clr_nack(drv); + + /* Maximize the UP threshold */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = reg_mask; + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable UP interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_up_limit) + error_steps = desc->vdd_apc_step_up_limit; + + /* Calculate new voltage */ + new_uV = last_uV + error_steps * step_uV; + new_uV = min(new_uV, corner->max_uV); + + dev_dbg(drv->dev, + "UP: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } else if (dir == DOWN) { + if (desc->clamp_timer_interval && + error_steps < desc->down_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. 
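 *
 * (A worked example of the surrounding arithmetic, using made-up
 * numbers: with a regulator linear step of 12500 uV and error_steps = 2
 * read back from RBCPR_RESULT_0, the UP branch above proposes
 * new_uV = min(last_uV + 2 * 12500, corner->max_uV) and the DOWN
 * branch below proposes new_uV = max(last_uV - 2 * 12500,
 * corner->min_uV), so a last_uV of 1200000 moves to 1225000 uV or
 * 1175000 uV before the per-corner limits are applied.)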
+ */ + error_steps = max(desc->down_threshold, + desc->vdd_apc_step_down_limit); + } + + if (last_uV <= corner->min_uV) { + cpr_irq_clr_nack(drv); + + /* Enable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable DOWN interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_down_limit) + error_steps = desc->vdd_apc_step_down_limit; + + /* Calculate new voltage */ + new_uV = last_uV - error_steps * step_uV; + new_uV = max(new_uV, corner->min_uV); + + dev_dbg(drv->dev, + "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) { + cpr_irq_clr_nack(drv); + return ret; + } + drv->corner->last_uV = new_uV; + + if (dir == UP) { + /* Disable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = 0; + } else if (dir == DOWN) { + /* Restore default threshold for UP */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = desc->up_threshold; + val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + } + + cpr_ctl_modify(drv, reg_mask, val); + + /* Re-enable default interrupts */ + cpr_irq_set(drv, CPR_INT_DEFAULT); + + /* Ack */ + cpr_irq_clr_ack(drv); + + return 0; +} + +static irqreturn_t cpr_irq_handler(int irq, void *dev) +{ + struct cpr_drv *drv = dev; + const struct cpr_desc *desc = drv->desc; + irqreturn_t ret = IRQ_HANDLED; + u32 val; + + mutex_lock(&drv->lock); + + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + + dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); + + if (!cpr_ctl_is_enabled(drv)) { + dev_dbg(drv->dev, "CPR is disabled\n"); + ret = IRQ_NONE; + } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) { + dev_dbg(drv->dev, "CPR measurement is not ready\n"); + } else if (!cpr_is_allowed(drv)) { + val = cpr_read(drv, REG_RBCPR_CTL); + dev_err_ratelimited(drv->dev, + "Interrupt broken? 
RBCPR_CTL = %#02x\n", + val); + ret = IRQ_NONE; + } else { + /* + * Following sequence of handling is as per each IRQ's + * priority + */ + if (val & CPR_INT_UP) { + cpr_scale(drv, UP); + } else if (val & CPR_INT_DOWN) { + cpr_scale(drv, DOWN); + } else if (val & CPR_INT_MIN) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MAX) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MID) { + /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ + dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); + } else { + dev_dbg(drv->dev, + "IRQ occurred for unknown flag (%#08x)\n", val); + } + + /* Save register values for the corner */ + cpr_corner_save(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_enable(struct cpr_drv *drv) +{ + int ret; + + ret = regulator_enable(drv->vdd_apc); + if (ret) + return ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv) && drv->corner) { + cpr_irq_clr(drv); + cpr_corner_restore(drv, drv->corner); + cpr_ctl_enable(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return 0; +} + +static int cpr_disable(struct cpr_drv *drv) +{ + int ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_clr(drv); + } + + mutex_unlock(&drv->lock); + + ret = regulator_disable(drv->vdd_apc); + if (ret) + return ret; + + return 0; +} + +static int cpr_config(struct cpr_drv *drv) +{ + int i; + u32 val, gcnt; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + /* Disable interrupt and CPR */ + cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); + cpr_write(drv, REG_RBCPR_CTL, 0); + + /* Program the default HW ceiling, floor and vlevel */ + val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK) + << RBIF_LIMIT_CEILING_SHIFT; + val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK; + cpr_write(drv, REG_RBIF_LIMIT, val); + cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); + + /* + * Clear the target quotient value and gate count of all + * ring oscillators + */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + /* Init and save gcnt */ + gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000; + gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; + gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; + drv->gcnt = gcnt; + + /* Program the delay count for the timer */ + val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000; + cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); + dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val, + desc->timer_delay_us); + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; + cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); + + /* Program the control register */ + val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; + val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; + val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; + val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; + cpr_write(drv, REG_RBCPR_CTL, val); + + for (i = 0; i < drv->num_corners; i++) { + corner = &drv->corners[i]; + corner->save_ctl = val; + corner->save_irq = CPR_INT_DEFAULT; + } + + cpr_irq_set(drv, CPR_INT_DEFAULT); + + val = cpr_read(drv, REG_RBCPR_VERSION); + if (val <= RBCPR_VER_2) + drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; + + return 0; +} + +static int cpr_set_performance_state(struct generic_pm_domain *domain, + unsigned int state) +{ + struct cpr_drv *drv = 
container_of(domain, struct cpr_drv, pd); + struct corner *corner, *end; + enum voltage_change_dir dir; + int ret = 0, new_uV; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", + __func__, state, cpr_get_cur_perf_state(drv)); + + /* + * Determine new corner we're going to. + * Remove one since lowest performance state is 1. + */ + corner = drv->corners + state - 1; + end = &drv->corners[drv->num_corners - 1]; + if (corner > end || corner < drv->corners) { + ret = -EINVAL; + goto unlock; + } + + /* Determine direction */ + if (drv->corner > corner) + dir = DOWN; + else if (drv->corner < corner) + dir = UP; + else + dir = NO_CHANGE; + + if (cpr_is_allowed(drv)) + new_uV = corner->last_uV; + else + new_uV = corner->uV; + + if (cpr_is_allowed(drv)) + cpr_ctl_disable(drv); + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) + goto unlock; + + if (cpr_is_allowed(drv)) { + cpr_irq_clr(drv); + if (drv->corner != corner) + cpr_corner_restore(drv, corner); + cpr_ctl_enable(drv, corner); + } + + drv->corner = corner; + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data) +{ + struct nvmem_cell *cell; + ssize_t len; + char *ret; + int i; + + *data = 0; + + cell = nvmem_cell_get(dev, cname); + if (IS_ERR(cell)) { + if (PTR_ERR(cell) != -EPROBE_DEFER) + dev_err(dev, "undefined cell %s\n", cname); + return PTR_ERR(cell); + } + + ret = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(ret)) { + dev_err(dev, "can't read cell %s\n", cname); + return PTR_ERR(ret); + } + + for (i = 0; i < len; i++) + *data |= ret[i] << (8 * i); + + kfree(ret); + dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len); + + return 0; +} + +static int +cpr_populate_ring_osc_idx(struct cpr_drv *drv) +{ + struct fuse_corner *fuse = drv->fuse_corners; + struct fuse_corner *end = fuse + drv->desc->num_fuse_corners; + const struct cpr_fuse *fuses = drv->cpr_fuses; + u32 data; + int ret; + + for (; fuse < end; fuse++, fuses++) { + ret = cpr_read_efuse(drv->dev, fuses->ring_osc, + &data); + if (ret) + return ret; + fuse->ring_osc_idx = data; + } + + return 0; +} + +static int cpr_read_fuse_uV(const struct cpr_desc *desc, + const struct fuse_corner_data *fdata, + const char *init_v_efuse, + int step_volt, + struct cpr_drv *drv) +{ + int step_size_uV, steps, uV; + u32 bits = 0; + int ret; + + ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits); + if (ret) + return ret; + + steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1); + /* Not two's complement.. 
instead the highest bit is a sign bit (e.g. 0x23 in a 6-bit field decodes as -3 steps) */
+	if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+		steps = -steps;
+
+	step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+	uV = fdata->ref_uV + steps * step_size_uV;
+	return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+	const struct cpr_desc *desc = drv->desc;
+	const struct cpr_fuse *fuses = drv->cpr_fuses;
+	const struct acc_desc *acc_desc = drv->acc_desc;
+	int i;
+	unsigned int step_volt;
+	struct fuse_corner_data *fdata;
+	struct fuse_corner *fuse, *end;
+	int uV;
+	const struct reg_sequence *accs;
+	int ret;
+
+	accs = acc_desc->settings;
+
+	step_volt = regulator_get_linear_step(drv->vdd_apc);
+	if (!step_volt)
+		return -EINVAL;
+
+	/* Populate fuse_corner members */
+	fuse = drv->fuse_corners;
+	end = &fuse[desc->num_fuse_corners - 1];
+	fdata = desc->cpr_fuses.fuse_corner_data;
+
+	for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+		/*
+		 * Update SoC voltages: platforms might choose a different
+		 * regulator than the one used to characterize the algorithms
+		 * (i.e., init_voltage_step).
+		 */
+		fdata->min_uV = roundup(fdata->min_uV, step_volt);
+		fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+		/* Populate uV */
+		uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+				      step_volt, drv);
+		if (uV < 0)
+			return uV;
+
+		fuse->min_uV = fdata->min_uV;
+		fuse->max_uV = fdata->max_uV;
+		fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+		if (fuse == end) {
+			/*
+			 * Allow the highest fuse corner's PVS voltage to
+			 * define the ceiling voltage for that corner in order
+			 * to support SoCs in which variable ceiling values
+			 * are required.
+			 */
+			end->max_uV = max(end->max_uV, end->uV);
+		}
+
+		/* Populate target quotient by scaling */
+		ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+		if (ret)
+			return ret;
+
+		fuse->quot *= fdata->quot_scale;
+		fuse->quot += fdata->quot_offset;
+		fuse->quot += fdata->quot_adjust;
+		fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+		/* Populate acc settings */
+		fuse->accs = accs;
+		fuse->num_accs = acc_desc->num_regs_per_fuse;
+		accs += acc_desc->num_regs_per_fuse;
+	}
+
+	/*
+	 * Restrict all fuse corner PVS voltages based upon per corner
+	 * ceiling and floor voltages.
+	 */
+	for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+		if (fuse->uV > fuse->max_uV)
+			fuse->uV = fuse->max_uV;
+		else if (fuse->uV < fuse->min_uV)
+			fuse->uV = fuse->min_uV;
+
+		ret = regulator_is_supported_voltage(drv->vdd_apc,
+						     fuse->min_uV,
+						     fuse->min_uV);
+		if (!ret) {
+			dev_err(drv->dev,
+				"min uV: %d (fuse corner: %d) not supported by regulator\n",
+				fuse->min_uV, i);
+			return -EINVAL;
+		}
+
+		ret = regulator_is_supported_voltage(drv->vdd_apc,
+						     fuse->max_uV,
+						     fuse->max_uV);
+		if (!ret) {
+			dev_err(drv->dev,
+				"max uV: %d (fuse corner: %d) not supported by regulator\n",
+				fuse->max_uV, i);
+			return -EINVAL;
+		}
+
+		dev_dbg(drv->dev,
+			"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+			i, fuse->min_uV, fuse->uV, fuse->max_uV,
+			fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+	}
+
+	return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+				 struct cpr_drv *drv,
+				 const struct fuse_corner_data *fdata,
+				 const struct corner *corner)
+{
+	u32 quot_diff = 0;
+	unsigned long freq_diff;
+	int scaling;
+	const struct fuse_corner *fuse, *prev_fuse;
+	int ret;
+
+	fuse = corner->fuse_corner;
+	prev_fuse = fuse - 1;
+
+	if (quot_offset) {
+		ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+		if (ret)
+			return ret;
+
+		quot_diff *= fdata->quot_offset_scale;
+		quot_diff += fdata->quot_offset_adjust;
+	} else {
+		quot_diff = fuse->quot - prev_fuse->quot;
+	}
+
+	freq_diff = fuse->max_freq - prev_fuse->max_freq;
+	freq_diff /= 1000000; /* Convert to MHz */
+	scaling = 1000 * quot_diff / freq_diff;
+	return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+			   const struct fuse_corner_data *fdata)
+{
+	unsigned long f_high, f_low, f_diff;
+	int uV_high, uV_low, uV;
+	u64 temp, temp_limit;
+	const struct fuse_corner *fuse, *prev_fuse;
+
+	fuse = corner->fuse_corner;
+	prev_fuse = fuse - 1;
+
+	f_high = fuse->max_freq;
+	f_low = prev_fuse->max_freq;
+	uV_high = fuse->uV;
+	uV_low = prev_fuse->uV;
+	f_diff = fuse->max_freq - corner->freq;
+
+	/*
+	 * Don't interpolate in the wrong direction. This could happen
+	 * if the adjusted fuse voltage overlaps with the previous fuse's
+	 * adjusted voltage.
+	 */
+	if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+		return corner->uV;
+
+	temp = f_diff * (uV_high - uV_low);
+	do_div(temp, f_high - f_low);
+
+	/*
+	 * max_volt_scale has units of uV/MHz while freq values
+	 * have units of Hz.  Divide by 1000000 to convert the result to uV.
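 *
 * A worked example with hypothetical numbers: take f_low = 1.1 GHz,
 * f_high = 1.4 GHz, uV_low = 1100000, uV_high = 1250000 and a corner
 * frequency of 1.2 GHz, so f_diff = 200 MHz.  Then
 *
 *     temp       = 200000000 * (1250000 - 1100000) / 300000000 = 100000
 *     temp_limit = 200000000 * 2000 / 1000000 = 400000
 *                  (assuming max_volt_scale = 2000 uV/MHz, as in the
 *                  qcs404 fuse corner data below)
 *
 * so the interpolated voltage is 1250000 - min(100000, 400000) =
 * 1150000 uV, rounded up to a regulator step boundary.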
+ */ + temp_limit = f_diff * fdata->max_volt_scale; + do_div(temp_limit, 1000000); + + uV = uV_high - min(temp, temp_limit); + return roundup(uV, step_volt); +} + +static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) +{ + struct device_node *np; + unsigned int fuse_corner = 0; + + np = dev_pm_opp_get_of_node(opp); + if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner)) + pr_err("%s: missing 'qcom,opp-fuse-level' property\n", + __func__); + + of_node_put(np); + + return fuse_corner; +} + +static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, + struct device *cpu_dev) +{ + u64 rate = 0; + struct device_node *ref_np; + struct device_node *desc_np; + struct device_node *child_np = NULL; + struct device_node *child_req_np = NULL; + + desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!desc_np) + return 0; + + ref_np = dev_pm_opp_get_of_node(ref); + if (!ref_np) + goto out_ref; + + do { + of_node_put(child_req_np); + child_np = of_get_next_available_child(desc_np, child_np); + child_req_np = of_parse_phandle(child_np, "required-opps", 0); + } while (child_np && child_req_np != ref_np); + + if (child_np && child_req_np == ref_np) + of_property_read_u64(child_np, "opp-hz", &rate); + + of_node_put(child_req_np); + of_node_put(child_np); + of_node_put(ref_np); +out_ref: + of_node_put(desc_np); + + return (unsigned long) rate; +} + +static int cpr_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + int i, level, scaling = 0; + unsigned int fnum, fc; + const char *quot_offset; + struct fuse_corner *fuse, *prev_fuse; + struct corner *corner, *end; + struct corner_data *cdata; + const struct fuse_corner_data *fdata; + bool apply_scaling; + unsigned long freq_diff, freq_diff_mhz; + unsigned long freq; + int step_volt = regulator_get_linear_step(drv->vdd_apc); + struct dev_pm_opp *opp; + + if (!step_volt) + return -EINVAL; + + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + + cdata = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(struct corner_data), + GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + /* + * Store maximum frequency for each fuse corner based on the frequency + * plan + */ + for (level = 1; level <= drv->num_corners; level++) { + opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level); + if (IS_ERR(opp)) + return -EINVAL; + fc = cpr_get_fuse_corner(opp); + if (!fc) { + dev_pm_opp_put(opp); + return -EINVAL; + } + fnum = fc - 1; + freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev); + if (!freq) { + dev_pm_opp_put(opp); + return -EINVAL; + } + cdata[level - 1].fuse_corner = fnum; + cdata[level - 1].freq = freq; + + fuse = &drv->fuse_corners[fnum]; + dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n", + freq, dev_pm_opp_get_level(opp) - 1, fnum); + if (freq > fuse->max_freq) + fuse->max_freq = freq; + dev_pm_opp_put(opp); + } + + /* + * Get the quotient adjustment scaling factor, according to: + * + * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) + * / (freq(corner_N) - freq(corner_N-1)), max_factor) + * + * QUOT(corner_N): quotient read from fuse for fuse corner N + * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) + * freq(corner_N): max frequency in MHz supported by fuse corner N + * freq(corner_N-1): max frequency in MHz supported by fuse corner + * (N - 1) + * + * Then walk through the corners mapped to each fuse corner + * and calculate the quotient adjustment for each one using the + * following formula: + 
*
+	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+	 *
+	 * freq_max: max frequency in MHz supported by the fuse corner
+	 * freq_corner: frequency in MHz corresponding to the corner
+	 * scaling: calculated from above equation
+	 *
+	 * For example (hypothetical numbers): if QUOT grows from 1000 to
+	 * 1300 between two fuse corners whose max frequencies are 1100 MHz
+	 * and 1400 MHz, scaling = 1000 * 300 / 300 = 1000, and a corner
+	 * running 100 MHz below its fuse corner's max frequency gets
+	 * quot_adjust = 100 * 1000 / 1000 = 100.
+	 *
+	 *   +                             +
+	 *   |                           v |
+	 * q |           f c             o |           f c
+	 * u |         c                 l |         c
+	 * o |       f                   t |       f
+	 * t |     c                     a |     c
+	 *   |   c f                     g |   c f
+	 *   |                           e |
+	 *   +---------------              +----------------
+	 *     0 1 2 3 4 5 6                 0 1 2 3 4 5 6
+	 *        corner                        corner
+	 *
+	 *    c = corner
+	 *    f = fuse corner
+	 *
+	 */
+	for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+		fnum = cdata[i].fuse_corner;
+		fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+		quot_offset = fuses[fnum].quotient_offset;
+		fuse = &drv->fuse_corners[fnum];
+		if (fnum)
+			prev_fuse = &drv->fuse_corners[fnum - 1];
+		else
+			prev_fuse = NULL;
+
+		corner->fuse_corner = fuse;
+		corner->freq = cdata[i].freq;
+		corner->uV = fuse->uV;
+
+		if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+			scaling = cpr_calculate_scaling(quot_offset, drv,
+							fdata, corner);
+			if (scaling < 0)
+				return scaling;
+
+			apply_scaling = true;
+		} else if (corner->freq == fuse->max_freq) {
+			/* This is a fuse corner; don't scale anything */
+			apply_scaling = false;
+		}
+
+		if (apply_scaling) {
+			freq_diff = fuse->max_freq - corner->freq;
+			freq_diff_mhz = freq_diff / 1000000;
+			corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+			corner->uV = cpr_interpolate(corner, step_volt, fdata);
+		}
+
+		corner->max_uV = fuse->max_uV;
+		corner->min_uV = fuse->min_uV;
+		corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+		corner->last_uV = corner->uV;
+
+		/* Reduce the ceiling voltage if needed */
+		if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+			corner->max_uV = corner->uV;
+		else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+			corner->max_uV = max(corner->min_uV, fuse->uV);
+
+		dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+			corner->min_uV, corner->uV, corner->max_uV,
+			fuse->quot - corner->quot_adjust);
+	}
+
+	return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+	const struct cpr_desc *desc = drv->desc;
+	struct cpr_fuse *fuses;
+	int i;
+
+	fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+			     sizeof(struct cpr_fuse),
+			     GFP_KERNEL);
+	if (!fuses)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < desc->num_fuse_corners; i++) {
+		char tbuf[32];
+
+		snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+		fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+		if (!fuses[i].ring_osc)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+		fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+						     GFP_KERNEL);
+		if (!fuses[i].init_voltage)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+		fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+		if (!fuses[i].quotient)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+		fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+							GFP_KERNEL);
+		if (!fuses[i].quotient_offset)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+	drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+	const struct cpr_desc *desc = drv->desc;
+	struct clk *clk;
+
+	clk = clk_get(drv->dev, "ref");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+	clk_put(clk);
+
+	if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+	    
desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK || + desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK || + desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK || + desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK || + desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK) + return -EINVAL; + + dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", + desc->up_threshold, desc->down_threshold); + + return 0; +} + +static int cpr_find_initial_corner(struct cpr_drv *drv) +{ + unsigned long rate; + const struct corner *end; + struct corner *iter; + unsigned int i = 0; + + if (!drv->cpu_clk) { + dev_err(drv->dev, "cannot get rate from NULL clk\n"); + return -EINVAL; + } + + end = &drv->corners[drv->num_corners - 1]; + rate = clk_get_rate(drv->cpu_clk); + + /* + * Some bootloaders set a CPU clock frequency that is not defined + * in the OPP table. When running at an unlisted frequency, + * cpufreq_online() will change to the OPP which has the lowest + * frequency, at or above the unlisted frequency. + * Since cpufreq_online() always "rounds up" in the case of an + * unlisted frequency, this function always "rounds down" in case + * of an unlisted frequency. That way, when cpufreq_online() + * triggers the first ever call to cpr_set_performance_state(), + * it will correctly determine the direction as UP. + */ + for (iter = drv->corners; iter <= end; iter++) { + if (iter->freq > rate) + break; + i++; + if (iter->freq == rate) { + drv->corner = iter; + break; + } + if (iter->freq < rate) + drv->corner = iter; + } + + if (!drv->corner) { + dev_err(drv->dev, "boot up corner not found\n"); + return -EINVAL; + } + + dev_dbg(drv->dev, "boot up perf state: %u\n", i); + + return 0; +} + +static const struct cpr_desc qcs404_cpr_desc = { + .num_fuse_corners = 3, + .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, + .step_quot = (int []){ 25, 25, 25, }, + .timer_delay_us = 5000, + .timer_cons_up = 0, + .timer_cons_down = 2, + .up_threshold = 1, + .down_threshold = 3, + .idle_clocks = 15, + .gcnt_us = 1, + .vdd_apc_step_up_limit = 1, + .vdd_apc_step_down_limit = 1, + .cpr_fuses = { + .init_voltage_step = 8000, + .init_voltage_width = 6, + .fuse_corner_data = (struct fuse_corner_data[]){ + /* fuse corner 0 */ + { + .ref_uV = 1224000, + .max_uV = 1224000, + .min_uV = 1048000, + .max_volt_scale = 0, + .max_quot_scale = 0, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 1 */ + { + .ref_uV = 1288000, + .max_uV = 1288000, + .min_uV = 1048000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = -20, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 2 */ + { + .ref_uV = 1352000, + .max_uV = 1384000, + .min_uV = 1088000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + }, + }, +}; + +static const struct acc_desc qcs404_acc_desc = { + .settings = (struct reg_sequence[]){ + { 0xb120, 0x1041040 }, + { 0xb124, 0x41 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + }, + .config = (struct reg_sequence[]){ + { 0xb138, 0xff }, + { 0xb130, 0x5555 }, + }, + .num_regs_per_fuse = 2, +}; + +static const struct cpr_acc_desc qcs404_cpr_acc_desc = { + .cpr_desc = &qcs404_cpr_desc, + .acc_desc = &qcs404_acc_desc, +}; + +static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd, + struct 
dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int cpr_power_off(struct generic_pm_domain *domain) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + + return cpr_disable(drv); +} + +static int cpr_power_on(struct generic_pm_domain *domain) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + + return cpr_enable(drv); +} + +static int cpr_pd_attach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + const struct acc_desc *acc_desc = drv->acc_desc; + int ret = 0; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev)); + + /* + * This driver only supports scaling voltage for a CPU cluster + * where all CPUs in the cluster share a single regulator. + * Therefore, save the struct device pointer only for the first + * CPU device that gets attached. There is no need to do any + * additional initialization when further CPUs get attached. + */ + if (drv->attached_cpu_dev) + goto unlock; + + /* + * cpr_scale_voltage() requires the direction (if we are changing + * to a higher or lower OPP). The first time + * cpr_set_performance_state() is called, there is no previous + * performance state defined. Therefore, we call + * cpr_find_initial_corner() that gets the CPU clock frequency + * set by the bootloader, so that we can determine the direction + * the first time cpr_set_performance_state() is called. + */ + drv->cpu_clk = devm_clk_get(dev, NULL); + if (IS_ERR(drv->cpu_clk)) { + ret = PTR_ERR(drv->cpu_clk); + if (ret != -EPROBE_DEFER) + dev_err(drv->dev, "could not get cpu clk: %d\n", ret); + goto unlock; + } + drv->attached_cpu_dev = dev; + + dev_dbg(drv->dev, "using cpu clk from: %s\n", + dev_name(drv->attached_cpu_dev)); + + /* + * Everything related to (virtual) corners has to be initialized + * here, when attaching to the power domain, since we need to know + * the maximum frequency for each fuse corner, and this is only + * available after the cpufreq driver has attached to us. + * The reason for this is that we need to know the highest + * frequency associated with each fuse corner. 
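 *
 * For illustration, the linkage walked later by cpr_corner_init() looks
 * roughly like this in the device tree (a hypothetical fragment; node
 * names and values are examples only):
 *
 *     cpr_opp_table: opp-table {
 *             cpr_opp1: opp1 {
 *                     opp-level = <1>;
 *                     qcom,opp-fuse-level = <1>;
 *             };
 *     };
 *
 *     cpu_opp_table: opp-table-cpu {
 *             opp-1094400000 {
 *                     opp-hz = /bits/ 64 <1094400000>;
 *                     required-opps = <&cpr_opp1>;
 *             };
 *     };
 *
 * cpr_get_opp_hz_for_req() resolves the CPU OPP whose required-opps
 * phandle points at one of our opp-level nodes, which is how each
 * (virtual) corner learns its frequency.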
+ */ + ret = dev_pm_opp_get_opp_count(&drv->pd.dev); + if (ret < 0) { + dev_err(drv->dev, "could not get OPP count\n"); + goto unlock; + } + drv->num_corners = ret; + + if (drv->num_corners < 2) { + dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); + ret = -EINVAL; + goto unlock; + } + + dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners); + + drv->corners = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(*drv->corners), + GFP_KERNEL); + if (!drv->corners) { + ret = -ENOMEM; + goto unlock; + } + + ret = cpr_corner_init(drv); + if (ret) + goto unlock; + + cpr_set_loop_allowed(drv); + + ret = cpr_init_parameters(drv); + if (ret) + goto unlock; + + /* Configure CPR HW but keep it disabled */ + ret = cpr_config(drv); + if (ret) + goto unlock; + + ret = cpr_find_initial_corner(drv); + if (ret) + goto unlock; + + if (acc_desc->config) + regmap_multi_reg_write(drv->tcsr, acc_desc->config, + acc_desc->num_regs_per_fuse); + + /* Enable ACC if required */ + if (acc_desc->enable_mask) + regmap_update_bits(drv->tcsr, acc_desc->enable_reg, + acc_desc->enable_mask, + acc_desc->enable_mask); + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_debug_info_show(struct seq_file *s, void *unused) +{ + u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps; + u32 step_dn, step_up, error, error_lt0, busy; + struct cpr_drv *drv = s->private; + struct fuse_corner *fuse_corner; + struct corner *corner; + + corner = drv->corner; + fuse_corner = corner->fuse_corner; + + seq_printf(s, "corner, current_volt = %d uV\n", + corner->last_uV); + + ro_sel = fuse_corner->ring_osc_idx; + gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel)); + seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt); + + ctl = cpr_read(drv, REG_RBCPR_CTL); + seq_printf(s, "rbcpr_ctl = %#02X\n", ctl); + + irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS); + seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status); + + reg = cpr_read(drv, REG_RBCPR_RESULT_0); + seq_printf(s, "rbcpr_result_0 = %#02X\n", reg); + + step_dn = reg & 0x01; + step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01; + seq_printf(s, " [step_dn = %u", step_dn); + + seq_printf(s, ", step_up = %u", step_up); + + error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) + & RBCPR_RESULT0_ERROR_STEPS_MASK; + seq_printf(s, ", error_steps = %u", error_steps); + + error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK; + seq_printf(s, ", error = %u", error); + + error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01; + seq_printf(s, ", error_lt_0 = %u", error_lt0); + + busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01; + seq_printf(s, ", busy = %u]\n", busy); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cpr_debug_info); + +static void cpr_debugfs_init(struct cpr_drv *drv) +{ + drv->debugfs = debugfs_create_dir("qcom_cpr", NULL); + + debugfs_create_file("debug_info", 0444, drv->debugfs, + drv, &cpr_debug_info_fops); +} + +static int cpr_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct cpr_drv *drv; + int irq, ret; + const struct cpr_acc_desc *data; + struct device_node *np; + u32 cpr_rev = FUSE_REVISION_UNKNOWN; + + data = of_device_get_match_data(dev); + if (!data || !data->cpr_desc || !data->acc_desc) + return -EINVAL; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + drv->dev = dev; + drv->desc = data->cpr_desc; + drv->acc_desc = data->acc_desc; + + drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners, + 
sizeof(*drv->fuse_corners), + GFP_KERNEL); + if (!drv->fuse_corners) + return -ENOMEM; + + np = of_parse_phandle(dev->of_node, "acc-syscon", 0); + if (!np) + return -ENODEV; + + drv->tcsr = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(drv->tcsr)) + return PTR_ERR(drv->tcsr); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + drv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + drv->vdd_apc = devm_regulator_get(dev, "vdd-apc"); + if (IS_ERR(drv->vdd_apc)) + return PTR_ERR(drv->vdd_apc); + + /* + * Initialize fuse corners, since it simply depends + * on data in efuses. + * Everything related to (virtual) corners has to be + * initialized after attaching to the power domain, + * since it depends on the CPU's OPP table. + */ + ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev); + if (ret) + return ret; + + drv->cpr_fuses = cpr_get_fuses(drv); + if (IS_ERR(drv->cpr_fuses)) + return PTR_ERR(drv->cpr_fuses); + + ret = cpr_populate_ring_osc_idx(drv); + if (ret) + return ret; + + ret = cpr_fuse_corner_init(drv); + if (ret) + return ret; + + mutex_init(&drv->lock); + + ret = devm_request_threaded_irq(dev, irq, NULL, + cpr_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_RISING, + "cpr", drv); + if (ret) + return ret; + + drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name, + GFP_KERNEL); + if (!drv->pd.name) + return -EINVAL; + + drv->pd.power_off = cpr_power_off; + drv->pd.power_on = cpr_power_on; + drv->pd.set_performance_state = cpr_set_performance_state; + drv->pd.opp_to_performance_state = cpr_get_performance_state; + drv->pd.attach_dev = cpr_pd_attach_dev; + + ret = pm_genpd_init(&drv->pd, NULL, true); + if (ret) + return ret; + + ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); + if (ret) + return ret; + + platform_set_drvdata(pdev, drv); + cpr_debugfs_init(drv); + + return 0; +} + +static int cpr_remove(struct platform_device *pdev) +{ + struct cpr_drv *drv = platform_get_drvdata(pdev); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_set(drv, 0); + } + + of_genpd_del_provider(pdev->dev.of_node); + pm_genpd_remove(&drv->pd); + + debugfs_remove_recursive(drv->debugfs); + + return 0; +} + +static const struct of_device_id cpr_match_table[] = { + { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, cpr_match_table); + +static struct platform_driver cpr_driver = { + .probe = cpr_probe, + .remove = cpr_remove, + .driver = { + .name = "qcom-cpr", + .of_match_table = cpr_match_table, + }, +}; +module_platform_driver(cpr_driver); + +MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index a67701ed93e8..73257cf107d9 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server), INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core), + INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), @@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_D, 
rapl_defaults_core),
 	INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+	INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
 
 	INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
 	INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
@@ -1295,6 +1297,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	int ret;
 
+	if (!rapl_defaults)
+		return ERR_PTR(-ENODEV);
+
 	rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
 	if (!rp)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 61fafe0374ce..b84f16bbd6f2 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
 {
 	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
+	ptp_cleanup_pin_groups(ptp);
 	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -302,9 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
 
-	ptp_cleanup_pin_groups(ptp);
-
 	posix_clock_unregister(&ptp->clock);
+
 	return 0;
 }
 EXPORT_SYMBOL(ptp_clock_unregister);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 74eb5af7295f..97bfdd47954f 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
 	  This driver can also be built as a module. If so, the module
 	  will be called bd70528-regulator.
 
+config REGULATOR_BD71828
+	tristate "ROHM BD71828 Power Regulator"
+	depends on MFD_ROHM_BD71828
+	select REGULATOR_ROHM
+	help
+	  This driver supports voltage regulators on the ROHM BD71828 PMIC.
+	  This will enable support for the software-controllable buck
+	  and LDO regulators.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called bd71828-regulator.
+
 config REGULATOR_BD718XX
 	tristate "ROHM BD71837 Power Regulator"
 	depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
 	  through the regulator interface. In addition it enables
 	  suspend-to-ram/standby transition.
 
+config REGULATOR_MP8859
+	tristate "MPS MP8859 regulator driver"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Say y here to support the MP8859 voltage regulator. This driver
+	  supports basic operations (get/set voltage) through the regulator
+	  interface.
+	  Say M here if you want to include support for the regulator as a
+	  module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+	tristate "Monolithic MPQ7920 PMIC"
+	depends on I2C && OF
+	select REGMAP_I2C
+	help
+	  Say y here to support the MPQ7920 PMIC. This enables support for
+	  the software-controllable 4 buck and 5 LDO regulators.
+	  This driver supports the control of the device's different power
+	  rails through the regulator interface.
+
 config REGULATOR_MT6311
 	tristate "MediaTek MT6311 PMIC"
 	depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
 	  This driver provides support for voltage regulators available
 	  on the ARM Ltd's Versatile Express platform.
 
+config REGULATOR_VQMMC_IPQ4019
+	tristate "IPQ4019 VQMMC SD LDO regulator support"
+	depends on ARCH_QCOM
+	help
+	  This driver provides support for the VQMMC LDO I/O
+ config REGULATOR_WM831X tristate "Wolfson Microelectronics WM831x PMIC regulators" depends on MFD_WM831X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2210ba56f9bd..07bc977c52b0 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o +obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o +obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o +obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o @@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o +obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 989506bd90b1..16f0c8570036 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) int i; for (i = 0; i < rate_count; i++) { - if (ramp <= slew_rates[i]) - cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); - else + if (ramp > slew_rates[i]) break; + + if (id == AXP20X_DCDC2) + cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); + else + cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); } if (cfg == 0xff) { @@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, - AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c index ec764022621f..5bf8a2dc5fe7 100644 --- a/drivers/regulator/bd70528-regulator.c +++ b/drivers/regulator/bd70528-regulator.c @@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, - .set_ramp_delay = bd70528_set_ramp_delay, }; static const struct regulator_ops bd70528_led_ops = { diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c new file mode 100644 index 000000000000..b2fa17be4988 --- /dev/null +++ 
b/drivers/regulator/bd71828-regulator.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019 ROHM Semiconductors +// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver +// + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/rohm-bd71828.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +struct reg_init { + unsigned int reg; + unsigned int mask; + unsigned int val; +}; +struct bd71828_regulator_data { + struct regulator_desc desc; + const struct rohm_dvs_config dvs; + const struct reg_init *reg_inits; + int reg_init_amnt; +}; + +static const struct reg_init buck1_inits[] = { + /* + * DVS Buck voltages can be changed by register values or via GPIO. + * Use register accesses by default. + */ + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK1_CTRL, + .val = BD71828_DVS_BUCK1_CTRL_I2C, + }, +}; + +static const struct reg_init buck2_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK2_CTRL, + .val = BD71828_DVS_BUCK2_CTRL_I2C, + }, +}; + +static const struct reg_init buck6_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK6_CTRL, + .val = BD71828_DVS_BUCK6_CTRL_I2C, + }, +}; + +static const struct reg_init buck7_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK7_CTRL, + .val = BD71828_DVS_BUCK7_CTRL_I2C, + }, +}; + +static const struct regulator_linear_range bd71828_buck1267_volts[] = { + REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250), + REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0), +}; + +static const struct regulator_linear_range bd71828_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_buck4_volts[] = { + REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000), + REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0), +}; + +static const struct regulator_linear_range bd71828_buck5_volts[] = { + REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0), +}; + +static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int val; + + switch (ramp_delay) { + case 1 ... 2500: + val = 0; + break; + case 2501 ... 5000: + val = 1; + break; + case 5001 ... 10000: + val = 2; + break; + case 10001 ... 
20000: + val = 3; + break; + default: + val = 3; + dev_err(&rdev->dev, + "ramp_delay: %d not supported, setting 20mV/uS", + ramp_delay); + } + + /* + * On BD71828 the ramp delay level control reg is at offset +2 to + * enable reg + */ + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2, + BD71828_MASK_RAMP_DELAY, + val << (ffs(BD71828_MASK_RAMP_DELAY) - 1)); +} + +static int buck_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + struct bd71828_regulator_data *data; + + data = container_of(desc, struct bd71828_regulator_data, desc); + + return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap); +} + +static int ldo6_parse_dt(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + uint32_t uv = 0; + unsigned int en; + struct regmap *regmap = cfg->regmap; + static const char * const props[] = { "rohm,dvs-run-voltage", + "rohm,dvs-idle-voltage", + "rohm,dvs-suspend-voltage", + "rohm,dvs-lpsr-voltage" }; + unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN, + BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN }; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + ret = of_property_read_u32(np, props[i], &uv); + if (ret) { + if (ret != -EINVAL) + return ret; + continue; + } + if (uv) + en = 0xffffffff; + else + en = 0; + + ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en); + if (ret) + return ret; + } + return 0; +} + +static const struct regulator_ops bd71828_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_ops bd71828_dvs_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd71828_set_ramp_delay, +}; + +static const struct regulator_ops bd71828_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_ops bd71828_ldo6_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static const struct bd71828_regulator_data bd71828_rdata[] = { + { + .desc = { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK1, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK1_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + 
ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK1_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + /* + * LPSR voltage is same as SUSPEND voltage. Allow + * setting it so that regulator can be set enabled at + * LPSR state + */ + .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck1_inits, + .reg_init_amnt = ARRAY_SIZE(buck1_inits), + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK2, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK2_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK2_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck2_inits, + .reg_init_amnt = ARRAY_SIZE(buck2_inits), + }, + { + .desc = { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK3, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts), + .n_voltages = BD71828_BUCK3_VOLTS, + .enable_reg = BD71828_REG_BUCK3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK3_VOLT, + .vsel_mask = BD71828_MASK_BUCK3_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
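 *
 * (As a side note on the voltage tables above: with the
 * bd71828_buck1267_volts ranges, a hypothetical selector of 0x78
 * decodes to 500000 + 0x78 * 6250 = 1250000 uV, while any selector
 * in 0xf0..0xff reads back as the fixed 2000000 uV top value.)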
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK3_VOLT, + .idle_reg = BD71828_REG_BUCK3_VOLT, + .suspend_reg = BD71828_REG_BUCK3_VOLT, + .lpsr_reg = BD71828_REG_BUCK3_VOLT, + .run_mask = BD71828_MASK_BUCK3_VOLT, + .idle_mask = BD71828_MASK_BUCK3_VOLT, + .suspend_mask = BD71828_MASK_BUCK3_VOLT, + .lpsr_mask = BD71828_MASK_BUCK3_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK4, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck4_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts), + .n_voltages = BD71828_BUCK4_VOLTS, + .enable_reg = BD71828_REG_BUCK4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK4_VOLT, + .vsel_mask = BD71828_MASK_BUCK4_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK4_VOLT, + .idle_reg = BD71828_REG_BUCK4_VOLT, + .suspend_reg = BD71828_REG_BUCK4_VOLT, + .lpsr_reg = BD71828_REG_BUCK4_VOLT, + .run_mask = BD71828_MASK_BUCK4_VOLT, + .idle_mask = BD71828_MASK_BUCK4_VOLT, + .suspend_mask = BD71828_MASK_BUCK4_VOLT, + .lpsr_mask = BD71828_MASK_BUCK4_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK5, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck5_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts), + .n_voltages = BD71828_BUCK5_VOLTS, + .enable_reg = BD71828_REG_BUCK5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK5_VOLT, + .vsel_mask = BD71828_MASK_BUCK5_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK5 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
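 *
 * (Concretely, and only as an illustration: because RUN, IDLE,
 * SUSPEND and LPSR all share BD71828_REG_BUCK5_VOLT here, only the
 * *_on_mask enable bits differ per state, so a board could keep
 * BUCK5 powered in RUN but off in SUSPEND by clearing
 * BD71828_MASK_SUSP_EN while leaving the shared voltage untouched.)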
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK5_VOLT, + .idle_reg = BD71828_REG_BUCK5_VOLT, + .suspend_reg = BD71828_REG_BUCK5_VOLT, + .lpsr_reg = BD71828_REG_BUCK5_VOLT, + .run_mask = BD71828_MASK_BUCK5_VOLT, + .idle_mask = BD71828_MASK_BUCK5_VOLT, + .suspend_mask = BD71828_MASK_BUCK5_VOLT, + .lpsr_mask = BD71828_MASK_BUCK5_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK6, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK6_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK6_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck6_inits, + .reg_init_amnt = ARRAY_SIZE(buck6_inits), + }, + { + .desc = { + .name = "buck7", + .of_match = of_match_ptr("BUCK7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK7, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK7_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK7_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck7_inits, + .reg_init_amnt = ARRAY_SIZE(buck7_inits), + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO1, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO1_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = 
buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO1 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO1_VOLT, + .idle_reg = BD71828_REG_LDO1_VOLT, + .suspend_reg = BD71828_REG_LDO1_VOLT, + .lpsr_reg = BD71828_REG_LDO1_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO2, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO2_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO2 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO2_VOLT, + .idle_reg = BD71828_REG_LDO2_VOLT, + .suspend_reg = BD71828_REG_LDO2_VOLT, + .lpsr_reg = BD71828_REG_LDO2_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO3, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO3_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+ .idle_reg = BD71828_REG_LDO3_VOLT,
+ .suspend_reg = BD71828_REG_LDO3_VOLT,
+ .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO4,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO4_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO4 only supports single voltage for all states.
+ * voltage can be individually enabled for each state
+ * though => allow setting all states to support
+ * enabling power rail on different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+ .idle_reg = BD71828_REG_LDO4_VOLT,
+ .suspend_reg = BD71828_REG_LDO4_VOLT,
+ .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO5,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO5_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ .owner = THIS_MODULE,
+ },
+ /*
+ * LDO5 is special. It can choose vsel settings to be configured
+ * from 2 different registers (by GPIO).
+ *
+ * This driver supports only configuration where
+ * BD71828_REG_LDO5_VOLT_L is used.
+ */ + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO5_VOLT, + .idle_reg = BD71828_REG_LDO5_VOLT, + .suspend_reg = BD71828_REG_LDO5_VOLT, + .lpsr_reg = BD71828_REG_LDO5_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo6", + .of_match = of_match_ptr("LDO6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO6, + .ops = &bd71828_ldo6_ops, + .type = REGULATOR_VOLTAGE, + .fixed_uV = BD71828_LDO_6_VOLTAGE, + .n_voltages = 1, + .enable_reg = BD71828_REG_LDO6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .owner = THIS_MODULE, + /* + * LDO6 only supports enable/disable for all states. + * Voltage for LDO6 is fixed. + */ + .of_parse_cb = ldo6_parse_dt, + }, + }, { + .desc = { + /* SNVS LDO in data-sheet */ + .name = "ldo7", + .of_match = of_match_ptr("LDO7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO_SNVS, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO7_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO7 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO7_VOLT, + .idle_reg = BD71828_REG_LDO7_VOLT, + .suspend_reg = BD71828_REG_LDO7_VOLT, + .lpsr_reg = BD71828_REG_LDO7_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, +}; + +static int bd71828_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd71828; + int i, j, ret; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd71828 = dev_get_drvdata(pdev->dev.parent); + if (!bd71828) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd71828->regmap; + + for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) { + struct regulator_dev *rdev; + const struct bd71828_regulator_data *rd; + + rd = &bd71828_rdata[i]; + rdev = devm_regulator_register(&pdev->dev, + &rd->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + rd->desc.name); + return PTR_ERR(rdev); + } + for (j = 0; j < rd->reg_init_amnt; j++) { + ret = regmap_update_bits(bd71828->regmap, + rd->reg_inits[j].reg, + rd->reg_inits[j].mask, + rd->reg_inits[j].val); + if (ret) { + dev_err(&pdev->dev, + "regulator %s init failed\n", + rd->desc.name); + return ret; + } + } + } + return 0; +} + +static struct platform_driver bd71828_regulator = { + .driver = { + .name = "bd71828-pmic" + }, + .probe = bd71828_probe, +}; + +module_platform_driver(bd71828_regulator); + +MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); +MODULE_DESCRIPTION("BD71828 voltage regulator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd71828-pmic"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 13a43eee2e46..8f9b2d8eaf10 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { }, }; -struct bd718xx_pmic_inits { - const struct bd718xx_regulator_data *r_datas; - unsigned int r_amount; -}; - static int bd718xx_probe(struct platform_device *pdev) { struct bd718xx *mfd; struct regulator_config config = { 0 }; - struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = { - [ROHM_CHIP_TYPE_BD71837] = { - .r_datas = bd71837_regulators, - .r_amount = ARRAY_SIZE(bd71837_regulators), - }, - [ROHM_CHIP_TYPE_BD71847] = { - .r_datas = bd71847_regulators, - .r_amount = ARRAY_SIZE(bd71847_regulators), - }, - }; - int i, j, err; bool use_snvs; + const struct bd718xx_regulator_data *reg_data; + unsigned int num_reg_data; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev) goto err; } - if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT || - !pmic_regulators[mfd->chip.chip_type].r_datas) { + switch (mfd->chip.chip_type) { + case ROHM_CHIP_TYPE_BD71837: + reg_data = bd71837_regulators; + num_reg_data = ARRAY_SIZE(bd71837_regulators); + break; + case ROHM_CHIP_TYPE_BD71847: + reg_data = bd71847_regulators; + num_reg_data = ARRAY_SIZE(bd71847_regulators); + break; + default: dev_err(&pdev->dev, "Unsupported chip type\n"); err = -EINVAL; goto err; @@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct 
platform_device *pdev)
 }
 }
- for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+ for (i = 0; i < num_reg_data; i++) {
 const struct regulator_desc *desc;
 struct regulator_dev *rdev;
 const struct bd718xx_regulator_data *r;
- r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+ r = &reg_data[i];
 desc = &r->desc;
 config.dev = pdev->dev.parent;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 03d79fee2987..d015d99cb59d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
 out:
 return ret;
 }
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
 static int regulator_limit_voltage_step(struct regulator_dev *rdev,
 int *current_uV, int *min_uV)
@@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
 return ret;
 return ret - rdev->constraints->uV_offset;
 }
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
 /**
 * regulator_get_voltage - get regulator output voltage
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f9448ed50e05..0cdeb6186529 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, da9210_dt_ids);
-static int da9210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
 {
 struct da9210 *chip;
 struct device *dev = &i2c->dev;
@@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = {
 .name = "da9210",
 .of_match_table = of_match_ptr(da9210_dt_ids),
 },
- .probe = da9210_i2c_probe,
+ .probe_new = da9210_i2c_probe,
 .id_table = da9210_i2c_id,
 };
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 523dc1b95826..2ea4362ffa5c 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip)
 /*
 * I2C driver interface functions
 */
-static int da9211_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
 {
 struct da9211 *chip;
 int error, ret;
@@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = {
 .name = "da9211",
 .of_match_table = of_match_ptr(da9211_dt_ids),
 },
- .probe = da9211_i2c_probe,
+ .probe_new = da9211_i2c_probe,
 .id_table = da9211_i2c_id,
 };
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index ca3dc3f3bb29..bb16c465426e 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -13,6 +13,8 @@
 #include <linux/regulator/driver.h>
 #include <linux/module.h>
+#include "internal.h"
+
 /**
 * regulator_is_enabled_regmap - standard is_enabled() for regmap users
 *
@@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
 consumers[i].supply = supply_names[i];
 }
 EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 978f5e903cae..cfb765986d0d 100644
--- a/drivers/regulator/isl9305.c
+++ 
b/drivers/regulator/isl9305.c @@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = { .cache_type = REGCACHE_RBTREE, }; -static int isl9305_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int isl9305_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct isl9305_pdata *pdata = i2c->dev.platform_data; @@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = { .name = "isl9305", .of_match_table = of_match_ptr(isl9305_dt_ids), }, - .probe = isl9305_i2c_probe, + .probe_new = isl9305_i2c_probe, .id_table = isl9305_i2c_id, }; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index bc96e65ef7c0..8be252f81b09 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971, return 0; } -static int lp3971_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int lp3971_i2c_probe(struct i2c_client *i2c) { struct lp3971 *lp3971; struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev); @@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = { .driver = { .name = "LP3971", }, - .probe = lp3971_i2c_probe, + .probe_new = lp3971_i2c_probe, .id_table = lp3971_i2c_id, }; diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index d934540eb8c4..e12e52c69e52 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int ltc3676_regulator_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc3676_regulator_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct regulator_init_data *init_data = dev_get_platdata(dev); @@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(ltc3676_of_match), }, - .probe = ltc3676_regulator_probe, + .probe_new = ltc3676_regulator_probe, .id_table = ltc3676_i2c_id, }; module_i2c_driver(ltc3676_driver); diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c new file mode 100644 index 000000000000..1d26b506ee5b --- /dev/null +++ b/drivers/regulator/mp8859.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 five technologies GmbH +// Author: Markus Reichl <m.reichl@fivetechno.de> + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/of.h> +#include <linux/regulator/driver.h> +#include <linux/regmap.h> + + +#define VOL_MIN_IDX 0x00 +#define VOL_MAX_IDX 0x7ff + +/* Register definitions */ +#define MP8859_VOUT_L_REG 0 //3 lo Bits +#define MP8859_VOUT_H_REG 1 //8 hi Bits +#define MP8859_VOUT_GO_REG 2 +#define MP8859_IOUT_LIM_REG 3 +#define MP8859_CTL1_REG 4 +#define MP8859_CTL2_REG 5 +#define MP8859_RESERVED1_REG 6 +#define MP8859_RESERVED2_REG 7 +#define MP8859_RESERVED3_REG 8 +#define MP8859_STATUS_REG 9 +#define MP8859_INTERRUPT_REG 0x0A +#define MP8859_MASK_REG 0x0B +#define MP8859_ID1_REG 0x0C +#define MP8859_MFR_ID_REG 0x27 +#define MP8859_DEV_ID_REG 0x28 +#define MP8859_IC_REV_REG 0x29 + +#define MP8859_MAX_REG 0x29 + +#define MP8859_GO_BIT 0x01 + + +static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + int ret; + + ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7); + + if (ret) + return ret; + ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3); + + if (ret) + return ret; + ret = 
regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG, + MP8859_GO_BIT, 1); + return ret; +} + +static int mp8859_get_voltage_sel(struct regulator_dev *rdev) +{ + unsigned int val_tmp; + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp); + + if (ret) + return ret; + val = val_tmp << 3; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp); + + if (ret) + return ret; + val |= val_tmp & 0x07; + return val; +} + +static const struct regulator_linear_range mp8859_dcdc_ranges[] = { + REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000), +}; + +static const struct regmap_config mp8859_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MP8859_MAX_REG, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regulator_ops mp8859_ops = { + .set_voltage_sel = mp8859_set_voltage_sel, + .get_voltage_sel = mp8859_get_voltage_sel, + .list_voltage = regulator_list_voltage_linear_range, +}; + +static const struct regulator_desc mp8859_regulators[] = { + { + .id = 0, + .type = REGULATOR_VOLTAGE, + .name = "mp8859_dcdc", + .of_match = of_match_ptr("mp8859_dcdc"), + .n_voltages = VOL_MAX_IDX + 1, + .linear_ranges = mp8859_dcdc_ranges, + .n_linear_ranges = 1, + .ops = &mp8859_ops, + .owner = THIS_MODULE, + }, +}; + +static int mp8859_i2c_probe(struct i2c_client *i2c) +{ + int ret; + struct regulator_config config = {.dev = &i2c->dev}; + struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap); + struct regulator_dev *rdev; + + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0], + &config); + + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(&i2c->dev, "failed to register %s: %d\n", + mp8859_regulators[0].name, ret); + return ret; + } + return 0; +} + +static const struct of_device_id mp8859_dt_id[] = { + {.compatible = "mps,mp8859"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mp8859_dt_id); + +static const struct i2c_device_id mp8859_i2c_id[] = { + { "mp8859", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id); + +static struct i2c_driver mp8859_regulator_driver = { + .driver = { + .name = "mp8859", + .of_match_table = of_match_ptr(mp8859_dt_id), + }, + .probe_new = mp8859_i2c_probe, + .id_table = mp8859_i2c_id, +}; + +module_i2c_driver(mp8859_regulator_driver); + +MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver"); +MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c new file mode 100644 index 000000000000..54c862edf571 --- /dev/null +++ b/drivers/regulator/mpq7920.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// mpq7920.c - regulator driver for mps mpq7920 +// +// Copyright 2019 Monolithic Power Systems, Inc +// +// Author: Saravanan Sekar <sravanhome@gmail.com> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include "mpq7920.h" + +#define MPQ7920_BUCK_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) +#define MPQ7920_LDO_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) + +#define MPQ7920BUCK(_name, _id, _ilim) 
\ + [MPQ7920_BUCK ## _id] = { \ + .id = MPQ7920_BUCK ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .of_parse_cb = mpq7920_parse_cb, \ + .ops = &mpq7920_buck_ops, \ + .min_uV = MPQ7920_BUCK_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \ + .curr_table = _ilim, \ + .n_current_limits = ARRAY_SIZE(_ilim), \ + .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .csel_mask = MPQ7920_MASK_BUCK_ILIM, \ + .enable_reg = MPQ7920_REG_REGULATOR_EN, \ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_BUCK ## _id), \ + .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .soft_start_mask = MPQ7920_MASK_SOFTSTART, \ + .owner = THIS_MODULE, \ + } + +#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \ + [MPQ7920_LDO ## _id] = { \ + .id = MPQ7920_LDO ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = _ops, \ + .min_uV = MPQ7920_LDO_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_LDO_VOLT_RANGE, \ + .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .curr_table = _ilim, \ + .n_current_limits = _ilim_sz, \ + .csel_reg = _creg, \ + .csel_mask = _cmask, \ + .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_LDO ##_id + 1), \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + } + +enum mpq7920_regulators { + MPQ7920_BUCK1, + MPQ7920_BUCK2, + MPQ7920_BUCK3, + MPQ7920_BUCK4, + MPQ7920_LDO1, /* LDORTC */ + MPQ7920_LDO2, + MPQ7920_LDO3, + MPQ7920_LDO4, + MPQ7920_LDO5, + MPQ7920_MAX_REGULATORS, +}; + +struct mpq7920_regulator_info { + struct regmap *regmap; + struct regulator_desc *rdesc; +}; + +static const struct regmap_config mpq7920_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x25, +}; + +/* Current limits array (in uA) + * ILIM1 & ILIM3 + */ +static const unsigned int mpq7920_I_limits1[] = { + 4600000, 6600000, 7600000, 9300000 +}; + +/* ILIM2 & ILIM4 */ +static const unsigned int mpq7920_I_limits2[] = { + 2700000, 3900000, 5100000, 6100000 +}; + +/* LDO4 & LDO5 */ +static const unsigned int mpq7920_I_limits3[] = { + 300000, 700000 +}; + +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *rdesc, + struct regulator_config *config); + +/* RTCLDO not controllable, always ON */ +static const struct regulator_ops mpq7920_ldortc_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_wo_current_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = 
regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, +}; + +static const struct regulator_ops mpq7920_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_soft_start = regulator_set_soft_start_regmap, + .set_ramp_delay = mpq7920_set_ramp_delay, +}; + +static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = { + MPQ7920BUCK("buck1", 1, mpq7920_I_limits1), + MPQ7920BUCK("buck2", 2, mpq7920_I_limits2), + MPQ7920BUCK("buck3", 3, mpq7920_I_limits1), + MPQ7920BUCK("buck4", 4, mpq7920_I_limits2), + MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B, + MPQ7920_MASK_LDO_ILIM), + MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B, + MPQ7920_MASK_LDO_ILIM), +}; + +/* + * DVS ramp rate BUCK1 to BUCK4 + * 00-01: Reserved + * 10: 8mV/us + * 11: 4mV/us + */ +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val; + + if (ramp_delay > 8000 || ramp_delay < 0) + return -EINVAL; + + if (ramp_delay <= 4000) + ramp_val = 3; + else + ramp_val = 2; + + return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6); +} + +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + uint8_t val; + int ret; + struct mpq7920_regulator_info *info = config->driver_data; + struct regulator_desc *rdesc = &info->rdesc[desc->id]; + + if (of_property_read_bool(np, "mps,buck-ovp-disable")) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_B + (rdesc->id * 4), + MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE); + } + + ret = of_property_read_u8(np, "mps,buck-phase-delay", &val); + if (!ret) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_C + (rdesc->id * 4), + MPQ7920_MASK_BUCK_PHASE_DEALY, + (val & 3) << 4); + } + + ret = of_property_read_u8(np, "mps,buck-softstart", &val); + if (!ret) + rdesc->soft_start_val_on = (val & 3) << 2; + + return 0; +} + +static void mpq7920_parse_dt(struct device *dev, + struct mpq7920_regulator_info *info) +{ + int ret; + struct device_node *np = dev->of_node; + uint8_t freq; + + np = of_get_child_by_name(np, "regulators"); + if (!np) { + dev_err(dev, "missing 'regulators' subnode in DT\n"); + return; + } + + ret = 
of_property_read_u8(np, "mps,switch-freq", &freq); + if (!ret) { + regmap_update_bits(info->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_SWITCH_FREQ, + (freq & 3) << 4); + } + + of_node_put(np); +} + +static int mpq7920_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct mpq7920_regulator_info *info; + struct regulator_config config = { NULL, }; + struct regulator_dev *rdev; + struct regmap *regmap; + int i; + + info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->rdesc = mpq7920_regulators_desc; + regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + + i2c_set_clientdata(client, info); + info->regmap = regmap; + if (client->dev.of_node) + mpq7920_parse_dt(&client->dev, info); + + config.dev = dev; + config.regmap = regmap; + config.driver_data = info; + + for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &mpq7920_regulators_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator!\n"); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct of_device_id mpq7920_of_match[] = { + { .compatible = "mps,mpq7920"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mpq7920_of_match); + +static const struct i2c_device_id mpq7920_id[] = { + { "mpq7920", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mpq7920_id); + +static struct i2c_driver mpq7920_regulator_driver = { + .driver = { + .name = "mpq7920", + .of_match_table = of_match_ptr(mpq7920_of_match), + }, + .probe_new = mpq7920_i2c_probe, + .id_table = mpq7920_id, +}; +module_i2c_driver(mpq7920_regulator_driver); + +MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); +MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h new file mode 100644 index 000000000000..489924655a96 --- /dev/null +++ b/drivers/regulator/mpq7920.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * mpq7920.h - Regulator definitions for mpq7920 + * + * Copyright 2019 Monolithic Power Systems, Inc + * + */ + +#ifndef __MPQ7920_H__ +#define __MPQ7920_H__ + +#define MPQ7920_REG_CTL0 0x00 +#define MPQ7920_REG_CTL1 0x01 +#define MPQ7920_REG_CTL2 0x02 +#define MPQ7920_BUCK1_REG_A 0x03 +#define MPQ7920_BUCK1_REG_B 0x04 +#define MPQ7920_BUCK1_REG_C 0x05 +#define MPQ7920_BUCK1_REG_D 0x06 +#define MPQ7920_BUCK2_REG_A 0x07 +#define MPQ7920_BUCK2_REG_B 0x08 +#define MPQ7920_BUCK2_REG_C 0x09 +#define MPQ7920_BUCK2_REG_D 0x0a +#define MPQ7920_BUCK3_REG_A 0x0b +#define MPQ7920_BUCK3_REG_B 0x0c +#define MPQ7920_BUCK3_REG_C 0x0d +#define MPQ7920_BUCK3_REG_D 0x0e +#define MPQ7920_BUCK4_REG_A 0x0f +#define MPQ7920_BUCK4_REG_B 0x10 +#define MPQ7920_BUCK4_REG_C 0x11 +#define MPQ7920_BUCK4_REG_D 0x12 +#define MPQ7920_LDO1_REG_A 0x13 +#define MPQ7920_LDO1_REG_B 0x0 +#define MPQ7920_LDO2_REG_A 0x14 +#define MPQ7920_LDO2_REG_B 0x15 +#define MPQ7920_LDO2_REG_C 0x16 +#define MPQ7920_LDO3_REG_A 0x17 +#define MPQ7920_LDO3_REG_B 0x18 +#define MPQ7920_LDO3_REG_C 0x19 +#define MPQ7920_LDO4_REG_A 0x1a +#define MPQ7920_LDO4_REG_B 0x1b +#define MPQ7920_LDO4_REG_C 0x1c +#define MPQ7920_LDO5_REG_A 0x1d +#define MPQ7920_LDO5_REG_B 0x1e +#define MPQ7920_LDO5_REG_C 0x1f +#define MPQ7920_REG_MODE 0x20 +#define MPQ7920_REG_REGULATOR_EN 0x22 + +#define MPQ7920_MASK_VREF 0x7f +#define MPQ7920_MASK_BUCK_ILIM 0xc0 +#define 
MPQ7920_MASK_LDO_ILIM BIT(6)
+#define MPQ7920_MASK_DISCHARGE BIT(5)
+#define MPQ7920_MASK_MODE 0xc0
+#define MPQ7920_MASK_SOFTSTART 0x0c
+#define MPQ7920_MASK_SWITCH_FREQ 0x30
+#define MPQ7920_MASK_BUCK_PHASE_DEALY 0x30
+#define MPQ7920_MASK_DVS_SLEWRATE 0xc0
+#define MPQ7920_MASK_OVP 0x40
+#define MPQ7920_OVP_DISABLE ~(0x40)
+#define MPQ7920_DISCHARGE_ON BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET 7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN 400000
+#define MPQ7920_LDO_VOLT_MIN 650000
+#define MPQ7920_VOLT_MAX 3587500
+#define MPQ7920_VOLT_STEP 12500
+
+#endif /* __MPQ7920_H__ */
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index af95449d3590..69e6af3cd505 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = {
 /*
 * I2C driver interface functions
 */
-static int mt6311_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
 {
 struct regulator_config config = { };
 struct regulator_dev *rdev;
@@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = {
 .name = "mt6311",
 .of_match_table = of_match_ptr(mt6311_dt_ids),
 },
- .probe = mt6311_i2c_probe,
+ .probe_new = mt6311_i2c_probe,
 .id_table = mt6311_i2c_id,
 };
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 3d3415839ba2..787ced918372 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -279,8 +279,7 @@ error_i2c:
 /*
 * I2C driver interface functions
 */
-static int pv88060_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
 {
 struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
 struct pv88060 *chip;
@@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = {
 .name = "pv88060",
 .of_match_table = of_match_ptr(pv88060_dt_ids),
 },
- .probe = pv88060_i2c_probe,
+ .probe_new = pv88060_i2c_probe,
 .id_table = pv88060_i2c_id,
 };
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index b1d0d97ae935..784729ec2182 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -272,8 +272,7 @@ error_i2c:
 /*
 * I2C driver interface functions
 */
-static int pv88090_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
 {
 struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
 struct pv88090 *chip;
@@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = {
 .name = "pv88090",
 .of_match_table = of_match_ptr(pv88090_dt_ids),
 },
- .probe = pv88090_i2c_probe,
+ .probe_new = pv88090_i2c_probe,
 .id_table = pv88090_i2c_id,
 };
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 5b4003226484..31f79fda3238 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
 }
 if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
 continue;
 }
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 51f7e8b74d8c..115f59530852 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -390,5 +390,5 @@ 
module_platform_driver(s2mpa01_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 4f2dc5ebffdc..23d288278957 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 12d6b8d2e97e..4abd3ed31f60 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index bf1a3508ebc4..44e4cecbf6de 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip) dev_dbg(chip->dev, "Fault log: FLT_POR\n"); } -static int slg51000_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int slg51000_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct slg51000 *chip; @@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = { .driver = { .name = "slg51000-regulator", }, - .probe = slg51000_i2c_probe, + .probe_new = slg51000_i2c_probe, .id_table = slg51000_i2c_id, }; diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c index 42e03b2c10a0..2222e739e62b 100644 --- a/drivers/regulator/sy8106a-regulator.c +++ b/drivers/regulator/sy8106a-regulator.c @@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = { /* * I2C driver interface functions */ -static int sy8106a_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int sy8106a_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; struct regulator_dev *rdev; @@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = { .name = "sy8106a", .of_match_table = of_match_ptr(sy8106a_i2c_of_match), }, - .probe = sy8106a_i2c_probe, + .probe_new = sy8106a_i2c_probe, .id_table = sy8106a_i2c_id, }; diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c index 92adb4f3ee19..62d243f3b904 100644 --- a/drivers/regulator/sy8824x.c +++ b/drivers/regulator/sy8824x.c @@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = { .val_bits = 8, }; -static int sy8824_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sy8824_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device_node *np = dev->of_node; @@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = { .name = "sy8824-regulator", .of_match_table = of_match_ptr(sy8824_dt_ids), }, - .probe = sy8824_i2c_probe, + .probe_new = sy8824_i2c_probe, .id_table = 
sy8824_id, }; module_i2c_driver(sy8824_regulator_driver); diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c index 7b0e38f8d627..0edc83089ba2 100644 --- a/drivers/regulator/tps65132-regulator.c +++ b/drivers/regulator/tps65132-regulator.c @@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = { .wr_table = &tps65132_no_reg_table, }; -static int tps65132_probe(struct i2c_client *client, - const struct i2c_device_id *client_id) +static int tps65132_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tps65132_regulator *tps; @@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = { .driver = { .name = "tps65132", }, - .probe = tps65132_probe, + .probe_new = tps65132_probe, .id_table = tps65132_id, }; diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c index 9a9ee8188109..cbadb1c99679 100644 --- a/drivers/regulator/vctrl-regulator.c +++ b/drivers/regulator/vctrl-regulator.c @@ -11,10 +11,13 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/sort.h> +#include "internal.h" + struct vctrl_voltage_range { int min_uV; int max_uV; @@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV) static int vctrl_get_voltage(struct regulator_dev *rdev) { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); return vctrl_calc_output_voltage(vctrl, ctrl_uV); } @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); struct regulator *ctrl_reg = vctrl->ctrl_reg; - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg); + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev); int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV); int ret; if (req_min_uV >= uV || !vctrl->ovp_threshold) /* voltage rising or no OVP */ - return regulator_set_voltage( - ctrl_reg, + return regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl_calc_ctrl_voltage(vctrl, req_min_uV), - vctrl_calc_ctrl_voltage(vctrl, req_max_uV)); + vctrl_calc_ctrl_voltage(vctrl, req_max_uV), + PM_SUSPEND_ON); while (uV > req_min_uV) { int max_drop_uV = (uV * vctrl->ovp_threshold) / 100; @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, next_uV = max_t(int, req_min_uV, uV - max_drop_uV); next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV); - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + next_ctrl_uV, next_ctrl_uV, - next_ctrl_uV); + PM_SUSPEND_ON); if (ret) goto err; @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, err: /* Try to go back to original voltage */ - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV); + regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV, + PM_SUSPEND_ON); return ret; } @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, if (selector >= vctrl->sel || !vctrl->ovp_threshold) { /* voltage rising or no OVP */ - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[selector].ctrl, vctrl->vtable[selector].ctrl, - vctrl->vtable[selector].ctrl); + PM_SUSPEND_ON); if (!ret) vctrl->sel = selector; @@ -173,9 
+179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, else next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel; - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[next_sel].ctrl, - vctrl->vtable[next_sel].ctrl); + vctrl->vtable[next_sel].ctrl, + PM_SUSPEND_ON); if (ret) { dev_err(&rdev->dev, "failed to set control voltage to %duV\n", @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, err: if (vctrl->sel != orig_sel) { /* Try to go back to original voltage */ - if (!regulator_set_voltage(ctrl_reg, + if (!regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[orig_sel].ctrl, vctrl->vtable[orig_sel].ctrl, - vctrl->vtable[orig_sel].ctrl)) + PM_SUSPEND_ON)) vctrl->sel = orig_sel; else dev_warn(&rdev->dev, @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev) if (ret) return ret; - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); if (ctrl_uV < 0) { dev_err(&pdev->dev, "failed to get control voltage\n"); return ctrl_uV; diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c new file mode 100644 index 000000000000..6d5ae25d08d1 --- /dev/null +++ b/drivers/regulator/vqmmc-ipq4019-regulator.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com> +// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr> +// +// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +static const unsigned int ipq4019_vmmc_voltages[] = { + 1500000, 1800000, 2500000, 3000000, +}; + +static const struct regulator_ops ipq4019_regulator_voltage_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc vmmc_regulator = { + .name = "vmmcq", + .ops = &ipq4019_regulator_voltage_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .volt_table = ipq4019_vmmc_voltages, + .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages), + .vsel_reg = 0, + .vsel_mask = 0x3, +}; + +static const struct regmap_config ipq4019_vmmcq_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static int ipq4019_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator_init_data *init_data; + struct regulator_config cfg = {}; + struct regulator_dev *rdev; + struct resource *res; + struct regmap *rmap; + void __iomem *base; + + init_data = of_get_regulator_init_data(dev, dev->of_node, + &vmmc_regulator); + if (!init_data) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config); + if (IS_ERR(rmap)) + return PTR_ERR(rmap); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.of_node = dev->of_node; + cfg.regmap = rmap; + + rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator: %ld\n", 
+ PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + return 0; +} + +static const struct of_device_id regulator_ipq4019_of_match[] = { + { .compatible = "qcom,vqmmc-ipq4019-regulator", }, + {}, +}; + +static struct platform_driver ipq4019_regulator_driver = { + .probe = ipq4019_regulator_probe, + .driver = { + .name = "vqmmc-ipq4019-regulator", + .of_match_table = of_match_ptr(regulator_ipq4019_of_match), + }, +}; +module_platform_driver(ipq4019_regulator_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>"); +MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator"); diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index df2829dd55ad..2ecd8752b088 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time) save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - -#ifdef CONFIG_X86 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 0x17) || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)), - RTC_FREQ_SELECT); - save_freq_select &= ~RTC_DIV_RESET2; - } else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), - RTC_FREQ_SELECT); -#else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT); -#endif + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 5249fc99fd5f..9135e2101752 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data) irqen = irqsta & ~RTC_IRQ_EN_AL; mutex_lock(&rtc->lock); if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, - irqen) < 0) + irqen) == 0) mtk_rtc_write_trigger(rtc); mutex_unlock(&rtc->lock); @@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); mutex_unlock(&rtc->lock); - tm->tm_sec = data[RTC_OFFSET_SEC]; - tm->tm_min = data[RTC_OFFSET_MIN]; - tm->tm_hour = data[RTC_OFFSET_HOUR]; - tm->tm_mday = data[RTC_OFFSET_DOM]; - tm->tm_mon = data[RTC_OFFSET_MTH]; - tm->tm_year = data[RTC_OFFSET_YEAR]; + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; tm->tm_year += RTC_MIN_YEAR_OFFSET; tm->tm_mon--; @@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) tm->tm_year -= RTC_MIN_YEAR_OFFSET; tm->tm_mon++; - data[RTC_OFFSET_SEC] = tm->tm_sec; - data[RTC_OFFSET_MIN] = tm->tm_min; - data[RTC_OFFSET_HOUR] = tm->tm_hour; - data[RTC_OFFSET_DOM] = tm->tm_mday; - data[RTC_OFFSET_MTH] = tm->tm_mon; - data[RTC_OFFSET_YEAR] = tm->tm_year; - mutex_lock(&rtc->lock); + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, + data, RTC_OFFSET_COUNT); + if (ret < 0) + goto exit; + + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | + (tm->tm_sec & RTC_AL_SEC_MASK)); + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | + (tm->tm_min & 
RTC_AL_MIN_MASK)); + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | + (tm->tm_hour & RTC_AL_HOU_MASK)); + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | + (tm->tm_mday & RTC_AL_DOM_MASK)); + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | + (tm->tm_mon & RTC_AL_MTH_MASK)); + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | + (tm->tm_year & RTC_AL_YEA_MASK)); + if (alm->enabled) { ret = regmap_bulk_write(rtc->regmap, rtc->addr_base + RTC_AL_SEC, diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 8dcd20b34dde..852f5f3b3592 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node) CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc", sun50i_h6_rtc_clk_init); +/* + * The R40 user manual is self-conflicting on whether the prescaler is + * fixed or configurable. The clock diagram shows it as fixed, but there + * is also a configurable divider in the RTC block. + */ +static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = { + .rc_osc_rate = 16000000, + .fixed_prescaler = 512, +}; +static void __init sun8i_r40_rtc_clk_init(struct device_node *node) +{ + sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data); +} +CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", + sun8i_r40_rtc_clk_init); + static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { .rc_osc_rate = 32000, .has_out_clk = 1, diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1915061932e..5256e3ce84e5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; - /* (re-)init queue's state machine */ - ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 433b7b64368d..bb35ba4a8d24 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -void ap_queue_reinit_state(struct ap_queue *aq); +void ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index dad2be333d82..37c3bdc3642d 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->state = AP_STATE_RESET_START; + aq->state = AP_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->list); @@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_reinit_state(struct ap_queue *aq) +void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); aq->state = AP_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } +EXPORT_SYMBOL(ap_queue_init_state); diff --git 
a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index c1db64a2db21..110fe9d0cb91 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prepparm = (struct iprepparm *) prepcblk->rpl_parmb; /* do some plausibility checks on the key block */ - if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || - prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { + if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { DEBUG_ERR("%s reply with invalid or unknown key block\n", __func__); rc = -EIO; diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c50f3e86cc74..7cbb384ec535 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2A_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 35c7c6672713..c78c0d119806 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_rapq(aq->qid); rc = zcrypt_cex2c_rng_supported(aq); if (rc < 0) { zcrypt_queue_free(zq); @@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) else zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2C_CLEANUP_TIME; aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 442e3d6162f7..6fabc906114c 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX4_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 90cf4691b8c3..a7881f8eb05e 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY ---help--- If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8ef150dfb6f7..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 1f55b9e4e74a..1b88a3b53eee 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ 
b/drivers/scsi/fnic/vnic_dev.c @@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) int vnic_dev_hang_notify(struct vnic_dev *vdev) { - u64 a0, a1; + u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { - u64 a0, a1; + u64 a[2] = {}; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; - err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = ((u8 *)&a0)[i]; + mac_addr[i] = ((u8 *)&a)[i]; return 0; } @@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); if (err) pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); if (err) pr_err("Can't del addr [%pM], %d\n", addr, err); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cea625906440..902b649fc8ef 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ @@ -2956,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) q->limits.zoned = BLK_ZONED_HM; } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) + if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { /* Host-aware */ q->limits.zoned = BLK_ZONED_HA; - else + } else { /* - * Treat drive-managed devices as - * regular block devices. + * Treat drive-managed devices and host-aware devices + * with partitions as regular block devices. */ q->limits.zoned = BLK_ZONED_NONE; + } } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f8faf8b3d965..fb41636519ee 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device, */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); /* + * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ - host->nr_hw_queues = num_present_cpus(); + if (!dev_is_ide) + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. 
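The vnic_dev hunks above replace pairs of independent u64 locals with a two-element array before the result is reinterpreted as bytes: two separate locals carry no adjacency guarantee, so ((u8 *)&a0)[i] is only well-defined while the index stays inside a0, while ((u8 *)&a)[i] may span both words of a single object. A minimal user-space sketch of the idiom; the helper and the MAC value are invented for illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Invented stand-in for the firmware call that fills two result words. */
static void fake_fw_mac_query(uint64_t *a0, uint64_t *a1)
{
	*a0 = 0x0000665544332211ULL;	/* bytes 0..5 hold the MAC */
	*a1 = 0;
}

int main(void)
{
	uint64_t a[2] = { 0 };	/* one object, contiguous by definition */
	uint8_t mac[ETH_ALEN];
	int i;

	fake_fw_mac_query(&a[0], &a[1]);
	for (i = 0; i < ETH_ALEN; i++)
		mac[i] = ((uint8_t *)a)[i];	/* stays inside the array */

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", mac[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}

On a little-endian host this prints 11:22:33:44:55:66, matching the byte-wise extraction the driver performs on the reply words.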
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 5823f5b67d16..3f0261d53ad9 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, struct meson_ee_pwrc *pwrc, struct meson_ee_pwrc_domain *dom) { + int ret; + dom->pwrc = pwrc; dom->num_rstc = dom->desc.reset_names_count; dom->num_clks = dom->desc.clk_names_count; @@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, * prepare/enable counters won't be in sync. */ if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) { - int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); + ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); if (ret) return ret; - pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false); - } else - pm_genpd_init(&dom->base, NULL, - (dom->desc.get_power ? - dom->desc.get_power(dom) : true)); + ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov, + false); + if (ret) + return ret; + } else { + ret = pm_genpd_init(&dom->base, NULL, + (dom->desc.get_power ? + dom->desc.get_power(dom) : true)); + if (ret) + return ret; + } return 0; } @@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev) pwrc->xlate.domains[i] = &dom->base; } - of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); - - return 0; + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); } static void meson_ee_pwrc_shutdown(struct platform_device *pdev) diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index a9ffff3277c7..a5069394cd61 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -9,7 +9,7 @@ #include <linux/interrupt.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define SIFIVE_L2_DIRECCFIX_LOW 0x100 #define SIFIVE_L2_DIRECCFIX_HIGH 0x104 diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index cf545f428d03..4486e055794c 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS called ti_sci_pm_domains. Note this is needed early in boot before rootfs may be available. +config TI_K3_RINGACC + bool "K3 Ring accelerator Sub System" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_INTA_IRQCHIP + help + Say y here to support the K3 Ring accelerator module. + The Ring Accelerator (RINGACC or RA) provides hardware acceleration + to enable straightforward passing of work between a producer + and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs. + If unsure, say N. 
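The help text above describes the hardware model; the k3-ringacc.c driver added further below exposes it to kernel clients through k3_ringacc_request_ring(), k3_ringacc_ring_cfg(), k3_ringacc_ring_push() and k3_ringacc_ring_pop(). A hedged sketch of a client, with the ring size, element payload and error handling invented for illustration:

#include <linux/soc/ti/k3-ringacc.h>

/* Hypothetical client: pass one 8-byte element through a GP ring. */
static int demo_ring_xfer(struct k3_ringacc *ringacc)
{
	struct k3_ring_cfg cfg = {
		.size = 128,				/* elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8 bytes each */
		.mode = K3_RINGACC_RING_MODE_RING,
	};
	u64 elem = 0x1234, out = 0;
	struct k3_ring *ring;
	int ret;

	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto out_free;

	ret = k3_ringacc_ring_push(ring, &elem);	/* tail push */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &out);	/* head pop */

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}

In K3_RINGACC_RING_MODE_RING mode the push and pop go through the DMA-coherent ring memory while the doorbell register keeps the hardware occupancy in step, as the mode_ring ops in the driver show.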
+ endif # SOC_TI config TI_SCI_INTA_MSI_DOMAIN diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index 788b5cd1e180..bec827937a5f 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o +obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c new file mode 100644 index 000000000000..5fb2ee2ac978 --- /dev/null +++ b/drivers/soc/ti/k3-ringacc.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI K3 NAVSS Ring Accelerator subsystem driver + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> + +static LIST_HEAD(k3_ringacc_list); +static DEFINE_MUTEX(k3_ringacc_list_lock); + +#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0) + +/** + * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region + * + * @resv_16: Reserved + * @db: Ring Doorbell Register + * @resv_4: Reserved + * @occ: Ring Occupancy Register + * @indx: Ring Current Index Register + * @hwocc: Ring Hardware Occupancy Register + * @hwindx: Ring Hardware Current Index Register + */ +struct k3_ring_rt_regs { + u32 resv_16[4]; + u32 db; + u32 resv_4[1]; + u32 occ; + u32 indx; + u32 hwocc; + u32 hwindx; +}; + +#define K3_RINGACC_RT_REGS_STEP 0x1000 + +/** + * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region + * + * @head_data: Ring Head Entry Data Registers + * @tail_data: Ring Tail Entry Data Registers + * @peek_head_data: Ring Peek Head Entry Data Regs + * @peek_tail_data: Ring Peek Tail Entry Data Regs + */ +struct k3_ring_fifo_regs { + u32 head_data[128]; + u32 tail_data[128]; + u32 peek_head_data[128]; + u32 peek_tail_data[128]; +}; + +/** + * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region + * + * @revision: Revision Register + * @config: Config Register + */ +struct k3_ringacc_proxy_gcfg_regs { + u32 revision; + u32 config; +}; + +#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0) + +/** + * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region + * + * @control: Proxy Control Register + * @status: Proxy Status Register + * @resv_512: Reserved + * @data: Proxy Data Register + */ +struct k3_ringacc_proxy_target_regs { + u32 control; + u32 status; + u8 resv_512[504]; + u32 data[128]; +}; + +#define K3_RINGACC_PROXY_TARGET_STEP 0x1000 +#define K3_RINGACC_PROXY_NOT_USED (-1) + +enum k3_ringacc_proxy_access_mode { + PROXY_ACCESS_MODE_HEAD = 0, + PROXY_ACCESS_MODE_TAIL = 1, + PROXY_ACCESS_MODE_PEEK_HEAD = 2, + PROXY_ACCESS_MODE_PEEK_TAIL = 3, +}; + +#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U) +#define K3_RINGACC_FIFO_REGS_STEP 0x1000 +#define K3_RINGACC_MAX_DB_RING_CNT (127U) + +struct k3_ring_ops { + int (*push_tail)(struct k3_ring *ring, void *elm); + int (*push_head)(struct k3_ring *ring, void *elm); + int (*pop_tail)(struct k3_ring *ring, void *elm); + int (*pop_head)(struct k3_ring *ring, void *elm); +}; + +/** + * struct k3_ring - RA Ring descriptor + * + * @rt: Ring control/status 
registers + * @fifos: Ring queues registers + * @proxy: Ring Proxy Datapath registers + * @ring_mem_dma: Ring buffer dma address + * @ring_mem_virt: Ring buffer virt address + * @ops: Ring operations + * @size: Ring size in elements + * @elm_size: Size of the ring element + * @mode: Ring mode + * @flags: flags + * @free: Number of free elements + * @occ: Ring occupancy + * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING) + * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING) + * @ring_id: Ring Id + * @parent: Pointer on struct @k3_ringacc + * @use_count: Use count for shared rings + * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY) + */ +struct k3_ring { + struct k3_ring_rt_regs __iomem *rt; + struct k3_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_ring_ops *ops; + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; + u32 flags; +#define K3_RING_FLAG_BUSY BIT(1) +#define K3_RING_FLAG_SHARED BIT(2) + u32 free; + u32 occ; + u32 windex; + u32 rindex; + u32 ring_id; + struct k3_ringacc *parent; + u32 use_count; + int proxy_id; +}; + +/** + * struct k3_ringacc - Rings accelerator descriptor + * + * @dev: pointer on RA device + * @proxy_gcfg: RA proxy global config registers + * @proxy_target_base: RA proxy datapath region + * @num_rings: number of ring in RA + * @rings_inuse: bitfield for ring usage tracking + * @rm_gp_range: general purpose rings range from tisci + * @dma_ring_reset_quirk: DMA reset w/a enable + * @num_proxies: number of RA proxies + * @proxy_inuse: bitfield for proxy usage tracking + * @rings: array of rings descriptors (struct @k3_ring) + * @list: list of RAs in the system + * @req_lock: protect rings allocation + * @tisci: pointer ti-sci handle + * @tisci_ring_ops: ti-sci rings ops + * @tisci_dev_id: ti-sci device id + */ +struct k3_ringacc { + struct device *dev; + struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg; + void __iomem *proxy_target_base; + u32 num_rings; /* number of rings in Ringacc module */ + unsigned long *rings_inuse; + struct ti_sci_resource *rm_gp_range; + + bool dma_ring_reset_quirk; + u32 num_proxies; + unsigned long *proxy_inuse; + + struct k3_ring *rings; + struct list_head list; + struct mutex req_lock; /* protect rings allocation */ + + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_ringacc_ops *tisci_ring_ops; + u32 tisci_dev_id; +}; + +static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring) +{ + return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES - + (4 << ring->elm_size); +} + +static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx) +{ + return (ring->ring_mem_virt + idx * (4 << ring->elm_size)); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_ring_ops = { + .push_tail = k3_ringacc_ring_push_mem, + .pop_head = k3_ringacc_ring_pop_mem, +}; + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_msg_ops = { + .push_tail = k3_ringacc_ring_push_io, + .push_head = k3_ringacc_ring_push_head_io, + .pop_tail = k3_ringacc_ring_pop_tail_io, + .pop_head = 
k3_ringacc_ring_pop_io, +}; + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_proxy_ops = { + .push_tail = k3_ringacc_ring_push_tail_proxy, + .push_head = k3_ringacc_ring_push_head_proxy, + .pop_tail = k3_ringacc_ring_pop_tail_proxy, + .pop_head = k3_ringacc_ring_pop_head_proxy, +}; + +static void k3_ringacc_ring_dump(struct k3_ring *ring) +{ + struct device *dev = ring->parent->dev; + + dev_dbg(dev, "dump ring: %d\n", ring->ring_id); + dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt, + &ring->ring_mem_dma); + dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n", + ring->elm_size, ring->size, ring->mode, ring->proxy_id); + + dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db)); + dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ)); + dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx)); + dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc)); + dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx)); + + if (ring->ring_mem_virt) + print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE, + 16, 1, ring->ring_mem_virt, 16 * 8, false); +} + +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags) +{ + int proxy_id = K3_RINGACC_PROXY_NOT_USED; + + mutex_lock(&ringacc->req_lock); + + if (id == K3_RINGACC_RING_ID_ANY) { + /* Request for any general purpose ring */ + struct ti_sci_resource_desc *gp_rings = + &ringacc->rm_gp_range->desc[0]; + unsigned long size; + + size = gp_rings->start + gp_rings->num; + id = find_next_zero_bit(ringacc->rings_inuse, size, + gp_rings->start); + if (id == size) + goto error; + } else if (id < 0) { + goto error; + } + + if (test_bit(id, ringacc->rings_inuse) && + !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) + goto error; + else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) + goto out; + + if (flags & K3_RINGACC_RING_USE_PROXY) { + proxy_id = find_next_zero_bit(ringacc->proxy_inuse, + ringacc->num_proxies, 0); + if (proxy_id == ringacc->num_proxies) + goto error; + } + + if (proxy_id != K3_RINGACC_PROXY_NOT_USED) { + set_bit(proxy_id, ringacc->proxy_inuse); + ringacc->rings[id].proxy_id = proxy_id; + dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id, + proxy_id); + } else { + dev_dbg(ringacc->dev, "Giving ring#%d\n", id); + } + + set_bit(id, ringacc->rings_inuse); +out: + ringacc->rings[id].use_count++; + mutex_unlock(&ringacc->req_lock); + return &ringacc->rings[id]; + +error: + mutex_unlock(&ringacc->req_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(k3_ringacc_request_ring); + +static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + ring->size, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + k3_ringacc_ring_reset_sci(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset); + +static 
void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring, + enum k3_ring_mode mode) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + mode, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + if (!ring->parent->dma_ring_reset_quirk) + goto reset; + + if (!occ) + occ = readl(&ring->rt->occ); + + if (occ) { + u32 db_ring_cnt, db_ring_cnt_cur; + + dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__, + ring->ring_id, occ); + /* TI-SCI ring reset */ + k3_ringacc_ring_reset_sci(ring); + + /* + * Setup the ring in ring/doorbell mode (if not already in this + * mode) + */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci( + ring, K3_RINGACC_RING_MODE_RING); + /* + * Ring the doorbell 2**22 – ringOcc times. + * This will wrap the internal UDMAP ring state occupancy + * counter (which is 21-bits wide) to 0. + */ + db_ring_cnt = (1U << 22) - occ; + + while (db_ring_cnt != 0) { + /* + * Ring the doorbell with the maximum count each + * iteration if possible to minimize the total + * of writes + */ + if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT) + db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT; + else + db_ring_cnt_cur = db_ring_cnt; + + writel(db_ring_cnt_cur, &ring->rt->db); + db_ring_cnt -= db_ring_cnt_cur; + } + + /* Restore the original ring mode (if not ring mode) */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); + } + +reset: + /* Reset the ring */ + k3_ringacc_ring_reset(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma); + +static void k3_ringacc_ring_free_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +int k3_ringacc_ring_free(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc; + + if (!ring) + return -EINVAL; + + ringacc = ring->parent; + + dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags); + + if (!test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + mutex_lock(&ringacc->req_lock); + + if (--ring->use_count) + goto out; + + if (!(ring->flags & K3_RING_FLAG_BUSY)) + goto no_init; + + k3_ringacc_ring_free_sci(ring); + + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, ring->ring_mem_dma); + ring->flags = 0; + ring->ops = NULL; + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { + clear_bit(ring->proxy_id, ringacc->proxy_inuse); + ring->proxy = NULL; + ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + +no_init: + clear_bit(ring->ring_id, ringacc->rings_inuse); + +out: + mutex_unlock(&ringacc->req_lock); + return 0; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_free); + +u32 k3_ringacc_get_ring_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->ring_id; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->parent->tisci_dev_id; +} 
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id); + +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) +{ + int irq_num; + + if (!ring) + return -EINVAL; + + irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id); + if (irq_num <= 0) + irq_num = -EINVAL; + return irq_num; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num); + +static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + u32 ring_idx; + int ret; + + if (!ringacc->tisci) + return -EINVAL; + + ring_idx = ring->ring_id; + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring_idx, + lower_32_bits(ring->ring_mem_dma), + upper_32_bits(ring->ring_mem_dma), + ring->size, + ring->mode, + ring->elm_size, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", + ret, ring_idx); + + return ret; +} + +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret = 0; + + if (!ring || !cfg) + return -EINVAL; + if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 || + cfg->mode >= K3_RINGACC_RING_MODE_INVALID || + cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK || + !test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE && + ring->proxy_id == K3_RINGACC_PROXY_NOT_USED && + cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) { + dev_err(ringacc->dev, + "Message mode must use proxy for %u element size\n", + 4 << ring->elm_size); + return -EINVAL; + } + + /* + * In case of shared ring only the first user (master user) can + * configure the ring. The sequence should be by the client: + * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user + * k3_ringacc_ring_cfg(ring, cfg); # master configuration + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + */ + if (ring->use_count != 1) + return 0; + + ring->size = cfg->size; + ring->elm_size = cfg->elm_size; + ring->mode = cfg->mode; + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) + ring->proxy = ringacc->proxy_target_base + + ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; + + switch (ring->mode) { + case K3_RINGACC_RING_MODE_RING: + ring->ops = &k3_ring_mode_ring_ops; + break; + case K3_RINGACC_RING_MODE_MESSAGE: + if (ring->proxy) + ring->ops = &k3_ring_mode_proxy_ops; + else + ring->ops = &k3_ring_mode_msg_ops; + break; + default: + ring->ops = NULL; + ret = -EINVAL; + goto err_free_proxy; + }; + + ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + &ring->ring_mem_dma, GFP_KERNEL); + if (!ring->ring_mem_virt) { + dev_err(ringacc->dev, "Failed to alloc ring mem\n"); + ret = -ENOMEM; + goto err_free_ops; + } + + ret = k3_ringacc_ring_cfg_sci(ring); + + if (ret) + goto err_free_mem; + + ring->flags |= K3_RING_FLAG_BUSY; + ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ? 
+ K3_RING_FLAG_SHARED : 0; + + k3_ringacc_ring_dump(ring); + + return 0; + +err_free_mem: + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, + ring->ring_mem_dma); +err_free_ops: + ring->ops = NULL; +err_free_proxy: + ring->proxy = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg); + +u32 k3_ringacc_ring_get_size(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return ring->size; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size); + +u32 k3_ringacc_ring_get_free(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->free) + ring->free = ring->size - readl(&ring->rt->occ); + + return ring->free; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free); + +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return readl(&ring->rt->occ); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ); + +u32 k3_ringacc_ring_is_full(struct k3_ring *ring) +{ + return !k3_ringacc_ring_get_free(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full); + +enum k3_ringacc_access_mode { + K3_RINGACC_ACCESS_MODE_PUSH_HEAD, + K3_RINGACC_ACCESS_MODE_POP_HEAD, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL, + K3_RINGACC_ACCESS_MODE_POP_TAIL, + K3_RINGACC_ACCESS_MODE_PEEK_HEAD, + K3_RINGACC_ACCESS_MODE_PEEK_TAIL, +}; + +#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16) +#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24) +static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring, + enum k3_ringacc_proxy_access_mode mode) +{ + u32 val; + + val = ring->ring_id; + val |= K3_RINGACC_PROXY_MODE(mode); + val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size); + writel(val, &ring->proxy->control); + return 0; +} + +static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + ptr = (void __iomem *)&ring->proxy->data; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free, + ring->occ); + return 0; +} + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + 
K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + ptr = (void __iomem *)&ring->fifos->head_data; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + ptr = (void __iomem *)&ring->fifos->tail_data; + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free, + ring->windex, ring->occ, ring->rindex); + return 0; +} + +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex); + + memcpy(elem_ptr, elem, (4 << ring->elm_size)); + + ring->windex = (ring->windex + 1) % ring->size; + ring->free--; + writel(1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n", + ring->free, ring->windex); + + return 0; +} + +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex); + + memcpy(elem, elem_ptr, (4 << ring->elm_size)); + + ring->rindex = (ring->rindex + 1) % ring->size; + ring->occ--; + writel(-1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n", + ring->occ, ring->rindex, elem_ptr); + return 0; +} + +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free, + ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_tail) + ret = ring->ops->push_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push); + +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + 
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n", + ring->free, ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_head) + ret = ring->ops->push_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head); + +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_head) + ret = ring->ops->pop_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop); + +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_tail) + ret = ring->ops->pop_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail); + +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property) +{ + struct device_node *ringacc_np; + struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER); + struct k3_ringacc *entry; + + ringacc_np = of_parse_phandle(np, property, 0); + if (!ringacc_np) + return ERR_PTR(-ENODEV); + + mutex_lock(&k3_ringacc_list_lock); + list_for_each_entry(entry, &k3_ringacc_list, list) + if (entry->dev->of_node == ringacc_np) { + ringacc = entry; + break; + } + mutex_unlock(&k3_ringacc_list_lock); + of_node_put(ringacc_np); + + return ringacc; +} +EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle); + +static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc) +{ + struct device_node *node = ringacc->dev->of_node; + struct device *dev = ringacc->dev; + struct platform_device *pdev = to_platform_device(dev); + int ret; + + if (!node) { + dev_err(dev, "device tree info unavailable\n"); + return -ENODEV; + } + + ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings); + if (ret) { + dev_err(dev, "ti,num-rings read failure %d\n", ret); + return ret; + } + + ringacc->dma_ring_reset_quirk = + of_property_read_bool(node, "ti,dma-ring-reset-quirk"); + + ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci"); + if (IS_ERR(ringacc->tisci)) { + ret = PTR_ERR(ringacc->tisci); + if (ret != -EPROBE_DEFER) + dev_err(dev, "ti,sci read fail %d\n", ret); + ringacc->tisci = NULL; + return ret; + } + + ret = of_property_read_u32(node, "ti,sci-dev-id", + &ringacc->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read fail %d\n", ret); + return ret; + } + + pdev->id = ringacc->tisci_dev_id; + + ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev, + ringacc->tisci_dev_id, + "ti,sci-rm-range-gp-rings"); + if (IS_ERR(ringacc->rm_gp_range)) { + dev_err(dev, "Failed to allocate MSI interrupts\n"); + return PTR_ERR(ringacc->rm_gp_range); + } + + return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev, + ringacc->rm_gp_range); +} + +static int k3_ringacc_probe(struct platform_device *pdev) +{ + struct k3_ringacc *ringacc; + void __iomem *base_fifo, *base_rt; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, i; + + ringacc = devm_kzalloc(dev, sizeof(*ringacc), 
GFP_KERNEL); + if (!ringacc) + return -ENOMEM; + + ringacc->dev = dev; + mutex_init(&ringacc->req_lock); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + ret = k3_ringacc_probe_dt(ringacc); + if (ret) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt"); + base_rt = devm_ioremap_resource(dev, res); + if (IS_ERR(base_rt)) + return PTR_ERR(base_rt); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos"); + base_fifo = devm_ioremap_resource(dev, res); + if (IS_ERR(base_fifo)) + return PTR_ERR(base_fifo); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg"); + ringacc->proxy_gcfg = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_gcfg)) + return PTR_ERR(ringacc->proxy_gcfg); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "proxy_target"); + ringacc->proxy_target_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_target_base)) + return PTR_ERR(ringacc->proxy_target_base); + + ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) & + K3_RINGACC_PROXY_CFG_THREADS_MASK; + + ringacc->rings = devm_kzalloc(dev, + sizeof(*ringacc->rings) * + ringacc->num_rings, + GFP_KERNEL); + ringacc->rings_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_rings), + sizeof(unsigned long), GFP_KERNEL); + ringacc->proxy_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_proxies), + sizeof(unsigned long), GFP_KERNEL); + + if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) + return -ENOMEM; + + for (i = 0; i < ringacc->num_rings; i++) { + ringacc->rings[i].rt = base_rt + + K3_RINGACC_RT_REGS_STEP * i; + ringacc->rings[i].fifos = base_fifo + + K3_RINGACC_FIFO_REGS_STEP * i; + ringacc->rings[i].parent = ringacc; + ringacc->rings[i].ring_id = i; + ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + dev_set_drvdata(dev, ringacc); + + ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops; + + mutex_lock(&k3_ringacc_list_lock); + list_add_tail(&ringacc->list, &k3_ringacc_list); + mutex_unlock(&k3_ringacc_list_lock); + + dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", + ringacc->num_rings, + ringacc->rm_gp_range->desc[0].start, + ringacc->rm_gp_range->desc[0].num, + ringacc->tisci_dev_id); + dev_info(dev, "dma-ring-reset-quirk: %s\n", + ringacc->dma_ring_reset_quirk ? "enabled" : "disabled"); + dev_info(dev, "RA Proxy rev. 
%08x, num_proxies:%u\n" + readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies); + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id k3_ringacc_of_match[] = { + { .compatible = "ti,am654-navss-ringacc", }, + {}, +}; + +static struct platform_driver k3_ringacc_driver = { + .probe = k3_ringacc_probe, + .driver = { + .name = "k3-ringacc", + .of_match_table = k3_ringacc_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(k3_ringacc_driver); diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 378369d9364a..e9ece45d7a33 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc) ret = rproc_boot(m3_ipc->rproc); if (ret) dev_err(dev, "rproc_boot failed\n"); + else + m3_ipc_state = m3_ipc; do_exit(0); } @@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) goto err_put_rproc; } - m3_ipc_state = m3_ipc; - return 0; err_put_rproc: diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 870f7797b56b..d6ed0c355954 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. +config SPI_HISI_SFC_V3XX + tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" + depends on (ARM64 && ACPI) || COMPILE_TEST + depends on HAS_IOMEM + select MTD_SPI_NOR + help + This enables support for HiSilicon v3xx SPI-NOR flash controller + found in hi16xx chipsets. + config SPI_NXP_FLEXSPI tristate "NXP Flex SPI controller" depends on ARCH_LAYERSCAPE || HAS_IOMEM diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index bb49c9e6d0a0..9b65ec5afc5e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o obj-$(CONFIG_SPI_IMX) += spi-imx.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 56f0ca361deb..013458cabe3c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master, master->dma_tx = dma_request_chan(dev, "tx"); if (IS_ERR(master->dma_tx)) { err = PTR_ERR(master->dma_tx); - if (err == -EPROBE_DEFER) { - dev_warn(dev, "no DMA channel available at the moment\n"); - goto error_clear; - } - dev_err(dev, - "DMA TX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + if (err != -EPROBE_DEFER) + dev_err(dev, "No TX DMA channel, DMA is disabled\n"); goto error_clear; } - /* - * No reason to check EPROBE_DEFER here since we have already requested - * tx channel. If it fails here, it's for another reason. - */ - master->dma_rx = dma_request_slave_channel(dev, "rx"); - - if (!master->dma_rx) { - dev_err(dev, - "DMA RX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + master->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(master->dma_rx)) { + err = PTR_ERR(master->dma_rx); + /* + * No reason to check EPROBE_DEFER here since we have already + * requested tx channel. 
+ */ + dev_err(dev, "No RX DMA channel, DMA is disabled\n"); goto error; } @@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master, return 0; error: - if (master->dma_rx) + if (!IS_ERR(master->dma_rx)) dma_release_channel(master->dma_rx); if (!IS_ERR(master->dma_tx)) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 85bad70f59e3..23d295f36c80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev, name = qspi_irq_tab[val].irq_name; if (qspi_irq_tab[val].irq_source == SINGLE_L2) { /* get the l2 interrupts */ - irq = platform_get_irq_byname(pdev, name); + irq = platform_get_irq_byname_optional(pdev, name); } else if (!num_ints && soc_intc) { /* all mspi, bspi intrs muxed to one L1 intr */ irq = platform_get_irq(pdev, 0); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index fb61a620effc..11c235879bb7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -68,7 +68,7 @@ #define BCM2835_SPI_FIFO_SIZE 64 #define BCM2835_SPI_FIFO_SIZE_3_4 48 #define BCM2835_SPI_DMA_MIN_LENGTH 96 -#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */ +#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */ #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS | SPI_3WIRE) @@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr, } } -static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, - struct bcm2835_spi *bs) +static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, + struct bcm2835_spi *bs) { struct dma_slave_config slave_config; const __be32 *addr; @@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); if (!addr) { dev_err(dev, "could not get DMA-register address - not using dma mode\n"); - goto err; + /* Fall back to interrupt mode */ + return 0; } dma_reg_base = be32_to_cpup(addr); /* get tx/rx dma */ - ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!ctlr->dma_tx) { + ctlr->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ctlr->dma_tx)) { dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_tx); + ctlr->dma_tx = NULL; goto err; } - ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!ctlr->dma_rx) { + ctlr->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ctlr->dma_rx)) { dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_rx); + ctlr->dma_rx = NULL; goto err_release; } @@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, /* all went well, so set can_dma */ ctlr->can_dma = bcm2835_spi_can_dma; - return; + return 0; err_config: dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", @@ -1005,7 +1010,14 @@ err_config: err_release: bcm2835_dma_release(ctlr, bs); err: - return; + /* + * Only report error for deferred probing, otherwise fall back to + * interrupt mode + */ + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, @@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); - dev_err(&pdev->dev, "could not get clk: %d\n", err); + if (err == -EPROBE_DEFER) + 
dev_dbg(&pdev->dev, "could not get clk: %d\n", err); + else + dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_controller_put; } @@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev) clk_prepare_enable(bs->clk); - bcm2835_dma_init(ctlr, &pdev->dev, bs); + err = bcm2835_dma_init(ctlr, &pdev->dev, bs); + if (err) + goto out_clk_disable; /* initialise the hardware with the default polarities */ bcm2835_wr(bs, BCM2835_SPI_CS, @@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctlr); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } err = devm_spi_register_controller(&pdev->dev, ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); return 0; +out_dma_release: + bcm2835_dma_release(ctlr, bs); out_clk_disable: clk_disable_unprepare(bs->clk); out_controller_put: diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index d84e22dd6f9f..68491a8bf7b5 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable) int spi_bitbang_init(struct spi_bitbang *bitbang) { struct spi_master *master = bitbang->master; + bool custom_cs; - if (!master || !bitbang->chipselect) + if (!master) + return -EINVAL; + /* + * We only need the chipselect callback if we are actually using it. + * If we just use GPIO descriptors, it is surplus. If the + * SPI_MASTER_GPIO_SS flag is set, we always need to call the + * driver-specific chipselect routine. + */ + custom_cs = (!master->use_gpio_descriptors || + (master->flags & SPI_MASTER_GPIO_SS)); + + if (custom_cs && !bitbang->chipselect) return -EINVAL; mutex_init(&bitbang->lock); @@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang) master->prepare_transfer_hardware = spi_bitbang_prepare_hardware; master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware; master->transfer_one = spi_bitbang_transfer_one; - master->set_cs = spi_bitbang_set_cs; + /* + * When using GPIO descriptors, the ->set_cs() callback doesn't even + * get called unless SPI_MASTER_GPIO_SS is set. 
+ */ + if (custom_cs) + master->set_cs = spi_bitbang_set_cs; if (!bitbang->txrx_bufs) { bitbang->use_dma = 0; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 76d6b94a7597..31e3f866d11a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -172,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -186,13 +188,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -204,6 +209,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -276,18 +282,23 @@ static int dw_spi_transfer_one(struct spi_controller *master, { struct dw_spi *dws = spi_controller_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u32 cr0; int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); + + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); spi_enable_chip(dws, 0); @@ -461,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) struct spi_controller *master; int ret; - BUG_ON(dws == NULL); + if (!dws) + return -EINVAL; master = spi_alloc_master(dev, 0); if (!master) @@ -471,6 +483,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->type = SSI_MOTO_SPI; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); spi_controller_set_devdata(master, dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 38c7de1f0aa9..1bf5713e047d 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -119,6 +119,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 442cff71a0d2..6ec2dcb8c57a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -185,6 +185,7 @@ struct fsl_dspi { struct spi_transfer *cur_transfer; struct spi_message *cur_msg; struct chip_data *cur_chip; + size_t progress; size_t len; const void *tx; void *rx; @@ -395,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) if (!dma) return -ENOMEM; - dma->chan_rx = dma_request_slave_channel(dev, "rx"); - if (!dma->chan_rx) { + dma->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan_rx)) { dev_err(dev, "rx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_rx); return ret; } - dma->chan_tx = dma_request_slave_channel(dev, "tx"); - if (!dma->chan_tx) { + dma->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan_tx)) { dev_err(dev, "tx dma channel not available\n"); - ret = -ENODEV; + 
ret = PTR_ERR(dma->chan_tx); goto err_tx_channel; } @@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi) dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { - /* Write two TX FIFO entries first, and then the corresponding - * CMD FIFO entry. + /* Write the CMD FIFO entry first, and then the two + * corresponding TX FIFO entries. */ u32 data = dspi_pop_tx(dspi); - if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { - /* LSB */ - tx_fifo_write(dspi, data & 0xFFFF); - tx_fifo_write(dspi, data >> 16); - } else { - /* MSB */ - tx_fifo_write(dspi, data >> 16); - tx_fifo_write(dspi, data & 0xFFFF); - } cmd_fifo_write(dspi); + tx_fifo_write(dspi, data & 0xFFFF); + tx_fifo_write(dspi, data >> 16); } else { /* Write one entry to both TX FIFO and CMD FIFO * simultaneously. @@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) u32 spi_tcr; spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, - dspi->tx - dspi->bytes_per_word, !dspi->irq); + dspi->progress, !dspi->irq); /* Get transfer counter (in number of SPI transfers). It was * reset to 0 when transfer(s) were started. @@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); /* Update total number of bytes that were transferred */ msg->actual_length += spi_tcnt * dspi->bytes_per_word; + dspi->progress += spi_tcnt; trans_mode = dspi->devtype_data->trans_mode; if (trans_mode == DSPI_EOQ_MODE) @@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) return 0; spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); if (trans_mode == DSPI_EOQ_MODE) dspi_eoq_write(dspi); @@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->rx = transfer->rx_buf; dspi->rx_end = dspi->rx + transfer->len; dspi->len = transfer->len; + dspi->progress = 0; /* Validated transfer specific frame size (defaults applied) */ dspi->bits_per_word = transfer->bits_per_word; if (transfer->bits_per_word <= 8) @@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_CTARE_DTCP(1)); spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); trans_mode = dspi->devtype_data->trans_mode; switch (trans_mode) { diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 2cc0ddb4a988..d0b8cc741a24 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller, fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) - fsl_lpspi->usedma = 1; + fsl_lpspi->usedma = true; else - fsl_lpspi->usedma = 0; + fsl_lpspi->usedma = false; return fsl_lpspi_config(fsl_lpspi); } @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->transfer_one = fsl_lpspi_transfer_one; + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + controller->dev.of_node = pdev->dev.of_node; + controller->bus_num = pdev->id; + controller->slave_abort = fsl_lpspi_slave_abort; + + ret = 
devm_spi_register_controller(&pdev->dev, controller); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_controller error.\n"); + goto out_controller_put; + } + if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) controller->prepare_message = fsl_lpspi_prepare_message; } - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - controller->transfer_one = fsl_lpspi_transfer_one; - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; - controller->bus_num = pdev->id; - controller->slave_abort = fsl_lpspi_slave_abort; - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); - ret = devm_spi_register_controller(&pdev->dev, controller); - if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); - goto out_controller_put; - } - return 0; out_controller_put: diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 79b1558b74b8..e8a499cd1f13 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem, op->data.nbytes > q->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } static void fsl_qspi_prepare_lut(struct fsl_qspi *q, diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index fb4159ad6bf6..3b81772fea0d 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; - int irq = 0, type; - int ret = -ENOMEM; + int irq, type; + int ret; ret = of_mpc8xxx_spi_probe(ofdev); if (ret) @@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) if (spisel_boot) { pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); - if (!pinfo->immr_spi_cs) { - ret = -ENOMEM; - goto err; - } + if (!pinfo->immr_spi_cs) + return -ENOMEM; } #endif /* @@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) ret = of_address_to_resource(np, 0, &mem); if (ret) - goto err; + return ret; irq = platform_get_irq(ofdev, 0); - if (irq < 0) { - ret = irq; - goto err; - } + if (irq < 0) + return irq; master = fsl_spi_probe(dev, &mem, irq); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; -err: - return ret; + return PTR_ERR_OR_ZERO(master); } static int of_fsl_spi_remove(struct platform_device *ofdev) diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c new file mode 100644 index 000000000000..4cf8fc80a7b7 --- /dev/null +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets +// +// Copyright (c) 2019 HiSilicon Technologies Co., Ltd. 
+// Author: John Garry <john.garry@huawei.com> + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define HISI_SFC_V3XX_VERSION (0x1f8) + +#define HISI_SFC_V3XX_CMD_CFG (0x300) +#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 +#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) +#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) +#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4 +#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3) +#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1 +#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0) +#define HISI_SFC_V3XX_CMD_INS (0x308) +#define HISI_SFC_V3XX_CMD_ADDR (0x30c) +#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400) + +struct hisi_sfc_v3xx_host { + struct device *dev; + void __iomem *regbase; + int max_cmd_dword; +}; + +#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000 +#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10 + +static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg, + !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK), + HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US, + HISI_SFC_V3XX_WAIT_TIMEOUT_US); +} + +static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct hisi_sfc_v3xx_host *host; + uintptr_t addr = (uintptr_t)op->data.buf.in; + int max_byte_count; + + host = spi_controller_get_devdata(spi->master); + + max_byte_count = host->max_cmd_dword * 4; + + if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4) + op->data.nbytes = 4 - (addr % 4); + else if (op->data.nbytes > max_byte_count) + op->data.nbytes = max_byte_count; + + return 0; +} + +/* + * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the + * DATABUF registers - so use __io{read,write}32_copy when possible. For + * trailing bytes, copy them byte-by-byte from the DATABUF register, as we + * can't clobber outside the source/dest buffer. + * + * For efficient data read/write, we try to put any start 32b unaligned data + * into a separate transaction in hisi_sfc_v3xx_adjust_op_size(). 
+ */ +static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host, + u8 *to, unsigned int len) +{ + void __iomem *from; + int i; + + from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)to, 4)) { + int words = len / 4; + + __ioread32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val; + + to += words * 4; + from += words * 4; + + val = __raw_readl(from); + + for (i = 0; i < len; i++, val >>= 8, to++) + *to = (u8)val; + } + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) { + u32 val = __raw_readl(from); + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + to++, val >>= 8, j++) + *to = (u8)val; + } + } +} + +static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host, + const u8 *from, unsigned int len) +{ + void __iomem *to; + int i; + + to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)from, 4)) { + int words = len / 4; + + __iowrite32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val = 0; + + to += words * 4; + from += words * 4; + + for (i = 0; i < len; i++, from++) + val |= *from << i * 8; + __raw_writel(val, to); + } + + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) { + u32 val = 0; + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + from++, j++) + val |= *from << j * 8; + __raw_writel(val, to); + } + } +} + +static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, + const struct spi_mem_op *op, + u8 chip_select) +{ + int ret, len = op->data.nbytes; + u32 config = 0; + + if (op->addr.nbytes) + config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; + + if (op->data.dir != SPI_MEM_NO_DATA) { + config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; + config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; + } + + if (op->data.dir == SPI_MEM_DATA_OUT) + hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len); + else if (op->data.dir == SPI_MEM_DATA_IN) + config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK; + + config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF | + chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF | + HISI_SFC_V3XX_CMD_CFG_START_MSK; + + writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR); + writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS); + + writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG); + + ret = hisi_sfc_v3xx_wait_cmd_idle(host); + if (ret) + return ret; + + if (op->data.dir == SPI_MEM_DATA_IN) + hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len); + + return 0; +} + +static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct hisi_sfc_v3xx_host *host; + struct spi_device *spi = mem->spi; + u8 chip_select = spi->chip_select; + + host = spi_controller_get_devdata(spi->master); + + return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select); +} + +static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = { + .adjust_op_size = hisi_sfc_v3xx_adjust_op_size, + .exec_op = hisi_sfc_v3xx_exec_op, +}; + +static int hisi_sfc_v3xx_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_sfc_v3xx_host *host; + struct spi_controller *ctlr; + u32 version; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*host)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + host = spi_controller_get_devdata(ctlr); + host->dev = dev; + + platform_set_drvdata(pdev, host); + + host->regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->regbase)) { 
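The two DATABUF helpers above are careful never to touch memory outside the caller's buffer: whole 32-bit words go through __io{read,write}32_copy(), and any unaligned or trailing bytes are packed into a u32 least-significant-byte first on write, then shifted back out on read. A standalone round trip of that packing logic (pack_tail/unpack_tail are hypothetical names; plain memory stands in for the MMIO data buffer):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack up to 4 trailing bytes LSB-first, as the write helper does */
static uint32_t pack_tail(const uint8_t *from, unsigned int len)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		val |= (uint32_t)from[i] << i * 8;
	return val;
}

/* Shift them back out again, as the read helper does */
static void unpack_tail(uint8_t *to, uint32_t val, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++, val >>= 8)
		to[i] = (uint8_t)val;
}

int main(void)
{
	const uint8_t src[3] = { 0xaa, 0xbb, 0xcc };
	uint8_t dst[3] = { 0 };

	unpack_tail(dst, pack_tail(src, 3), 3);
	assert(memcmp(src, dst, 3) == 0);	/* bytes survive the round trip */
	return 0;
}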
+ ret = PTR_ERR(host->regbase); + goto err_put_master; + } + + ctlr->bus_num = -1; + ctlr->num_chipselect = 1; + ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops; + + version = readl(host->regbase + HISI_SFC_V3XX_VERSION); + + switch (version) { + case 0x351: + host->max_cmd_dword = 64; + break; + default: + host->max_cmd_dword = 16; + break; + } + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) + goto err_put_master; + + dev_info(&pdev->dev, "hw version 0x%x\n", version); + + return 0; + +err_put_master: + spi_master_put(ctlr); + return ret; +} + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = { + {"HISI0341", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids); +#endif + +static struct platform_driver hisi_sfc_v3xx_spi_driver = { + .driver = { + .name = "hisi-sfc-v3xx", + .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids), + }, + .probe = hisi_sfc_v3xx_probe, +}; + +module_platform_driver(hisi_sfc_v3xx_spi_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets"); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index f4a8f470aecc..8543f5ed1099 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev) master->unprepare_message = img_spfi_unprepare; master->handle_err = img_spfi_handle_err; - spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); - spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); + spfi->tx_ch = dma_request_chan(spfi->dev, "tx"); + if (IS_ERR(spfi->tx_ch)) { + ret = PTR_ERR(spfi->tx_ch); + spfi->tx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + + spfi->rx_ch = dma_request_chan(spfi->dev, "rx"); + if (IS_ERR(spfi->rx_ch)) { + ret = PTR_ERR(spfi->rx_ch); + spfi->rx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + if (!spfi->tx_ch || !spfi->rx_ch) { if (spfi->tx_ch) dma_release_channel(spfi->tx_ch); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 49f0099db0cb..f4f28a400a96 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi, } if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) - spi_imx->usedma = 1; + spi_imx->usedma = true; else - spi_imx->usedma = 0; + spi_imx->usedma = false; if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) { spi_imx->rx = mx53_ecspi_rx_slave; diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index f3f10443f9e2..7f5680fe2568 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -19,7 +19,6 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> -#include <linux/gpio.h> /* * The Meson SPICC controller could support DMA based transfers, but is not @@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master) static int meson_spicc_setup(struct spi_device *spi) { - int ret = 0; - if (!spi->controller_state) spi->controller_state = spi_master_get_devdata(spi->master); - else if (gpio_is_valid(spi->cs_gpio)) - goto out_gpio; - else if (spi->cs_gpio == -ENOENT) - return 0; - - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); - if (ret) { - dev_err(&spi->dev, "failed to request cs gpio\n"); - return ret; - } - } - -out_gpio: - ret = gpio_direction_output(spi->cs_gpio, - 
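The img-spfi hunk above is one instance of a conversion repeated throughout this section: dma_request_slave_channel() returned NULL for every failure, while dma_request_chan() returns an ERR_PTR, so a driver can finally distinguish "no channel described" (fall back to PIO) from "DMA provider not probed yet" (defer). A condensed sketch of the pattern, under a hypothetical helper name:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int request_optional_dma(struct device *dev, struct dma_chan **ch,
				const char *name)
{
	int ret;

	*ch = dma_request_chan(dev, name);
	if (!IS_ERR(*ch))
		return 0;

	ret = PTR_ERR(*ch);
	*ch = NULL;			/* caller falls back to PIO */

	/* only "provider not ready yet" must abort and retry the probe */
	return ret == -EPROBE_DEFER ? ret : 0;
}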
!(spi->mode & SPI_CS_HIGH)); - return ret; + return 0; } static void meson_spicc_cleanup(struct spi_device *spi) { - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); - spi->controller_state = NULL; } @@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev) master->prepare_message = meson_spicc_prepare_message; master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer; master->transfer_one = meson_spicc_transfer_one; + master->use_gpio_descriptors = true; /* Setup max rate according to the Meson GX datasheet */ if ((rate >> 2) > SPICC_MAX_FREQ) diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 996c1c8a9c71..dce85ee07cd0 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev) if (ret) goto out_master_free; - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(ssp->dev, "Failed to request DMA\n"); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_master_free; } diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index fe624731c74c..87cd0233c60b 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -12,6 +12,7 @@ #include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/of_gpio.h> +#include <linux/reset.h> #include <asm/unaligned.h> @@ -20,7 +21,7 @@ struct npcm_pspi { struct completion xfer_done; - struct regmap *rst_regmap; + struct reset_control *reset; struct spi_master *master; unsigned int tx_bytes; unsigned int rx_bytes; @@ -59,12 +60,6 @@ struct npcm_pspi { #define NPCM_PSPI_MIN_CLK_DIVIDER 4 #define NPCM_PSPI_DEFAULT_CLK 25000000 -/* reset register */ -#define NPCM7XX_IPSRST2_OFFSET 0x24 - -#define NPCM7XX_PSPI1_RESET BIT(22) -#define NPCM7XX_PSPI2_RESET BIT(23) - static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : 2; @@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, priv->mode = spi->mode; } + /* + * If transfer is even length, and 8 bits per word transfer, + * then implement 16 bits-per-word transfer. 
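+ * (An even number of bytes maps exactly onto 16-bit frames, halving the + * FIFO accesses; an odd length must stay at 8 bits per word, since a + * 16-bit frame would clock out a byte the caller never supplied.)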
+ */ + if (priv->bits_per_word == 8 && !(t->len & 0x1)) + t->bits_per_word = 16; + if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) { npcm_pspi_set_transfer_size(priv, t->bits_per_word); priv->bits_per_word = t->bits_per_word; @@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, static void npcm_pspi_send(struct npcm_pspi *priv) { int wsize; + u16 val; wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes); priv->tx_bytes -= wsize; @@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv) switch (wsize) { case 1: - iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + iowrite8(val, NPCM_PSPI_DATA + priv->base); break; case 2: - iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + val = *priv->tx_buf++ | (val << 8); + iowrite16(val, NPCM_PSPI_DATA + priv->base); break; default: WARN_ON_ONCE(1); return; } - - priv->tx_buf += wsize; } static void npcm_pspi_recv(struct npcm_pspi *priv) @@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv) switch (rsize) { case 1: - val = ioread8(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA); break; case 2: val = ioread16(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = (val >> 8); + *priv->rx_buf++ = val & 0xff; break; default: WARN_ON_ONCE(1); return; } - - *priv->rx_buf = val; - priv->rx_buf += rsize; } static int npcm_pspi_transfer_one(struct spi_master *master, @@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master) static void npcm_pspi_reset_hw(struct npcm_pspi *priv) { - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, - NPCM7XX_PSPI1_RESET << priv->id); - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0); + reset_control_assert(priv->reset); + udelay(5); + reset_control_deassert(priv->reset); } static irqreturn_t npcm_pspi_handler(int irq, void *dev_id) @@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (num_cs < 0) return num_cs; - pdev->id = of_alias_get_id(np, "spi"); - if (pdev->id < 0) - pdev->id = 0; - master = spi_alloc_master(&pdev->dev, sizeof(*priv)); if (!master) return -ENOMEM; @@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) priv = spi_master_get_devdata(master); priv->master = master; priv->is_save_param = false; - priv->id = pdev->id; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { @@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev) goto out_disable_clk; } - priv->rst_regmap = - syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst"); - if (IS_ERR(priv->rst_regmap)) { - dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n"); - return PTR_ERR(priv->rst_regmap); + priv->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto out_disable_clk; } /* reset SPI-HW block */ @@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER); master->mode_bits = SPI_CPHA | SPI_CPOL; master->dev.of_node = pdev->dev.of_node; - master->bus_num = pdev->id; + master->bus_num = -1; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->transfer_one = npcm_pspi_transfer_one; master->prepare_transfer_hardware = @@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (ret) goto out_disable_clk; - pr_info("NPCM 
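The send/recv changes above make the byte order of the new 16-bit path explicit: the first buffer byte is transmitted in the high half of the 16-bit word, and the receive side unpacks in the same order. A standalone round trip of that ordering (pack_be16/unpack_be16 are hypothetical helper names):

#include <assert.h>
#include <stdint.h>

static uint16_t pack_be16(const uint8_t *tx)
{
	uint16_t val = tx[0];

	return (uint16_t)(tx[1] | (val << 8));	/* first byte in the high half */
}

static void unpack_be16(uint8_t *rx, uint16_t val)
{
	rx[0] = val >> 8;
	rx[1] = val & 0xff;
}

int main(void)
{
	const uint8_t tx[2] = { 0x12, 0x34 };
	uint8_t rx[2];

	unpack_be16(rx, pack_be16(tx));		/* loopback */
	assert(rx[0] == 0x12 && rx[1] == 0x34);
	return 0;
}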
Peripheral SPI %d probed\n", pdev->id); + pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num); return 0; diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index e2331eb7b47a..9df7c5979c29 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -20,7 +20,6 @@ #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_oc_tiny.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/of.h> #define DRV_NAME "spi_oc_tiny" @@ -50,8 +49,6 @@ struct tiny_spi { unsigned int txc, rxc; const u8 *txp; u8 *rxp; - int gpio_cs_count; - int *gpio_cs; }; static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) @@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; } -static void tiny_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (hw->gpio_cs_count > 0) { - gpio_set_value(hw->gpio_cs[spi->chip_select], - (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); - } -} - static int tiny_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { @@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; - unsigned int i; u32 val; if (!np) return 0; - hw->gpio_cs_count = of_gpio_count(np); - if (hw->gpio_cs_count > 0) { - hw->gpio_cs = devm_kcalloc(&pdev->dev, - hw->gpio_cs_count, sizeof(unsigned int), - GFP_KERNEL); - if (!hw->gpio_cs) - return -ENOMEM; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); - if (hw->gpio_cs[i] < 0) - return -ENODEV; - } hw->bitbang.master->dev.of_node = pdev->dev.of_node; if (!of_property_read_u32(np, "clock-frequency", &val)) hw->freq = val; @@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev) struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev); struct tiny_spi *hw; struct spi_master *master; - unsigned int i; int err = -ENODEV; master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); @@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the master state. 
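npcm_pspi_reset_hw() above no longer pokes NPCM7xx-specific syscon offsets; with a reset_control handle from devm_reset_control_get(), the same driver works on any SoC whose device tree wires up a reset line. The pulse, reduced to a sketch (demo_reset_pulse is a hypothetical name; the 5us settle time mirrors the driver):

#include <linux/delay.h>
#include <linux/reset.h>

static void demo_reset_pulse(struct reset_control *reset)
{
	reset_control_assert(reset);	/* put the IP block into reset */
	udelay(5);			/* let it settle */
	reset_control_deassert(reset);	/* release it in a known-clean state */
}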
*/ master->bus_num = pdev->id; - master->num_chipselect = 255; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tiny_spi_setup; + master->use_gpio_descriptors = true; hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); @@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the state for the bitbang driver */ hw->bitbang.master = master; hw->bitbang.setup_transfer = tiny_spi_setup_transfer; - hw->bitbang.chipselect = tiny_spi_chipselect; hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; /* find and map our resources */ @@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev) } /* find platform data */ if (platp) { - hw->gpio_cs_count = platp->gpio_cs_count; - hw->gpio_cs = platp->gpio_cs; - if (platp->gpio_cs_count && !platp->gpio_cs) { - err = -EBUSY; - goto exit; - } hw->freq = platp->freq; hw->baudwidth = platp->baudwidth; } else { @@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev) if (err) goto exit; } - for (i = 0; i < hw->gpio_cs_count; i++) { - err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); - if (err) - goto exit_gpio; - gpio_direction_output(hw->gpio_cs[i], 1); - } - hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count); /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); @@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev) return 0; -exit_gpio: - while (i-- > 0) - gpio_free(hw->gpio_cs[i]); exit: spi_master_put(master); return err; @@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct spi_master *master = hw->bitbang.master; - unsigned int i; spi_bitbang_stop(&hw->bitbang); - for (i = 0; i < hw->gpio_cs_count; i++) - gpio_free(hw->gpio_cs[i]); spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 9071333ebdd8..4c7a71f0fb3e 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) return limit; } +static void pxa2xx_spi_off(struct driver_data *drv_data) +{ + /* On MMP, disabling SSE seems to corrupt the rx fifo */ + if (drv_data->ssp_type == MMP2_SSP) + return; + + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); +} + static int null_writer(struct driver_data *drv_data) { u8 n_bytes = drv_data->n_bytes; @@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) static void handle_bad_msg(struct driver_data *drv_data) { - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1); if (!pxa25x_ssp_comp(drv_data)) @@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); + if (drv_data->ssp_type != MMP2_SSP) + pxa2xx_spi_write(drv_data, SSCR0, cr0 
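oc-tiny above, like meson-spicc earlier and stm32 later in this section, sets use_gpio_descriptors so the SPI core parses cs-gpios, requests the descriptors, and toggles chip-select itself. What remains in a probe function is roughly this (demo_spi_probe is an illustrative sketch, not the oc-tiny code, which registers through spi_bitbang_start()):

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int demo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->dev.of_node = pdev->dev.of_node;
	master->use_gpio_descriptors = true;	/* core claims cs-gpios itself */

	/* no of_gpio_count(), no gpio_request() loop, no chipselect callback */
	ret = devm_spi_register_controller(&pdev->dev, master);
	if (ret)
		spi_master_put(master);

	return ret;
}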
& ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, chip->timeout); /* first set CR1 without interrupt and service enables */ @@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); @@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); /* Clear and disable interrupts and service requests */ write_SSSR_CS(drv_data, drv_data->clear_sr); pxa2xx_spi_write(drv_data, SSCR1, @@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); return 0; } @@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { /* KBL-H */ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP }, { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP }, + /* CML-V */ + { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP }, + { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP }, /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c index 250fd60e1678..3c4f83bf7084 100644 --- a/drivers/spi/spi-qcom-qspi.c +++ b/drivers/spi/spi-qcom-qspi.c @@ -137,7 +137,7 @@ enum qspi_clocks { struct qcom_qspi { void __iomem *base; struct device *dev; - struct clk_bulk_data clks[QSPI_NUM_CLKS]; + struct clk_bulk_data *clks; struct qspi_xfer xfer; /* Lock to protect xfer and IRQ accessed registers */ spinlock_t lock; @@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev) goto exit_probe_master_put; } + ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, + sizeof(*ctrl->clks), GFP_KERNEL); + if (!ctrl->clks) { + ret = -ENOMEM; + goto exit_probe_master_put; + } + ctrl->clks[QSPI_CLK_CORE].id = "core"; ctrl->clks[QSPI_CLK_IFACE].id = "iface"; ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 7222c7689c3c..85575d45901c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -159,7 +159,7 @@ #define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0 #define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1 #define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */ -#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */ +#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */ #define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */ #define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */ #define SPCMD_CPHA 0x0001 /* Clock Phase Setting */ @@ -242,6 +242,7 @@ struct spi_ops { u16 mode_bits; u16 flags; u16 fifo_size; + u8 num_hw_ss; }; /* @@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) return n; } -#define set_config_register(spi, n) spi->ops->set_config_register(spi, n) - static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) { rspi_write8(rspi, rspi_read8(rspi, 
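Every SSP-disable site in spi-pxa2xx now funnels through pxa2xx_spi_off(), introduced above, so the MMP2 erratum lives in exactly one early return instead of being guarded at five call sites. The helper's shape, using raw readl()/writel() as stand-ins for the driver's pxa2xx_spi_read()/pxa2xx_spi_write() accessors:

#include <linux/io.h>
#include <linux/types.h>

#define SSCR0		0x00		/* SSP Control Register 0 offset */
#define SSCR0_SSE	(1 << 7)	/* Synchronous Serial Port Enable */

static void ssp_off(void __iomem *base, bool is_mmp2)
{
	/* on MMP2, clearing SSE corrupts the RX FIFO, so leave it running */
	if (is_mmp2)
		return;

	writel(readl(base + SSCR0) & ~SSCR0_SSE, base + SSCR0);
}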
RSPI_SPCR) | enable, RSPI_SPCR); @@ -620,9 +619,8 @@ no_dma_tx: dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->ctlr->dev), - dev_name(&rspi->ctlr->dev)); + dev_warn_once(&rspi->ctlr->dev, + "DMA not available, falling back to PIO\n"); } return ret; } @@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr, if (spi->mode & SPI_CPHA) rspi->spcmd |= SPCMD_CPHA; + /* Configure slave signal to assert */ + rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs + : spi->chip_select); + /* CMOS output mode and MOSI signal from previous transfer */ rspi->sppcr = 0; if (spi->mode & SPI_LOOP) rspi->sppcr |= SPPCR_SPLP; - set_config_register(rspi, 8); + rspi->ops->set_config_register(rspi, 8); if (msg->spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) { @@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, + .num_hw_ss = 2, }; static const struct spi_ops rspi_rz_ops = { @@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ + .num_hw_ss = 1, }; static const struct spi_ops qspi_ops = { @@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = { SPI_RX_DUAL | SPI_RX_QUAD, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, + .num_hw_ss = 1, }; #ifdef CONFIG_OF @@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev) ctlr->mode_bits = ops->mode_bits; ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; + ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = rspi->ops->num_hw_ss; ret = platform_get_irq_byname_optional(pdev, "rx"); if (ret < 0) { @@ -1314,8 +1321,6 @@ error1: static const struct platform_device_id spi_driver_ids[] = { { "rspi", (kernel_ulong_t)&rspi_ops }, - { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops }, - { "qspi", (kernel_ulong_t)&qspi_ops }, {}, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8f134735291f..1c11a00a2c36 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -14,8 +14,6 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> -#include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -55,7 +53,6 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; - unsigned short unused_ss; bool native_cs_inited; bool native_cs_high; bool slave_aborted; @@ -63,140 +60,140 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define TMDR1 0x00 /* Transmit Mode Register 1 */ -#define TMDR2 0x04 /* Transmit Mode Register 2 */ -#define TMDR3 0x08 /* Transmit Mode Register 3 */ -#define RMDR1 0x10 /* Receive Mode Register 1 */ -#define RMDR2 0x14 /* Receive Mode Register 2 */ -#define RMDR3 0x18 /* Receive Mode Register 3 */ -#define TSCR 0x20 /* Transmit Clock Select Register */ -#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define CTR 0x28 /* Control Register */ -#define FCTR 0x30 /* FIFO Control Register */ -#define STR 0x40 /* Status Register */ -#define IER 0x44 /* Interrupt Enable Register */ -#define TDR1 0x48 /* Transmit Control 
Data Register 1 (SH, A1) */ -#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define TFDR 0x50 /* Transmit FIFO Data Register */ -#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define RFDR 0x60 /* Receive FIFO Data Register */ - -/* TMDR1 and RMDR1 */ -#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */ -#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */ -#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define MDR1_FLD_SHIFT 2 -#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* TMDR1 */ -#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* TMDR2 and RMDR2 */ -#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* TSCR and RSCR */ -#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SCR_BRPS(i) (((i) - 1) << 8) -#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SCR_BRDV_DIV_2 0 -#define SCR_BRDV_DIV_4 1 -#define SCR_BRDV_DIV_8 2 -#define SCR_BRDV_DIV_16 3 -#define SCR_BRDV_DIV_32 4 -#define SCR_BRDV_DIV_1 7 - -/* CTR */ -#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define CTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define CTR_TXE BIT(9) /* Transmit Enable */ -#define CTR_RXE BIT(8) /* Receive Enable */ -#define CTR_TXRST BIT(1) /* Transmit Reset */ -#define CTR_RXRST BIT(0) /* Receive Reset */ - -/* FCTR */ -#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 
4 empty stages */ -#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ -#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define FCTR_TFUA_SHIFT 20 -#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT) -#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define FCTR_RFUA_SHIFT 4 -#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT) - -/* STR */ -#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define STR_TEOF BIT(23) /* Frame Transmission End */ -#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define STR_RFFUL BIT(13) /* Receive FIFO Full */ -#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define STR_REOF BIT(7) /* Frame Reception End */ -#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* IER */ -#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ -#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. 
Enable */ -#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define IER_REOFE BIT(7) /* Frame Reception End Enable */ -#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ +#define SITMDR1 0x00 /* Transmit Mode Register 1 */ +#define SITMDR2 0x04 /* Transmit Mode Register 2 */ +#define SITMDR3 0x08 /* Transmit Mode Register 3 */ +#define SIRMDR1 0x10 /* Receive Mode Register 1 */ +#define SIRMDR2 0x14 /* Receive Mode Register 2 */ +#define SIRMDR3 0x18 /* Receive Mode Register 3 */ +#define SITSCR 0x20 /* Transmit Clock Select Register */ +#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ +#define SICTR 0x28 /* Control Register */ +#define SIFCTR 0x30 /* FIFO Control Register */ +#define SISTR 0x40 /* Status Register */ +#define SIIER 0x44 /* Interrupt Enable Register */ +#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ +#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ +#define SITFDR 0x50 /* Transmit FIFO Data Register */ +#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ +#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ +#define SIRFDR 0x60 /* Receive FIFO Data Register */ + +/* SITMDR1 and SIRMDR1 */ +#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ +#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ +#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ +#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ +#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ +#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ +#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ +#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ +#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ +#define SIMDR1_FLD_SHIFT 2 +#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ +/* SITMDR1 */ +#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ +#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ +#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ + +/* SITMDR2 and SIRMDR2 */ +#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ +#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ +#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ + +/* SITSCR and SIRSCR */ +#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ +#define SISCR_BRPS(i) (((i) - 1) << 8) +#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ +#define SISCR_BRDV_DIV_2 0 +#define SISCR_BRDV_DIV_4 1 +#define SISCR_BRDV_DIV_8 2 +#define SISCR_BRDV_DIV_16 3 +#define SISCR_BRDV_DIV_32 4 +#define SISCR_BRDV_DIV_1 7 + +/* SICTR */ +#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ +#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ +#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ +#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ +#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ +#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ +#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ +#define SICTR_REDG_SHIFT 26 /* Receive 
Timing (1 = falling edge) */ +#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ +#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ +#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ +#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ +#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ +#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ +#define SICTR_TXE BIT(9) /* Transmit Enable */ +#define SICTR_RXE BIT(8) /* Receive Enable */ +#define SICTR_TXRST BIT(1) /* Transmit Reset */ +#define SICTR_RXRST BIT(0) /* Receive Reset */ + +/* SIFCTR */ +#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ +#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ +#define SIFCTR_TFUA_SHIFT 20 +#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) +#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ +#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ +#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ +#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ +#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ +#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ +#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ +#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ +#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ +#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ +#define SIFCTR_RFUA_SHIFT 4 +#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) + +/* SISTR */ +#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ +#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ +#define SISTR_TEOF BIT(23) /* Frame Transmission End */ +#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ +#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ +#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ +#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ +#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ +#define SISTR_REOF BIT(7) /* Frame Reception End */ +#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ +#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ +#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ + +/* SIIER */ +#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. 
Enable */ +#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ +#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ +#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ +#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ +#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ +#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ +#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ +#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ +#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ +#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ +#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ +#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ +#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: return ioread16(p->mapbase + reg_offs); default: return ioread32(p->mapbase + reg_offs); @@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, u32 value) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: iowrite16(value, p->mapbase + reg_offs); break; default: @@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, u32 mask = clr | set; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data &= ~clr; data |= set; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - return readl_poll_timeout_atomic(p->mapbase + CTR, data, + return readl_poll_timeout_atomic(p->mapbase + SICTR, data, (data & mask) == set, 1, 100); } @@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) struct sh_msiof_spi_priv *p = data; /* just disable the interrupt and wake up */ - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); complete(&p->done); return IRQ_HANDLED; @@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) { - u32 mask = CTR_TXRST | CTR_RXRST; + u32 mask = SICTR_TXRST | SICTR_RXRST; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data |= mask; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1, + readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1, 100); } static const u32 sh_msiof_spi_div_array[] = { - SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4, - SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32, + SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, + SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, }; static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, @@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, div = DIV_ROUND_UP(parent_rate, spi_hz); if (div <= 1024) { - /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ + /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ if (!div_pow && div <= 32 && div > 2) div_pow = 1; @@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, brps = 32; } - scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); - sh_msiof_write(p, TSCR, scr); + scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & 
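sh_msiof_modify_ctr_wait() above delegates its register spin loop to readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us), which rereads the register into val until cond holds or the timeout lapses, returning 0 or -ETIMEDOUT; the non-atomic readl_poll_timeout() in the new hisi-sfc-v3xx driver earlier in this section works the same way but may sleep between polls. A minimal wait built on it (DEMO_BUSY and the function name are hypothetical):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_BUSY	BIT(0)	/* hypothetical busy flag */

/* Spin (no sleeping) until the busy bit clears: poll every 1us, give up
 * after 100us. Returns 0 on success, -ETIMEDOUT otherwise. */
static int demo_wait_not_busy(void __iomem *reg)
{
	u32 val;

	return readl_poll_timeout_atomic(reg, val, !(val & DEMO_BUSY), 1, 100);
}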
SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, RSCR, scr); + sh_msiof_write(p, SIRSCR, scr); } static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) @@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; + val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; + val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; return val; } @@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; - tmp |= !cs_high << MDR1_SYNCAC_SHIFT; - tmp |= lsb_first << MDR1_BITLSB_SHIFT; + tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; + tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; + tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_slave(p->ctlr)) { - sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); + sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { - sh_msiof_write(p, TMDR1, - tmp | MDR1_TRMD | TMDR1_PCON | - (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); + sh_msiof_write(p, SITMDR1, + tmp | SIMDR1_TRMD | SITMDR1_PCON | + (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } - sh_msiof_write(p, RMDR1, tmp); + sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT; - tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; + tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; edge = cpol ^ !cpha; - tmp |= edge << CTR_TEDG_SHIFT; - tmp |= edge << CTR_REDG_SHIFT; - tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW; - sh_msiof_write(p, CTR, tmp); + tmp |= edge << SICTR_TEDG_SHIFT; + tmp |= edge << SICTR_REDG_SHIFT; + tmp |= tx_hi_z ? 
SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, u32 bits, u32 words) { - u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); + u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, TMDR2, dr2); + sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); if (rx_buf) - sh_msiof_write(p, RMDR2, dr2); + sh_msiof_write(p, SIRMDR2, dr2); } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, - sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); + sh_msiof_write(p, SISTR, + sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, @@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_8[k] << fs); + sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, @@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_16[k] << fs); + sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, @@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, @@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_32[k] << fs); + sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, @@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, @@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); + sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); + sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, @@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_8[k] = sh_msiof_read(p, RFDR) >> fs; + buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, @@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_16[k] = sh_msiof_read(p, RFDR) >> fs; + buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void 
sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, @@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, @@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = sh_msiof_read(p, RFDR) >> fs; + buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, @@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, @@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); + buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); + put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); } static int sh_msiof_spi_setup(struct spi_device *spi) @@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = MDR1_SYNCMD_MASK; - set = MDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD_MASK; + set = SIMDR1_SYNCMD_SPI; if (spi->mode & SPI_CS_HIGH) - clr |= BIT(MDR1_SYNCAC_SHIFT); + clr |= BIT(SIMDR1_SYNCAC_SHIFT); else - set |= BIT(MDR1_SYNCAC_SHIFT); + set |= BIT(SIMDR1_SYNCAC_SHIFT); pm_runtime_get_sync(&p->pdev->dev); - tmp = sh_msiof_read(p, TMDR1) & ~clr; - sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); - tmp = sh_msiof_read(p, RMDR1) & ~clr; - sh_msiof_write(p, RMDR1, tmp | set); + tmp = sh_msiof_read(p, SITMDR1) & ~clr; + sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); + tmp = sh_msiof_read(p, SIRMDR1) & ~clr; + sh_msiof_write(p, SIRMDR1, tmp | set); pm_runtime_put(&p->pdev->dev); p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_inited = true; @@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, /* Configure pins before asserting CS */ if (spi->cs_gpiod) { - ss = p->unused_ss; + ss = ctlr->unused_native_cs; cs_high = p->native_cs_high; } else { ss = spi->chip_select; @@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) /* setup clock and rx/tx signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE); /* start by setting frame bit */ if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE); return ret; } @@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) /* shut down frame, rx/tx and clock 
signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0); if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0); return ret; } @@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, fifo_shift = 32 - bits; /* default FIFO watermarks for PIO */ - sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); - sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ if (tx_buf) @@ -731,7 +728,7 @@ stop_reset: sh_msiof_reset_str(p); sh_msiof_spi_stop(p, rx_buf); stop_ier: - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - ier_bits |= IER_RDREQE | IER_RDMAE; + ier_bits |= SIIER_RDREQE | SIIER_RDMAE; desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (tx) { - ier_bits |= IER_TDREQE | IER_TDMAE; + ier_bits |= SIIER_TDREQE | SIIER_TDMAE; dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, @@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); /* setup msiof transfer mode registers (32-bit words) */ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); - sh_msiof_write(p, IER, ier_bits); + sh_msiof_write(p, SIIER, ier_bits); reinit_completion(&p->done); if (tx) @@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (ret) goto stop_reset; - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); } else { /* wait for tx fifo to be emptied */ - sh_msiof_write(p, IER, IER_TEOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE); ret = sh_msiof_wait_for_completion(p, &p->done); if (ret) goto stop_reset; @@ -856,7 +853,7 @@ stop_dma: no_dma_tx: if (rx) dmaengine_terminate_all(p->ctlr->dma_rx); - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) } #endif -static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) -{ - struct device *dev = &p->pdev->dev; - unsigned int used_ss_mask = 0; - unsigned int cs_gpios = 0; - unsigned int num_cs, i; - int ret; - - ret = gpiod_count(dev, "cs"); - if (ret <= 0) - return 0; - - num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); - for (i = 0; i < num_cs; i++) { - struct gpio_desc *gpiod; - - gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); - if (!IS_ERR(gpiod)) { - devm_gpiod_put(dev, gpiod); - cs_gpios++; - continue; - } - - if (PTR_ERR(gpiod) != -ENOENT) - return PTR_ERR(gpiod); - - if (i >= 
MAX_SS) { - dev_err(dev, "Invalid native chip select %d\n", i); - return -EINVAL; - } - used_ss_mask |= BIT(i); - } - p->unused_ss = ffz(used_ss_mask); - if (cs_gpios && p->unused_ss >= MAX_SS) { - dev_err(dev, "No unused native chip select available\n"); - return -EINVAL; - } - return 0; -} - static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr) { @@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) ctlr = p->ctlr; ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, res->start + TFDR); + dma_tx_id, res->start + SITFDR); if (!ctlr->dma_tx) return -ENODEV; ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, res->start + RFDR); + dma_rx_id, res->start + SIRFDR); if (!ctlr->dma_rx) goto free_tx_chan; @@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) if (p->info->rx_fifo_override) p->rx_fifo_size = p->info->rx_fifo_override; - /* Setup GPIO chip selects */ - ctlr->num_chipselect = p->info->num_chipselect; - ret = sh_msiof_get_cs_gpios(p); - if (ret) - goto err1; - /* init controller code */ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; + ctlr->num_chipselect = p->info->num_chipselect; ctlr->dev.of_node = pdev->dev.of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; @@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->auto_runtime_pm = true; ctlr->transfer_one = sh_msiof_transfer_one; ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = MAX_SS; ret = sh_msiof_request_dma(p); if (ret < 0) diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index e1e639191557..8419e6722e17 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) sspi->bitbang.master->dev.of_node = pdev->dev.of_node; /* request DMA channels */ - sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx"); - if (!sspi->rx_chan) { + sspi->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(sspi->rx_chan)) { dev_err(&pdev->dev, "can not allocate rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->rx_chan); goto free_master; } - sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx"); - if (!sspi->tx_chan) { + sspi->tx_chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(sspi->tx_chan)) { dev_err(&pdev->dev, "can not allocate tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->tx_chan); goto free_rx_dma; } diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4e726929bb4f..4ef569b47aa6 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi) return 0; } -static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) +static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) { struct dma_slave_config dma_cfg; struct device *dev = qspi->dev; + int ret = 0; memset(&dma_cfg, 0, sizeof(dma_cfg)); @@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) dma_cfg.src_maxburst = 4; dma_cfg.dst_maxburst = 4; - qspi->dma_chrx = dma_request_slave_channel(dev, "rx"); - if (qspi->dma_chrx) { + qspi->dma_chrx = dma_request_chan(dev, "rx"); + if (IS_ERR(qspi->dma_chrx)) { + ret = 
PTR_ERR(qspi->dma_chrx); + qspi->dma_chrx = NULL; + if (ret == -EPROBE_DEFER) + goto out; + } else { if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) { dev_err(dev, "dma rx config failed\n"); dma_release_channel(qspi->dma_chrx); @@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } - qspi->dma_chtx = dma_request_slave_channel(dev, "tx"); - if (qspi->dma_chtx) { + qspi->dma_chtx = dma_request_chan(dev, "tx"); + if (IS_ERR(qspi->dma_chtx)) { + ret = PTR_ERR(qspi->dma_chtx); + qspi->dma_chtx = NULL; + } else { if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) { dev_err(dev, "dma tx config failed\n"); dma_release_channel(qspi->dma_chtx); @@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } +out: init_completion(&qspi->dma_completion); + + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static void stm32_qspi_dma_free(struct stm32_qspi *qspi) @@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->dev = dev; platform_set_drvdata(pdev, qspi); - stm32_qspi_dma_setup(qspi); + ret = stm32_qspi_dma_setup(qspi); + if (ret) + goto err; + mutex_init(&qspi->lock); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..e041f9c4ec47 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -9,7 +9,6 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } /** - * stm32_spi_setup - setup device chip select - */ -static int stm32_spi_setup(struct spi_device *spi_dev) -{ - int ret = 0; - - if (!gpio_is_valid(spi_dev->cs_gpio)) { - dev_err(&spi_dev->dev, "%d is not a valid gpio\n", - spi_dev->cs_gpio); - return -EINVAL; - } - - dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__, - spi_dev->cs_gpio, - (spi_dev->mode & SPI_CS_HIGH) ? 
"low" : "high"); - - ret = gpio_direction_output(spi_dev->cs_gpio, - !(spi_dev->mode & SPI_CS_HIGH)); - - return ret; -} - -/** * stm32_spi_prepare_msg - set up the controller to transfer a single message */ static int stm32_spi_prepare_msg(struct spi_master *master, @@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev) struct spi_master *master; struct stm32_spi *spi; struct resource *res; - int i, ret; + int ret; master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi)); if (!master) { @@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev) master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; - master->setup = stm32_spi_setup; + master->use_gpio_descriptors = true; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - spi->dma_tx = dma_request_slave_channel(spi->dev, "tx"); - if (!spi->dma_tx) + spi->dma_tx = dma_request_chan(spi->dev, "tx"); + if (IS_ERR(spi->dma_tx)) { + ret = PTR_ERR(spi->dma_tx); + spi->dma_tx = NULL; + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + dev_warn(&pdev->dev, "failed to request tx dma channel\n"); - else + } else { master->dma_tx = spi->dma_tx; + } + + spi->dma_rx = dma_request_chan(spi->dev, "rx"); + if (IS_ERR(spi->dma_rx)) { + ret = PTR_ERR(spi->dma_rx); + spi->dma_rx = NULL; + if (ret == -EPROBE_DEFER) + goto err_dma_release; - spi->dma_rx = dma_request_slave_channel(spi->dev, "rx"); - if (!spi->dma_rx) dev_warn(&pdev->dev, "failed to request rx dma channel\n"); - else + } else { master->dma_rx = spi->dma_rx; + } if (spi->dma_tx || spi->dma_rx) master->can_dma = stm32_spi_can_dma; @@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "spi master registration failed: %d\n", ret); - goto err_dma_release; + goto err_pm_disable; } - if (!master->cs_gpios) { + if (!master->cs_gpiods) { dev_err(&pdev->dev, "no CS gpios available\n"); ret = -EINVAL; - goto err_dma_release; - } - - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) { - dev_err(&pdev->dev, "%i is not a valid gpio\n", - master->cs_gpios[i]); - ret = -EINVAL; - goto err_dma_release; - } - - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "can't get CS gpio %i\n", - master->cs_gpios[i]); - goto err_dma_release; - } + goto err_pm_disable; } dev_info(&pdev->dev, "driver initialized\n"); return 0; +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); if (spi->dma_rx) dma_release_channel(spi->dma_rx); - - pm_runtime_disable(&pdev->dev); err_clk_disable: clk_disable_unprepare(spi->clk); err_master_put: diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index fc40ab146c86..83edabdb41ad 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param( if ((bits_per_word == 8 || bits_per_word == 16 || bits_per_word == 32) && t->len > 3) { - tspi->is_packed = 1; + tspi->is_packed = true; tspi->words_per_32bit = 32/bits_per_word; } else { - tspi->is_packed = 0; + tspi->is_packed = false; tspi->words_per_32bit = 1; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 
66dcb6128539..366a3e5cca6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -80,8 +80,6 @@ struct ti_qspi { #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) -#define QSPI_FCLK 192000000 - /* Clock Control */ #define QSPI_CLK_EN (1 << 31) #define QSPI_CLK_DIV_MAX 0xffff @@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, { int wlen; unsigned int cmd; + u32 rx; + u8 rxlen, rx_wlen; u8 *rxbuf; rxbuf = t->rx_buf; @@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, break; } wlen = t->bits_per_word >> 3; /* in bytes */ + rx_wlen = wlen; while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); if (qspi_is_busy(qspi)) return -EBUSY; + switch (wlen) { + case 1: + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + rxlen = QSPI_WLEN_MAX_BYTES; + } else { + rxlen = min(count, 4); + } + rx_wlen = rxlen << 3; + cmd &= ~QSPI_WLEN_MASK; + cmd |= QSPI_WLEN(rx_wlen); + break; + default: + rxlen = wlen; + break; + } + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (ti_qspi_poll_wc(qspi)) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } + switch (wlen) { case 1: - *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + u32 *rxp = (u32 *) rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG_3); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_2); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_1); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + *rxp++ = be32_to_cpu(rx); + } else { + u8 *rxp = rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + if (rx_wlen >= 8) + *rxp++ = rx >> (rx_wlen - 8); + if (rx_wlen >= 16) + *rxp++ = rx >> (rx_wlen - 16); + if (rx_wlen >= 24) + *rxp++ = rx >> (rx_wlen - 24); + if (rx_wlen >= 32) + *rxp++ = rx; + } break; case 2: *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); @@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); break; } - rxbuf += wlen; - count -= wlen; + rxbuf += rxlen; + count -= rxlen; } return 0; @@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, QSPI_SPI_SETUP_REG(spi->chip_select)); } +static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + size_t max_len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.val < qspi->mmap_size) { + /* Limit MMIO to the mmaped region */ + if (op->addr.val + op->data.nbytes > qspi->mmap_size) { + max_len = qspi->mmap_size - op->addr.val; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } else { + /* + * Use fallback mode (SW generated transfers) above the + * mmaped region. + * Adjust size to comply with the QSPI max frame length. 
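/*
 * [Editor's note -- not part of the patch] A minimal sketch of the
 * clamping rule ti_qspi_adjust_op_size() above applies, assuming
 * QSPI_FRAME is the controller's maximum frame length and mmap_size
 * the size of the memory-mapped flash window; the helper name is
 * illustrative:
 */
static size_t ti_qspi_clamp_read(u64 addr, size_t nbytes, size_t mmap_size,
				 size_t frame, u8 addr_nbytes, u8 dummy_nbytes)
{
	if (addr < mmap_size)	/* MMIO path: stay inside the window */
		return min(nbytes, (size_t)(mmap_size - addr));

	/* SW path: 1 opcode byte, address and dummy bytes share the frame */
	return min(nbytes, frame - 1 - addr_nbytes - dummy_nbytes);
}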
+ */ + max_len = QSPI_FRAME; + max_len -= 1 + op->addr.nbytes + op->dummy.nbytes; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } + + return 0; +} + static int ti_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .exec_op = ti_qspi_exec_mem_op, + .adjust_op_size = ti_qspi_adjust_op_size, }; static int ti_qspi_start_transfer_one(struct spi_master *master, diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 223353fa2d8a..d7ea6af74743 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Tx DMA */ param = &dma->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2; /* Tx = 0, 2 */; + param->chan_id = data->ch * 2; /* Tx = 0, 2 */ param->tx_reg = data->io_base_addr + PCH_SPDWR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); @@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Rx DMA */ param = &dma->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */ param->rx_reg = data->io_base_addr + PCH_SPDRR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 47cde1864630..0fa50979644d 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -8,6 +8,7 @@ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -23,6 +24,7 @@ struct uniphier_spi_priv { void __iomem *base; + dma_addr_t base_dma_addr; struct clk *clk; struct spi_master *master; struct completion xfer_done; @@ -32,6 +34,7 @@ struct uniphier_spi_priv { unsigned int rx_bytes; const u8 *tx_buf; u8 *rx_buf; + atomic_t dma_busy; bool is_save_param; u8 bits_per_word; @@ -61,11 +64,16 @@ struct uniphier_spi_priv { #define SSI_FPS_FSTRT BIT(14) #define SSI_SR 0x14 +#define SSI_SR_BUSY BIT(7) #define SSI_SR_RNE BIT(0) #define SSI_IE 0x18 +#define SSI_IE_TCIE BIT(4) #define SSI_IE_RCIE BIT(3) +#define SSI_IE_TXRE BIT(2) +#define SSI_IE_RXRE BIT(1) #define SSI_IE_RORIE BIT(0) +#define SSI_IE_ALL_MASK GENMASK(4, 0) #define SSI_IS 0x1c #define SSI_IS_RXRS BIT(9) @@ -87,15 +95,19 @@ struct uniphier_spi_priv { #define SSI_RXDR 0x24 #define SSI_FIFO_DEPTH 8U +#define SSI_FIFO_BURST_NUM 1 + +#define SSI_DMA_RX_BUSY BIT(1) +#define SSI_DMA_TX_BUSY BIT(0) static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : (bits <= 16 ? 
2 : 4); } -static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) writel(val, priv->base + SSI_IE); } -static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -290,25 +302,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv) } } -static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, + unsigned int threshold) { - unsigned int fifo_threshold, fill_bytes; u32 val; - fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, - bytes_per_word(priv->bits_per_word)); - fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); - - fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); - - /* set fifo threshold */ val = readl(priv->base + SSI_FC); val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); - val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); - val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); + val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); + val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); writel(val, priv->base + SSI_FC); +} + +static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +{ + unsigned int fifo_threshold, fill_words; + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); + fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); + + uniphier_spi_set_fifo_threshold(priv, fifo_threshold); + + fill_words = fifo_threshold - + DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); - while (fill_bytes--) + while (fill_words--) uniphier_spi_send(priv); } @@ -327,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) writel(val, priv->base + SSI_FPS); } +static bool uniphier_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + if ((!master->dma_tx && !master->dma_rx) + || (!master->dma_tx && t->tx_buf) + || (!master->dma_rx && t->rx_buf)) + return false; + + return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; +} + +static void uniphier_spi_dma_rxcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_RXRE); + + if (!(state & SSI_DMA_TX_BUSY)) + spi_finalize_current_transfer(master); +} + +static void uniphier_spi_dma_txcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_TXRE); + + if (!(state & SSI_DMA_RX_BUSY)) + spi_finalize_current_transfer(master); +} + +static int uniphier_spi_transfer_one_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv 
= spi_master_get_devdata(master); + struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; + int buswidth; + + atomic_set(&priv->dma_busy, 0); + + uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM); + + if (priv->bits_per_word <= 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (priv->bits_per_word <= 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + + if (priv->rx_buf) { + struct dma_slave_config rxconf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = priv->base_dma_addr + SSI_RXDR, + .src_addr_width = buswidth, + .src_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_rx, &rxconf); + + rxdesc = dmaengine_prep_slave_sg( + master->dma_rx, + t->rx_sg.sgl, t->rx_sg.nents, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto out_err_prep; + + rxdesc->callback = uniphier_spi_dma_rxcb; + rxdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_RXRE); + atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); + + dmaengine_submit(rxdesc); + dma_async_issue_pending(master->dma_rx); + } + + if (priv->tx_buf) { + struct dma_slave_config txconf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = priv->base_dma_addr + SSI_TXDR, + .dst_addr_width = buswidth, + .dst_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_tx, &txconf); + + txdesc = dmaengine_prep_slave_sg( + master->dma_tx, + t->tx_sg.sgl, t->tx_sg.nents, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto out_err_prep; + + txdesc->callback = uniphier_spi_dma_txcb; + txdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_TXRE); + atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); + + dmaengine_submit(txdesc); + dma_async_issue_pending(master->dma_tx); + } + + /* signal that we need to wait for completion */ + return (priv->tx_buf || priv->rx_buf); + +out_err_prep: + if (rxdesc) + dmaengine_terminate_sync(master->dma_rx); + + return -EINVAL; +} + static int uniphier_spi_transfer_one_irq(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) @@ -339,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master, uniphier_spi_fill_tx_fifo(priv); - uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE); time_left = wait_for_completion_timeout(&priv->xfer_done, msecs_to_jiffies(SSI_TIMEOUT_MS)); - uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE); if (!time_left) { dev_err(dev, "transfer timeout.\n"); @@ -388,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master, { struct uniphier_spi_priv *priv = spi_master_get_devdata(master); unsigned long threshold; + bool use_dma; /* Terminate and return success for 0 byte length transfer */ if (!t->len) @@ -395,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master, uniphier_spi_setup_transfer(spi, t); + use_dma = master->can_dma ? master->can_dma(master, spi, t) : false; + if (use_dma) + return uniphier_spi_transfer_one_dma(master, spi, t); + /* * If the transfer operation will take longer than * SSI_POLL_TIMEOUT_US, it should use irq. 
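/*
 * [Editor's note -- not part of the patch] The DMA-vs-PIO decision that
 * uniphier_spi_can_dma() above encodes, shown in isolation; the helper
 * name is illustrative:
 */
static bool uniphier_xfer_wants_dma(unsigned int len,
				    unsigned int bits_per_word,
				    unsigned int fifo_depth)
{
	unsigned int bpw = bits_per_word <= 8 ? 1 :
			   (bits_per_word <= 16 ? 2 : 4);

	/* DMA pays off only once the transfer cannot fit the FIFO */
	return DIV_ROUND_UP(len, bpw) > fifo_depth;
}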
@@ -425,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master) return 0; } +static void uniphier_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + u32 val; + + /* stop running spi transfer */ + writel(0, priv->base + SSI_CTL); + + /* reset FIFOs */ + val = SSI_FC_TXFFL | SSI_FC_RXFFL; + writel(val, priv->base + SSI_FC); + + uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); + + if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { + dmaengine_terminate_async(master->dma_tx); + atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + } + + if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { + dmaengine_terminate_async(master->dma_rx); + atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + } +} + static irqreturn_t uniphier_spi_handler(int irq, void *dev_id) { struct uniphier_spi_priv *priv = dev_id; @@ -470,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev) { struct uniphier_spi_priv *priv; struct spi_master *master; + struct resource *res; + struct dma_slave_caps caps; + u32 dma_tx_burst = 0, dma_rx_burst = 0; unsigned long clk_rate; int irq; int ret; @@ -484,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev) priv->master = master; priv->is_save_param = false; - priv->base = devm_platform_ioremap_resource(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out_master_put; } + priv->base_dma_addr = res->start; priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { @@ -531,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev) = uniphier_spi_prepare_transfer_hardware; master->unprepare_transfer_hardware = uniphier_spi_unprepare_transfer_hardware; + master->handle_err = uniphier_spi_handle_err; + master->can_dma = uniphier_spi_can_dma; + master->num_chipselect = 1; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + + master->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR_OR_NULL(master->dma_tx)) { + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_tx = NULL; + dma_tx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_tx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_tx_burst = caps.max_burst; + } + + master->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR_OR_NULL(master->dma_rx)) { + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_rx = NULL; + dma_rx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_rx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_rx_burst = caps.max_burst; + } + + master->max_dma_len = min(dma_tx_burst, dma_rx_burst); ret = devm_spi_register_master(&pdev->dev, master); if (ret) @@ -551,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev) { struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + if (priv->master->dma_tx) + dma_release_channel(priv->master->dma_tx); + if (priv->master->dma_rx) + dma_release_channel(priv->master->dma_rx); + clk_disable_unprepare(priv->clk); return 0; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 5e4c4532f7f3..38b4c78df506 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1499,8 
+1499,7 @@ static void spi_pump_messages(struct kthread_work *work) * advances its @tx buffer pointer monotonically. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver is - * preparing to transmit right now. + * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will disable IRQs and preemption for the duration of the * transfer, for less jitter in time measurement. Only compatible * with PIO drivers. If true, must follow up with @@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work) */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_pre) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word)) + if (progress < xfer->ptp_sts_word_pre) return; /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_pre = progress; xfer->timestamped_pre = true; @@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); * timestamped. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver has - * just transmitted. + * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. */ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_post) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word)) + if (progress < xfer->ptp_sts_word_post) return; ptp_read_system_postts(xfer->ptp_sts); @@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, } /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_post = progress; xfer->timestamped_post = true; } @@ -1680,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } + if (unlikely(ctlr->ptp_sts_supported)) { + list_for_each_entry(xfer, &mesg->transfers, transfer_list) { + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); + } + } + spi_unmap_msg(ctlr, mesg); if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { @@ -2457,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) int nb, i; struct gpio_desc **cs; struct device *dev = &ctlr->dev; + unsigned long native_cs_mask = 0; + unsigned int num_cs_gpios = 0; nb = gpiod_count(dev, "cs"); ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); @@ -2498,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) if (!gpioname) return -ENOMEM; gpiod_set_consumer_name(cs[i], gpioname); + num_cs_gpios++; + continue; } + + if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { + dev_err(dev, "Invalid native chip select %d\n", i); + return -EINVAL; + } + native_cs_mask |= BIT(i); + } + + ctlr->unused_native_cs = ffz(native_cs_mask); + if 
(num_cs_gpios && ctlr->max_native_cs && + ctlr->unused_native_cs >= ctlr->max_native_cs) { + dev_err(dev, "No unused native chip select available\n"); + return -EINVAL; } return 0; diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index dbff0f7e7cf5..ddc0dc93d08b 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -46,8 +46,8 @@ #define PCI171X_RANGE_UNI BIT(4) #define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0) #define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */ -#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8) -#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0) +#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8) +#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0) #define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x)) #define PCI171X_STATUS_REG 0x06 /* R: status register */ #define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */ diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c index 673d732dcb8f..8f398b30f5bf 100644 --- a/drivers/staging/comedi/drivers/ni_routes.c +++ b/drivers/staging/comedi/drivers/ni_routes.c @@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family, } } - if (!rv) - return -ENODATA; - /* Second, find the set of routes valid for this device. */ for (i = 0; ni_device_routes_list[i]; ++i) { if (memcmp(ni_device_routes_list[i]->device, board_name, @@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family, } } - if (!dr) - return -ENODATA; - tables->route_values = rv; tables->valid_routes = dr; + if (!rv || !dr) + return -ENODATA; + return 0; } @@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest, { int src; + if (!tables->route_values) + return -EINVAL; + dest = B(dest); /* subtract NI names offset */ /* ensure we are not going to under/over run the route value table */ if (dest < 0 || dest >= NI_NUM_NAMES) diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index a7cac0719b8b..b5d42f411dd8 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index 8d19ae71e7cc..4e651b698617 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv) memcpy(array, addr, length); - ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, - MESSAGE_REQUEST_BBREG, length, array); + ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE, + MESSAGE_REQUEST_BBREG, length, array); if (ret) goto end; diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 56cd77fd9ea0..7958fc165462 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -719,7 +719,7 @@ end: */ int vnt_radio_power_on(struct vnt_private *priv) { - int ret = true; + int ret = 0; vnt_exit_deep_sleep(priv); diff 
--git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 6074ceda78bf..50e1c8918040 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -259,6 +259,7 @@ struct vnt_private { u8 mac_hw; /* netdev */ struct usb_device *usb; + struct usb_interface *intf; u64 tsf_time; u8 rx_rate; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 4ac85ecb0921..9cb924c54571 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = { int vnt_init(struct vnt_private *priv) { - if (!(vnt_init_registers(priv))) + if (vnt_init_registers(priv)) return -EAGAIN; SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr); @@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) priv = hw->priv; priv->hw = hw; priv->usb = udev; + priv->intf = intf; vnt_set_options(priv); diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index d3304df6bd53..d977d4777e4f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: @@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data) reg_off, reg, sizeof(u8), &data); } +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 length, u8 *data) +{ + int ret = 0, i; + + for (i = 0; i < length; i += block) { + u16 len = min_t(int, length - i, block); + + ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, + i, reg, len, data + i); + if (ret) + goto end; + } +end: + return ret; +} + int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { @@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h index 95147ec7b96a..b65d9c01a211 100644 --- a/drivers/staging/vt6656/usbpipe.h +++ b/drivers/staging/vt6656/usbpipe.h @@ -18,6 +18,8 @@ #include "device.h" +#define VNT_REG_BLOCK_SIZE 64 + int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer); int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, @@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data); int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data); +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 len, u8 *data); + int vnt_start_interrupt_urb(struct vnt_private *priv); int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb); int vnt_tx_context(struct vnt_private *priv, diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 3eb2f11a5de1..2c5250ca2801 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work) if (vnt_init(priv)) { /* If fail all ends TODO retry */ dev_err(&priv->usb->dev, "failed to start\n"); + usb_set_intfdata(priv->intf, NULL); ieee80211_free_hw(priv->hw); 
return; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7251a87bb576..b94ed4e30770 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4149,9 +4149,6 @@ int iscsit_close_connection( iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - if (conn->conn_transport->iscsit_wait_conn) - conn->conn_transport->iscsit_wait_conn(conn); - /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4237,6 +4234,9 @@ int iscsit_close_connection( target_sess_cmd_list_set_waiting(sess->se_sess); target_wait_for_sess_cmds(sess->se_sess); + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + ahash_request_free(conn->conn_tx_hash); if (conn->conn_rx_hash) { struct crypto_ahash *tfm; diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index d1ad512e1708..3ca71e3812ed 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -3,6 +3,7 @@ config OPTEE tristate "OP-TEE" depends on HAVE_ARM_SMCCC + depends on MMU help This implements the OP-TEE Trusted Execution Environment (TEE) driver. diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c index 0332a5301d61..d767eebf30bd 100644 --- a/drivers/tee/optee/shm_pool.c +++ b/drivers/tee/optee/shm_pool.c @@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, shm->size = PAGE_SIZE << order; if (shm->flags & TEE_SHM_DMA_BUF) { + unsigned int nr_pages = 1 << order, i; + struct page **pages; + + pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < nr_pages; i++) { + pages[i] = page; + page++; + } + shm->flags |= TEE_SHM_REGISTER; - rc = optee_shm_register(shm->ctx, shm, &page, 1 << order, + rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); + kfree(pages); } return rc; diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 3517883b5cdb..efae0c02d898 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev) } static const struct acpi_device_id int3400_thermal_match[] = { + {"INT1040", 0}, {"INT3400", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index a7bbd8584ae2..aeece1e136a5 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev) } static const struct acpi_device_id int3403_device_ids[] = { + {"INT1043", 0}, {"INT3403", 0}, {"", 0}, }; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 015e7d201598..0e7cf5236932 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv) irq = platform_get_irq_byname(pdev, "uplow"); if (irq < 0) { ret = irq; + /* For old DTs with no IRQ defined */ + if (irq == -ENXIO) + ret = 0; goto err_put_device; } diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 226adeec2aed..ce5309d00280 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -663,6 
+663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl, return AE_OK; } +static const struct acpi_device_id serdev_acpi_devices_blacklist[] = { + { "INT3511", 0 }, + { "INT3512", 0 }, + { }, +}; + static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, if (acpi_device_enumerated(adev)) return AE_OK; + /* Skip if black listed */ + if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist)) + return AE_OK; + if (acpi_serdev_check_resources(ctrl, adev)) return AE_OK; diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 5023c85ebc6e..044c3cbdcfa4 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -89,8 +89,7 @@ void tty_port_link_device(struct tty_port *port, { if (WARN_ON(index >= driver->num)) return; - if (!driver->ports[index]) - driver->ports[index] = port; + driver->ports[index] = port; } EXPORT_SYMBOL_GPL(tty_port_link_device); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 4c1e75509303..02f6ca2cb1ba 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, */ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; u32 reg; - priv_dev = cdns->gadget_dev; - /* check USB device interrupt */ reg = readl(&priv_dev->regs->usb_ists); if (reg) { @@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) */ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; unsigned long flags; int bit; u32 reg; - priv_dev = cdns->gadget_dev; spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, cdns); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); pm_runtime_put_autosuspend(cdns->dev); @@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, cdns3_device_irq_handler, cdns3_device_thread_irq_handler, - IRQF_SHARED, dev_name(cdns->dev), cdns); + IRQF_SHARED, dev_name(cdns->dev), + cdns->gadget_dev); if (ret) goto err0; diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index b45ceb91c735..48e4a5ca1835 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd); struct ehci_ci_priv { struct regulator *reg_vbus; + bool enabled; }; static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) @@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) int ret = 0; int port = HCS_N_PORTS(ehci->hcs_params); - if (priv->reg_vbus) { + if (priv->reg_vbus && enable != priv->enabled) { if (port > 1) { dev_warn(dev, "Not support multi-port regulator control\n"); @@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) enable ? 
"enable" : "disable", ret); return ret; } + priv->enabled = enable; } if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) { diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 5f40117e68e7..26bc05e48d8a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_INT] = 1024, }; -static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, - int asnum, struct usb_host_interface *ifp, int num_ep, - unsigned char *buffer, int size) +static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, + struct usb_endpoint_descriptor *e2) +{ + if (e1->bEndpointAddress == e2->bEndpointAddress) + return true; + + if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { + if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) + return true; + } + + return false; +} + +/* + * Check for duplicate endpoint addresses in other interfaces and in the + * altsetting currently being parsed. + */ +static bool config_endpoint_is_duplicate(struct usb_host_config *config, + int inum, int asnum, struct usb_endpoint_descriptor *d) +{ + struct usb_endpoint_descriptor *epd; + struct usb_interface_cache *intfc; + struct usb_host_interface *alt; + int i, j, k; + + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intfc = config->intf_cache[i]; + + for (j = 0; j < intfc->num_altsetting; ++j) { + alt = &intfc->altsetting[j]; + + if (alt->desc.bInterfaceNumber == inum && + alt->desc.bAlternateSetting != asnum) + continue; + + for (k = 0; k < alt->desc.bNumEndpoints; ++k) { + epd = &alt->endpoint[k].desc; + + if (endpoint_is_duplicate(epd, d)) + return true; + } + } + } + + return false; +} + +static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_config *config, int inum, int asnum, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; @@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, goto skip_to_next_endpoint_or_interface_descriptor; /* Check for duplicate endpoint addresses */ - for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { - if (ifp->endpoint[i].desc.bEndpointAddress == - d->bEndpointAddress) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", - cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; - } + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; } endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; @@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } - /* Validate the wMaxPacketSize field */ + /* + * Validate the wMaxPacketSize field. + * Some devices have isochronous endpoints in altsetting 0; + * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 + * (see the end of section 5.6.3), so don't warn about them. 
+ */ maxp = usb_endpoint_maxp(&endpoint->desc); - if (maxp == 0) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; } /* Find the highest legal maxpacket size for this endpoint */ @@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno, if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; - retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, - num_ep, buffer, size); + retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, + alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f229ad6952c0..3405b146edc9 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || + (portchange & USB_PORT_STAT_C_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT) || (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); @@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) +#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0c960a97ea02..154f3f3e8cff 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { + /* + * For OUT direction, host may send less than the setup + * length. Return true for all OUT requests. + */ + if (!req->direction) + return true; + return req->request.actual == req->request.length; } diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 5567ed2cddbe..fa252870c926 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc) memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) - props[prop_idx++].name = "usb3-lpm-capable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); if (dwc->usb2_lpm_disable) - props[prop_idx++].name = "usb2-lpm-disable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable"); /** * WORKAROUND: dwc3 revisions <=3.00a have a limitation @@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc) * This following flag tells XHCI to do just that. 
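/*
 * [Editor's note -- not part of the patch] The dwc3 host hunks above
 * replace bare ".name" assignments with PROPERTY_ENTRY_BOOL(), which
 * initializes the whole property_entry rather than only its name. For
 * a static table the same macro is used like this (values
 * illustrative):
 */
#include <linux/property.h>

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_BOOL("usb3-lpm-capable"),
	{ /* sentinel */ }
};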
*/ if (dwc->revision <= DWC3_REVISION_300A) - props[prop_idx++].name = "quirk-broken-port-ped"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); if (prop_idx) { ret = platform_device_add_properties(xhci, props); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index ae70ce29d5e4..797d6ace8994 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -445,6 +445,7 @@ config USB_TEGRA_XUDC tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PHY_TEGRA_XUSB + select USB_ROLE_SWITCH help Enables NVIDIA Tegra USB 3.0 device mode controller driver. diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 38183ac438c6..1371b0c249ec 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev) } da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN); - if (IS_ERR(da8xx_ohci->oc_gpio)) + if (IS_ERR(da8xx_ohci->oc_gpio)) { + error = PTR_ERR(da8xx_ohci->oc_gpio); goto err; + } if (da8xx_ohci->oc_gpio) { oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio); - if (oc_irq < 0) + if (oc_irq < 0) { + error = oc_irq; goto err; + } error = devm_request_threaded_irq(dev, oc_irq, NULL, ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING | diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5261f8dfedec..e3b8c84ccdb8 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = { static int jz4740_musb_init(struct musb *musb) { struct device *dev = musb->controller->parent; + int err; if (dev->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); else musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR(musb->xceiv)) { - dev_err(dev, "No transceiver configured\n"); - return PTR_ERR(musb->xceiv); + err = PTR_ERR(musb->xceiv); + if (err != -EPROBE_DEFER) + dev_err(dev, "No transceiver configured: %d", err); + return err; } /* Silicon does not implement ConfigData register. diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 15cca912c53e..5ebf30bd61bd 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb); #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) +#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \ + (3 << MUSB_DEVCTL_VBUS_SHIFT) | \ + MUSB_DEVCTL_SESSION) #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) @@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb) s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV | MUSB_DEVCTL_HR; switch (devctl & ~s) { + case MUSB_QUIRK_B_DISCONNECT_99: + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + break; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, @@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. 
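/*
 * [Editor's note -- not part of the patch] The jz4740_musb_init() hunk
 * above hand-rolls the "stay quiet on -EPROBE_DEFER" logging pattern.
 * Later kernels added dev_err_probe() for exactly this case; under
 * that assumption the same error path collapses to:
 */
if (IS_ERR(musb->xceiv))
	return dev_err_probe(dev, PTR_ERR(musb->xceiv),
			     "No transceiver configured\n");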
*/ + musb_writeb(musb->mregs, MUSB_POWER, 0); + /* Init IRQ workqueue before request_irq */ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 5fc6825745f2..2d3751d885b4 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb, controller->controller.channel_abort = dma_channel_abort; if (request_irq(irq, dma_controller_irq, 0, - dev_name(musb->controller), &controller->controller)) { + dev_name(musb->controller), controller)) { dev_err(dev, "request_irq %d failed!\n", irq); musb_dma_controller_destroy(&controller->controller); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index df582fe855f0..d3f420f3a083 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; - struct ch341_private *priv = usb_get_serial_port_data(port); + struct ch341_private *priv; int ret; + priv = usb_get_serial_port_data(port); + if (!priv) + return 0; + /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 9690a5f4b9d6..5737add6a2a4 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb) if (txCredits) { port = edge_serial->serial->port[portNumber]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { spin_lock_irqsave(&edge_port->ep_lock, flags); edge_port->txCredits += txCredits; @@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state) static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { - struct device *dev = &edge_serial->serial->dev->dev; + struct usb_serial *serial = edge_serial->serial; + struct device *dev = &serial->dev->dev; struct usb_serial_port *port; struct edgeport_port *edge_port; __u16 lastBufferLength; @@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, /* spit this data back into the tty driver if this port is open */ - if (rxLen) { - port = edge_serial->serial->port[ - edge_serial->rxPort]; + if (rxLen && edge_serial->rxPort < serial->num_ports) { + port = serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n", __func__, rxLen, edge_serial->rxPort); @@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, rxLen); edge_port->port->icount.rx += rxLen; } - buffer += rxLen; } + buffer += rxLen; break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ @@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 code = edge_serial->rxStatusCode; /* switch the port pointer to the one being currently talked about */ + if (edge_serial->rxPort >= edge_serial->serial->num_ports) + return; port = edge_serial->serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) { diff --git 
a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e66a59ef43a1..aa3dbce22cfb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); @@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index cb7aac9cd9e7..ed2b4e6dca38 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, - 0, 0, buffer, 1, 0); + 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT); kfree(buffer); if (retval < 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e9491d400a24..084cc2fff3ae 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 +#define QUECTEL_PRODUCT_RM500Q 0x0800 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb); /* Interface must have two endpoints */ #define NUMEP2 BIT(16) +/* Device needs ZLP */ +#define ZLP BIT(17) + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), + .driver_info = ZLP }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ + .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1196,6 +1207,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { 
USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial) if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; + if (device_flags & ZLP) + data->use_zlp = 1; + spin_lock_init(&data->susp_lock); usb_set_serial_data(serial, data); diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index a62981ca7a73..f93b81a297d6 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) u8 newMSR = (u8) *ch; unsigned long flags; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; @@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) unsigned long flags; u8 newLSR = (u8) *ch; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index edbbb13d6de6..bd23a7cb1be2 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS); #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \ + { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8f066bb55d7d..dc7a65b9ec98 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver) return -EINVAL; } + /* Prevent individual ports from being unbound. 
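/*
 * [Editor's note -- not part of the patch] suppress_bind_attrs removes
 * the bind/unbind attributes from a driver's sysfs directory, so user
 * space can no longer detach individual devices. Any driver that
 * cannot tolerate piecewise unbinding sets it the same way (names
 * illustrative):
 */
static struct platform_driver example_driver = {
	.driver = {
		.name			= "example",
		.suppress_bind_attrs	= true,
	},
};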
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index edbbb13d6de6..bd23a7cb1be2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
 #define MOTOROLA_TETRA_IDS()			\
 	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
 	{ USB_DEVICE(0x0cad, 0x9012) },	/* MTP6550 */ \
+	{ USB_DEVICE(0x0cad, 0x9013) },	/* MTP3xxx */ \
+	{ USB_DEVICE(0x0cad, 0x9015) },	/* MTP85xx */ \
 	{ USB_DEVICE(0x0cad, 0x9016) }	/* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8f066bb55d7d..dc7a65b9ec98 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
 		return -EINVAL;
 	}
 
+	/* Prevent individual ports from being unbound. */
+	driver->driver.suppress_bind_attrs = true;
+
 	usb_serial_operations_init(driver);
 
 	/* Add this device to our list of devices */
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 1c120eaf4091..934e9361cf6b 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -38,6 +38,7 @@ struct usb_wwan_intf_private {
 	spinlock_t susp_lock;
 	unsigned int suspended:1;
 	unsigned int use_send_setup:1;
+	unsigned int use_zlp:1;
 	int in_flight;
 	unsigned int open_ports;
 	void *private;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 7e855c87e4f7..13be21aad2f4 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
 				      void (*callback) (struct urb *))
 {
 	struct usb_serial *serial = port->serial;
+	struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
 	struct urb *urb;
 
 	urb = usb_alloc_urb(0, GFP_KERNEL);	/* No ISO */
@@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
 			  usb_sndbulkpipe(serial->dev, endpoint) | dir,
 			  buf, len, callback, ctx);
 
+	if (intfdata->use_zlp && dir == USB_DIR_OUT)
+		urb->transfer_flags |= URB_ZERO_PACKET;
+
 	return urb;
 }
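Taken together, the option.c, usb-wwan.h and usb_wwan.c hunks plumb a per-device quirk from the id table down to URB submission: devices flagged ZLP get URB_ZERO_PACKET set on their outgoing bulk URBs, so a transfer that is an exact multiple of the endpoint's wMaxPacketSize is terminated by an explicit zero-length packet. A minimal sketch of that flow, with fake_intf_private and fake_urb as hypothetical stand-ins for the kernel structures:

/*
 * Sketch of the ZLP plumbing: a flag latched from the id table at
 * attach time later tags every OUT urb with URB_ZERO_PACKET.
 */
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define ZLP		BIT(17)		/* device flag, as in option.c */
#define URB_ZERO_PACKET	0x0040		/* transfer flag, as in the USB core */
#define USB_DIR_OUT	0

struct fake_intf_private {
	unsigned int use_zlp:1;
};

struct fake_urb {
	unsigned int transfer_flags;
};

/* Mirrors the option_attach() hunk: latch the id-table flag. */
static void attach(struct fake_intf_private *data, unsigned long device_flags)
{
	if (device_flags & ZLP)
		data->use_zlp = 1;
}

/* Mirrors the usb_wwan_setup_urb() hunk: tag OUT urbs. */
static void setup_urb(struct fake_intf_private *data, struct fake_urb *urb, int dir)
{
	urb->transfer_flags = 0;
	if (data->use_zlp && dir == USB_DIR_OUT)
		urb->transfer_flags |= URB_ZERO_PACKET;
}

int main(void)
{
	struct fake_intf_private data = { 0 };
	struct fake_urb urb;

	attach(&data, ZLP);		/* e.g. Quectel RM500Q, Telit FN980 SBL */
	setup_urb(&data, &urb, USB_DIR_OUT);
	printf("transfer_flags=%#x\n", urb.transfer_flags);	/* 0x40 */
	return 0;
}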
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index c1f7073a56de..8b4ff9fff340 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
 
 	if (status & TCPC_ALERT_RX_STATUS) {
 		struct pd_message msg;
-		unsigned int cnt;
+		unsigned int cnt, payload_cnt;
 		u16 header;
 
 		regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+		/*
+		 * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+		 * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+		 * defined in table 4-36 as one greater than the number of
+		 * bytes received. And that number includes the header. So:
+		 */
+		if (cnt > 3)
+			payload_cnt = cnt - (1 + sizeof(msg.header));
+		else
+			payload_cnt = 0;
 
 		tcpci_read16(tcpci, TCPC_RX_HDR, &header);
 		msg.header = cpu_to_le16(header);
 
-		if (WARN_ON(cnt > sizeof(msg.payload)))
-			cnt = sizeof(msg.payload);
+		if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+			payload_cnt = sizeof(msg.payload);
 
-		if (cnt > 0)
+		if (payload_cnt > 0)
 			regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
-					&msg.payload, cnt);
+					&msg.payload, payload_cnt);
 
 		/* Read complete, clear RX status alert bit */
 		tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
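The arithmetic in the tcpci.c hunk above reduces to a small pure function: READABLE_BYTE_COUNT is one greater than the number of received bytes, and that number includes the 2-byte PD message header, so any register value of 3 or less means an empty payload. A stand-alone sketch, where tcpc_rx_count_to_payload() is a hypothetical helper rather than part of the driver:

/*
 * payload = cnt - (1 + sizeof(header)), clamped to 0 for cnt <= 3,
 * mirroring the payload_cnt computation in tcpci_irq() above.
 */
#include <stdio.h>

#define PD_HEADER_SIZE 2	/* le16 PD message header */

static unsigned int tcpc_rx_count_to_payload(unsigned int cnt)
{
	if (cnt <= 3)
		return 0;
	return cnt - (1 + PD_HEADER_SIZE);
}

int main(void)
{
	/* cnt = 3: header only, no payload; cnt = 31: 28 payload bytes */
	printf("%u %u\n", tcpc_rx_count_to_payload(3),
	       tcpc_rx_count_to_payload(31));
	return 0;
}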
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8569bbd3762f..831c9470bdc1 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 #define UCSI_ENABLE_NTFY_CMD_COMPLETE		BIT(16)
 #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE	BIT(17)
 #define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE	BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE		BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE	BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE	BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE		BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE	BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE		BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE		BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE	BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR			BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE		BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE	BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE	BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE		BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE	BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE		BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE		BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE	BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR			BIT(31)
 #define UCSI_ENABLE_NTFY_ALL			0xdbe70000
 
 /* SET_UOR command bits */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1679e0dc869b..cec868f8db3f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
 config MAX77620_WATCHDOG
 	tristate "Maxim Max77620 Watchdog Timer"
 	depends on MFD_MAX77620 || COMPILE_TEST
+	select WATCHDOG_CORE
 	help
 	 This is the driver for the Max77620 watchdog timer.
 	 Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
 config TQMX86_WDT
 	tristate "TQ-Systems TQMX86 Watchdog Timer"
 	depends on X86
+	select WATCHDOG_CORE
 	help
 	This is the driver for the hardware watchdog timer in the TQMX86 IO
 	controller found on some of their ComExpress Modules.
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 0a87c6f4bab2..11b9e7c6b7f5 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
 {
 	struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
 
-	imx7ulp_wdt_enable(wdt->base, true);
+	imx7ulp_wdt_enable(wdog, true);
 	imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
 
 	/* wait for wdog to fire */
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 1cccf8eb1c5d..8e6dfe76f9c9 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
 		set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
 
 	/* Request the IRQ only after the watchdog is disabled */
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0) {
 		/*
 		 * Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
 	}
 
 	/* Optional 2nd interrupt for pretimeout */
-	irq = platform_get_irq(pdev, 1);
+	irq = platform_get_irq_optional(pdev, 1);
 	if (irq > 0) {
 		orion_wdt_info.options |= WDIOF_PRETIMEOUT;
 		ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index 234876047431..6e524c8e26a8 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
 
 module_platform_driver(rn5t618_wdt_driver);
 
+MODULE_ALIAS("platform:rn5t618-wdt");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 watchdog driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index fdf533fe0bb2..56a4a4030ca9 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
 		cr_wdt_csr = NCT6102D_WDT_CSR;
 		break;
 	case NCT6116_ID:
-		ret = nct6102;
+		ret = nct6116;
 		cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
 		cr_wdt_control = NCT6102D_WDT_CONTROL;
 		cr_wdt_csr = NCT6102D_WDT_CSR;
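The ucsi.h hunk above renumbers several notification-enable bits while leaving the UCSI_ENABLE_NTFY_ALL mask untouched at 0xdbe70000. A quick stand-alone check that the relocated defines still OR together to exactly that mask:

/*
 * Verifies that the new bit positions (16-18, 21-25, 27, 28, 30, 31)
 * reproduce the unchanged UCSI_ENABLE_NTFY_ALL value.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long all =
		BIT(16) | BIT(17) | BIT(18) |	/* CMD_COMPLETE .. PWR_OPMODE */
		BIT(21) | BIT(22) | BIT(23) |	/* CAP .. PD_RESET */
		BIT(24) | BIT(25) |		/* CAM, BAT_STATUS */
		BIT(27) | BIT(28) |		/* PARTNER, PWR_DIR */
		BIT(30) | BIT(31);		/* CONNECTOR, ERROR */

	assert(all == 0xdbe70000UL);
	printf("0x%lx\n", all);	/* prints 0xdbe70000 */
	return 0;
}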