| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-17 13:01:31 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-17 13:01:31 -0800 |
| commit | 312dcaf967219effe0483785f24e4072a5bed9a5 (patch) | |
| tree | ffa4dbb68b062541330bd2a21a31807d37cd84c3 /kernel | |
| parent | 6daa90439e91bb9a71864b02f7d0af8587ea889a (diff) | |
| parent | 38dc717e97153e46375ee21797aa54777e5498f3 (diff) | |
| download | linux-312dcaf967219effe0483785f24e4072a5bed9a5.tar.bz2 | |
Merge tag 'modules-for-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux
Pull modules updates from Jessica Yu:
"Summary of modules changes for the 5.11 merge window:
- Fix a race condition between systemd/udev and the module loader.
The module loader was sending a uevent before the module was fully
initialized (i.e., before its init function has been called). This
means udev can start processing the module uevent before the module
has finished initializing, and some udev rules expect that the
module has initialized already upon receiving the uevent.
This resulted in some systemd mount units failing when udev processed
the event faster than the module could finish init. This is fixed by
delaying the uevent until after the module has called its init
routine (a condensed sketch of the new ordering follows this summary).
- Make the linker array sections for kernel params and module version
attributes more robust by switching to the alignment of the type in
question.
Namely, linker section arrays will be constructed using the alignment
required by the struct (using __alignof__()) as opposed to a specific
value such as sizeof(void *) or sizeof(long). This is less likely to
cause breakage should the size of the type ever change (Johan Hovold).
A self-contained sketch of this section-array pattern follows the
commit list below.
- Fix module state inconsistency by setting it back to GOING when a
module fails to load and is on its way out (Miroslav Benes). A short
module-notifier example after the diffstat below shows the kind of
code that observes this state.
- Some comment and code cleanups (Sergey Shtylyov)"
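To make the first fix concrete, here is a heavily condensed sketch of do_init_module() in kernel/module.c as it looks after this merge. It is not the complete function: error handling, freeing of the __init sections, and the async synchronization are omitted; only the ordering of the init call, the LIVE notification, and the uevent is the point (it follows the do_init_module() hunk in the diff at the bottom of this page).

```c
/* Condensed sketch -- not the complete kernel/module.c function. */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;

	if (mod->init)
		ret = do_one_initcall(mod->init);
	if (ret < 0)
		goto fail;	/* init failed: the module is torn down */

	/* Only after a successful init does the module become live... */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * ...and only now is the KOBJ_ADD uevent sent, so udev rules that
	 * inspect state set up by mod->init() no longer race with it.
	 * Previously the uevent was emitted from mod_sysfs_setup(), i.e.
	 * while the module was still in MODULE_STATE_COMING.
	 */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);

	/* ...async_synchronize_full(), freeing of __init sections, etc. */
	return 0;

fail:
	/* The real code marks the module GOING, notifies, and frees it. */
	return ret;
}
```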
* tag 'modules-for-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux:
module: delay kobject uevent until after module init call
module: drop semicolon from version macro
init: use type alignment for kernel parameters
params: clean up module-param macros
params: use type alignment for kernel parameters
params: drop redundant "unused" attributes
module: simplify version-attribute handling
module: drop version-attribute alignment
module: fix comment style
module: add more 'kernel-doc' comments
module: fix up 'kernel-doc' comments
module: only handle errors with the *switch* statement in module_sig_check()
module: avoid *goto*s in module_sig_check()
module: merge repetitive strings in module_sig_check()
module: set MODULE_STATE_GOING state when a module fails to load
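The alignment change behind the params/modver commits above is easiest to see outside the kernel. The toy program below uses the same pattern as the kernel's __param and __modver sections; all names in it (my_entry, DEFINE_MY_ENTRY, the "myarr" section) are invented for this sketch. Each definition drops a struct into a named ELF section, GNU ld provides __start_/__stop_ symbols for that section, and the entries are then walked as a plain array. Aligning every entry to __alignof__() of the struct, rather than to sizeof(void *) or sizeof(long), is what keeps the section packed like a real C array even if the struct later grows or changes alignment.

```c
/* Toy model of a kernel-style linker-section array (GCC/Clang + GNU ld). */
#include <stdio.h>

struct my_entry {
	const char *name;
	int value;
};

/*
 * Emit one struct my_entry into the "myarr" section.  The aligned()
 * attribute uses the type's own alignment so there is no padding
 * between entries and they can be indexed as __start_myarr[0], [1], ...
 */
#define DEFINE_MY_ENTRY(id, n, v)					\
	static const struct my_entry __myarr_##id			\
	__attribute__((used, section("myarr"),				\
		       aligned(__alignof__(struct my_entry)))) =	\
		{ .name = (n), .value = (v) }

DEFINE_MY_ENTRY(foo, "foo", 1);
DEFINE_MY_ENTRY(bar, "bar", 2);

/* GNU ld generates these for any section whose name is a valid C identifier. */
extern const struct my_entry __start_myarr[];
extern const struct my_entry __stop_myarr[];

int main(void)
{
	const struct my_entry *e;

	for (e = __start_myarr; e < __stop_myarr; e++)
		printf("%s = %d\n", e->name, e->value);
	return 0;
}
```

Per the pull message, Johan Hovold's patches apply this idea to the kernel parameter and module-version attribute sections, which is also what lets kernel/params.c (last hunk of the diff below) walk __start___modver as an array of structs instead of an array of pointers.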
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/module.c | 200 |
| -rw-r--r-- | kernel/params.c | 10 |
2 files changed, 121 insertions, 89 deletions
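The MODULE_STATE_GOING fix mostly matters to code that watches module state transitions. Below is a hypothetical out-of-tree watcher: the name example_module_notify and its messages are invented, while register_module_notifier() and the MODULE_STATE_* values are the real kernel API. Before the fix, a module that failed during load reached the GOING notification with mod->state still reading MODULE_STATE_COMING; the one-line change at the bug_cleanup label in the diff below makes the stored state match the event such code receives.

```c
/* Hedged sketch of a module-state watcher; builds as an ordinary module. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_module_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct module *mod = data;

	switch (action) {
	case MODULE_STATE_COMING:
		pr_info("%s coming (state=%d)\n", mod->name, mod->state);
		break;
	case MODULE_STATE_GOING:
		/*
		 * With the fix, mod->state reads MODULE_STATE_GOING here
		 * even when the module is being torn down after a failed
		 * load, not just on a normal unload.
		 */
		pr_info("%s going (state=%d)\n", mod->name, mod->state);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_module_notify,
};

static int __init example_init(void)
{
	return register_module_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	unregister_module_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```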
diff --git a/kernel/module.c b/kernel/module.c
index c3a9e972d3b2..4bf30e4b3eaa 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1,9 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
-   Copyright (C) 2002 Richard Henderson
-   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
-
-*/
+ * Copyright (C) 2002 Richard Henderson
+ * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
+ */
 
 #define INCLUDE_VERMAGIC
@@ -86,7 +85,8 @@
  * 1) List of modules (also safely readable with preempt_disable),
  * 2) module_use links,
  * 3) module_addr_min/module_addr_max.
- * (delete and add uses RCU list operations). */
+ * (delete and add uses RCU list operations).
+ */
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
@@ -615,8 +615,10 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,
 	return false;
 }
 
-/* Find an exported symbol and return it, along with, (optional) crc and
- * (optional) module which owns it. Needs preempt disabled or module_mutex. */
+/*
+ * Find an exported symbol and return it, along with, (optional) crc and
+ * (optional) module which owns it. Needs preempt disabled or module_mutex.
+ */
 static const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
 					const s32 **crc,
@@ -756,13 +758,12 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 }
 
 /**
- * is_module_percpu_address - test whether address is from module static percpu
+ * is_module_percpu_address() - test whether address is from module static percpu
  * @addr: address to test
 *
  * Test whether @addr belongs to module static percpu area.
 *
- * RETURNS:
- * %true if @addr is from module static percpu area
+ * Return: %true if @addr is from module static percpu area
  */
 bool is_module_percpu_address(unsigned long addr)
 {
@@ -986,11 +987,10 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 }
 
 /**
- * module_refcount - return the refcount or -1 if unloading
- *
+ * module_refcount() - return the refcount or -1 if unloading
  * @mod: the module we're checking
 *
- * Returns:
+ * Return:
  *	-1 if the module is in the process of unloading
  *	otherwise the number of references in the kernel to the module
  */
@@ -1675,8 +1675,10 @@ static void remove_sect_attrs(struct module *mod)
 	if (mod->sect_attrs) {
 		sysfs_remove_group(&mod->mkobj.kobj, &mod->sect_attrs->grp);
-		/* We are positive that no one is using any sect attrs
-		 * at this point. Deallocate immediately. */
+		/*
+		 * We are positive that no one is using any sect attrs
+		 * at this point. Deallocate immediately.
+		 */
 		free_sect_attrs(mod->sect_attrs);
 		mod->sect_attrs = NULL;
 	}
@@ -1924,7 +1926,6 @@ static int mod_sysfs_init(struct module *mod)
 	if (err)
 		mod_kobject_put(mod);
 
-	/* delay uevent until full sysfs population */
 out:
 	return err;
 }
@@ -1961,7 +1962,6 @@ static int mod_sysfs_setup(struct module *mod,
 	add_sect_attrs(mod, info);
 	add_notes_attrs(mod, info);
 
-	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
 	return 0;
 
 out_unreg_modinfo_attrs:
@@ -2247,8 +2247,10 @@ static void free_module(struct module *mod)
 	mod_sysfs_teardown(mod);
 
-	/* We leave it in list to prevent duplicate loads, but make sure
-	 * that noone uses it while it's being deconstructed. */
+	/*
+	 * We leave it in list to prevent duplicate loads, but make sure
+	 * that noone uses it while it's being deconstructed.
+	 */
 	mutex_lock(&module_mutex);
 	mod->state = MODULE_STATE_UNFORMED;
 	mutex_unlock(&module_mutex);
@@ -2365,8 +2367,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
 			if (!strncmp(name, "__gnu_lto", 9))
 				break;
 
-			/* We compiled with -fno-common. These are not
-			   supposed to happen. */
+			/*
+			 * We compiled with -fno-common. These are not
+			 * supposed to happen.
+			 */
 			pr_debug("Common symbol: %s\n", name);
 			pr_warn("%s: please compile with -fno-common\n",
 				mod->name);
@@ -2469,16 +2473,20 @@ static long get_offset(struct module *mod, unsigned int *size,
 	return ret;
 }
 
-/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
-   might -- code, read-only data, read-write data, small data. Tally
-   sizes, and place the offsets into sh_entsize fields: high bit means it
-   belongs in init. */
+/*
+ * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ * might -- code, read-only data, read-write data, small data. Tally
+ * sizes, and place the offsets into sh_entsize fields: high bit means it
+ * belongs in init.
+ */
 static void layout_sections(struct module *mod, struct load_info *info)
 {
 	static unsigned long const masks[][2] = {
-		/* NOTE: all executable code must be the first section
+		/*
+		 * NOTE: all executable code must be the first section
 		 * in this array; otherwise modify the text_size
-		 * finder in the two loops below */
+		 * finder in the two loops below
+		 */
 		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
 		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
 		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
@@ -2924,40 +2932,43 @@ static int module_sig_check(struct load_info *info, int flags)
 		/* We truncate the module to discard the signature */
 		info->len -= markerlen;
 		err = mod_verify_sig(mod, info);
+		if (!err) {
+			info->sig_ok = true;
+			return 0;
+		}
 	}
 
+	/*
+	 * We don't permit modules to be loaded into the trusted kernels
+	 * without a valid signature on them, but if we're not enforcing,
+	 * certain errors are non-fatal.
+	 */
 	switch (err) {
-	case 0:
-		info->sig_ok = true;
-		return 0;
-
-		/* We don't permit modules to be loaded into trusted kernels
-		 * without a valid signature on them, but if we're not
-		 * enforcing, certain errors are non-fatal.
-		 */
 	case -ENODATA:
-		reason = "Loading of unsigned module";
-		goto decide;
+		reason = "unsigned module";
+		break;
 	case -ENOPKG:
-		reason = "Loading of module with unsupported crypto";
-		goto decide;
+		reason = "module with unsupported crypto";
+		break;
 	case -ENOKEY:
-		reason = "Loading of module with unavailable key";
-	decide:
-		if (is_module_sig_enforced()) {
-			pr_notice("%s: %s is rejected\n", info->name, reason);
-			return -EKEYREJECTED;
-		}
-
-		return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
+		reason = "module with unavailable key";
+		break;
 
-		/* All other errors are fatal, including nomem, unparseable
-		 * signatures and signature check failures - even if signatures
-		 * aren't required.
-		 */
 	default:
+		/*
+		 * All other errors are fatal, including lack of memory,
+		 * unparseable signatures, and signature check failures --
+		 * even if signatures aren't required.
+		 */
 		return err;
 	}
+
+	if (is_module_sig_enforced()) {
+		pr_notice("%s: loading of %s is rejected\n", info->name, reason);
+		return -EKEYREJECTED;
+	}
+
+	return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
 }
 #else /* !CONFIG_MODULE_SIG */
 static int module_sig_check(struct load_info *info, int flags)
@@ -3090,8 +3101,10 @@ static int rewrite_section_headers(struct load_info *info, int flags)
 			return -ENOEXEC;
 		}
 
-		/* Mark all sections sh_addr with their address in the
-		   temporary image. */
+		/*
+		 * Mark all sections sh_addr with their address in the
+		 * temporary image.
+		 */
 		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
 
 #ifndef CONFIG_MODULE_UNLOAD
@@ -3525,9 +3538,11 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	if (ndx)
 		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
 
-	/* Determine total sizes, and put offsets in sh_entsize. For now
-	   this is done generically; there doesn't appear to be any
-	   special cases for the architectures. */
+	/*
+	 * Determine total sizes, and put offsets in sh_entsize. For now
+	 * this is done generically; there doesn't appear to be any
+	 * special cases for the architectures.
+	 */
 	layout_sections(info->mod, info);
 	layout_symtab(info->mod, info);
@@ -3671,6 +3686,9 @@ static noinline int do_init_module(struct module *mod)
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_LIVE, mod);
 
+	/* Delay uevent until module has finished its init routine */
+	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
 	/*
 	 * We need to finish all async code before the module init sequence
 	 * is done. This has potential to deadlock. For example, a newly
@@ -3815,8 +3833,10 @@ static int complete_formation(struct module *mod, struct load_info *info)
 	module_enable_nx(mod);
 	module_enable_x(mod);
 
-	/* Mark state as coming so strong_try_module_get() ignores us,
-	 * but kallsyms etc. can see us. */
+	/*
+	 * Mark state as coming so strong_try_module_get() ignores us,
+	 * but kallsyms etc. can see us.
+	 */
 	mod->state = MODULE_STATE_COMING;
 	mutex_unlock(&module_mutex);
@@ -3863,8 +3883,10 @@ static int unknown_module_param_cb(char *param, char *val, const char *modname,
 	return 0;
 }
 
-/* Allocate and load the module: note that size of section 0 is always
-   zero, and we rely on this for optional sections. */
+/*
+ * Allocate and load the module: note that size of section 0 is always
+ * zero, and we rely on this for optional sections.
+ */
 static int load_module(struct load_info *info, const char __user *uargs,
 		       int flags)
 {
@@ -3938,8 +3960,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	init_param_lock(mod);
 
-	/* Now we've got everything in the final locations, we can
-	 * find optional sections. */
+	/*
+	 * Now we've got everything in the final locations, we can
+	 * find optional sections.
+	 */
 	err = find_module_sections(mod, info);
 	if (err)
 		goto free_unload;
@@ -4027,6 +4051,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 				     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
  bug_cleanup:
+	mod->state = MODULE_STATE_GOING;
 	/* module_bug_cleanup needs module_mutex protection */
 	mutex_lock(&module_mutex);
 	module_bug_cleanup(mod);
@@ -4152,8 +4177,10 @@ static const char *find_kallsyms_symbol(struct module *mod,
 	bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
 
-	/* Scan for closest preceding symbol, and next symbol. (ELF
-	   starts real symbols at 1). */
+	/*
+	 * Scan for closest preceding symbol, and next symbol. (ELF
+	 * starts real symbols at 1).
+	 */
 	for (i = 1; i < kallsyms->num_symtab; i++) {
 		const Elf_Sym *sym = &kallsyms->symtab[i];
 		unsigned long thisval = kallsyms_symbol_value(sym);
@@ -4161,8 +4188,10 @@ static const char *find_kallsyms_symbol(struct module *mod,
 		if (sym->st_shndx == SHN_UNDEF)
 			continue;
 
-		/* We ignore unnamed symbols: they're uninformative
-		 * and inserted at a whim. */
+		/*
+		 * We ignore unnamed symbols: they're uninformative
+		 * and inserted at a whim.
+		 */
 		if (*kallsyms_symbol_name(kallsyms, i) == '\0' ||
 		    is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
 			continue;
@@ -4192,8 +4221,10 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
 	return ptr;
 }
 
-/* For kallsyms to ask for address resolution. NULL means not found. Careful
- * not to lock to avoid deadlock on oopses, simply disable preemption. */
+/*
+ * For kallsyms to ask for address resolution. NULL means not found. Careful
+ * not to lock to avoid deadlock on oopses, simply disable preemption.
+ */
 const char *module_address_lookup(unsigned long addr,
 			    unsigned long *size,
 			    unsigned long *offset,
@@ -4451,11 +4482,12 @@ static int m_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-/* Format: modulename size refcount deps address
-
-   Where refcount is a number or -, and deps is a comma-separated list
-   of depends or -.
-*/
+/*
+ * Format: modulename size refcount deps address
+ *
+ * Where refcount is a number or -, and deps is a comma-separated list
+ * of depends or -.
+ */
 static const struct seq_operations modules_op = {
 	.start	= m_start,
 	.next	= m_next,
@@ -4525,8 +4557,8 @@ out:
 	return e;
 }
 
-/*
- * is_module_address - is this address inside a module?
+/**
+ * is_module_address() - is this address inside a module?
  * @addr: the address to check.
 *
  * See is_module_text_address() if you simply want to see if the address
@@ -4543,8 +4575,8 @@ bool is_module_address(unsigned long addr)
 	return ret;
 }
 
-/*
- * __module_address - get the module which contains an address.
+/**
+ * __module_address() - get the module which contains an address.
  * @addr: the address.
 *
  * Must be called with preempt disabled or module mutex held so that
@@ -4568,8 +4600,8 @@ struct module *__module_address(unsigned long addr)
 	return mod;
 }
 
-/*
- * is_module_text_address - is this address inside module code?
+/**
+ * is_module_text_address() - is this address inside module code?
  * @addr: the address to check.
 *
  * See is_module_address() if you simply want to see if the address is
@@ -4587,8 +4619,8 @@ bool is_module_text_address(unsigned long addr)
 	return ret;
 }
 
-/*
- * __module_text_address - get the module whose code contains an address.
+/**
+ * __module_text_address() - get the module whose code contains an address.
  * @addr: the address.
 *
  * Must be called with preempt disabled or module mutex held so that
@@ -4627,8 +4659,10 @@ void print_modules(void)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* Generate the signature for all relevant module structures here.
- * If these change, we don't want to try to parse the module. */
+/*
+ * Generate the signature for all relevant module structures here.
+ * If these change, we don't want to try to parse the module.
+ */
 void module_layout(struct module *mod,
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
diff --git a/kernel/params.c b/kernel/params.c
index 164d79330849..2daa2780a92c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -843,18 +843,16 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
 	return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
 }
 
-extern const struct module_version_attribute *__start___modver[];
-extern const struct module_version_attribute *__stop___modver[];
+extern const struct module_version_attribute __start___modver[];
+extern const struct module_version_attribute __stop___modver[];
 
 static void __init version_sysfs_builtin(void)
 {
-	const struct module_version_attribute **p;
+	const struct module_version_attribute *vattr;
 	struct module_kobject *mk;
 	int err;
 
-	for (p = __start___modver; p < __stop___modver; p++) {
-		const struct module_version_attribute *vattr = *p;
-
+	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
 		mk = locate_module_kobject(vattr->module_name);
 		if (mk) {
 			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
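One part of the diff above that benefits from being read un-diffed is Sergey Shtylyov's rework of module_sig_check(). Assembled from the '+' lines of that hunk, the resulting flow looks roughly like this; the signature-marker handling at the top is abbreviated behind a made-up helper name (module_has_sig_marker), since that part of the function is unchanged and only partially visible in the hunk.

```c
/* Resulting shape of module_sig_check(), condensed from the hunk above. */
static int module_sig_check(struct load_info *info, int flags)
{
	int err = -ENODATA;
	const char *reason;
	const void *mod = info->hdr;

	/*
	 * module_has_sig_marker() is a stand-in for the unchanged code at
	 * the top of the function that checks for (and strips) the
	 * MODULE_SIG_STRING marker when flags == 0.
	 */
	if (module_has_sig_marker(info, flags)) {
		err = mod_verify_sig(mod, info);
		if (!err) {
			info->sig_ok = true;
			return 0;
		}
	}

	/*
	 * Unless signatures are enforced, the "missing signature" class of
	 * errors is non-fatal; everything else is fatal.
	 */
	switch (err) {
	case -ENODATA:
		reason = "unsigned module";
		break;
	case -ENOPKG:
		reason = "module with unsupported crypto";
		break;
	case -ENOKEY:
		reason = "module with unavailable key";
		break;
	default:
		return err;
	}

	if (is_module_sig_enforced()) {
		pr_notice("%s: loading of %s is rejected\n", info->name, reason);
		return -EKEYREJECTED;
	}

	return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
}
```

Compared with the old version, success is handled where verification happens, the switch deals only with errors, and the enforcement decision and the pr_notice() string each exist exactly once.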