author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 17:11:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 17:11:27 -0700
commit     0b86c75db6e7f68c22ac5d0dae0f551c4897cdf5 (patch)
tree       d1be280c331fbd85c021b5686914d2cc21475f54 /kernel
parent     16bf8348055fe4615bd08ef50f9874f5dcc10268 (diff)
parent     be69f70e6395a4ba9c178b2531433547e1955195 (diff)
download   linux-0b86c75db6e7f68c22ac5d0dae0f551c4897cdf5.tar.bz2
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatching updates from Jiri Kosina:

 - removal of our own implementation of architecture-specific relocation
   code, leveraging existing code in the module loader to perform the
   arch-dependent work instead, from Jessica Yu.  The relevant patches
   have been acked by Rusty (for module.c) and Heiko (for s390).

 - live patching support for ppc64le, which is joint work of Michael
   Ellerman and Torsten Duwe.  This is coming from a topic branch that is
   shared between livepatching.git and the ppc tree.

 - addition of livepatching documentation from Petr Mladek

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: make object/func-walking helpers more robust
  livepatch: Add some basic livepatch documentation
  powerpc/livepatch: Add live patching support on ppc64le
  powerpc/livepatch: Add livepatch stack to struct thread_info
  powerpc/livepatch: Add livepatch header
  livepatch: Allow architectures to specify an alternate ftrace location
  ftrace: Make ftrace_location_range() global
  livepatch: robustify klp_register_patch() API error checking
  Documentation: livepatch: outline Elf format and requirements for patch modules
  livepatch: reuse module loader code to write relocations
  module: s390: keep mod_arch_specific for livepatch modules
  module: preserve Elf information for livepatch modules
  Elf: add livepatch-specific Elf constants
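The series robustifies the klp_register_patch() error checking and has the module loader honour a "livepatch" modinfo tag (see find_livepatch_modinfo() in the kernel/module.c hunks below). For orientation, here is a minimal sketch of a patch module built against that API, loosely following the in-tree livepatch sample; the patched function (cmdline_proc_show) and its replacement body are illustrative only, not part of this merge.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Replacement body for the function being patched (illustrative). */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",   /* symbol to patch */
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* a NULL name means the object is vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	/* klp_register_patch() now also rejects modules lacking the livepatch marker */
	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}
	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");   /* the "livepatch" modinfo tag checked by the module loader */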
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/livepatch/core.c    191
-rw-r--r--   kernel/module.c            125
-rw-r--r--   kernel/trace/ftrace.c       14
3 files changed, 266 insertions, 64 deletions
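The kernel/livepatch/core.c changes below drop the old klp_reloc tables in favour of relocation sections named .klp.rela.objname.section_name, whose symbols are named .klp.sym.objname.symname,sympos; klp_resolve_symbols() recovers objname, symname and sympos with a single sscanf(). A stand-alone user-space sketch of that parse, reusing the same format string (the symbol name here is only an example):

#include <stdio.h>

int main(void)
{
	/*
	 * Format: .klp.sym.objname.symname,sympos. sympos disambiguates
	 * duplicate symbol names; the field widths 55/127 mirror the
	 * MODULE_NAME_LEN/KSYM_NAME_LEN bounds asserted in the kernel code.
	 */
	const char *name = ".klp.sym.vmlinux.printk,0";   /* example symbol */
	char objname[56], symname[128];
	unsigned long sympos;

	if (sscanf(name, ".klp.sym.%55[^.].%127[^,],%lu",
		   objname, symname, &sympos) != 3) {
		fprintf(stderr, "unexpected symbol name format\n");
		return 1;
	}

	printf("object=%s symbol=%s sympos=%lu\n", objname, symname, sympos);
	return 0;
}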
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index d68fbf63b083..5c2bc1052691 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -28,6 +28,8 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
+#include <linux/elf.h>
+#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
/**
@@ -204,75 +206,109 @@ static int klp_find_object_symbol(const char *objname, const char *name,
return -EINVAL;
}
-/*
- * external symbols are located outside the parent object (where the parent
- * object is either vmlinux or the kmod being patched).
- */
-static int klp_find_external_symbol(struct module *pmod, const char *name,
- unsigned long *addr)
+static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
- const struct kernel_symbol *sym;
-
- /* first, check if it's an exported symbol */
- preempt_disable();
- sym = find_symbol(name, NULL, NULL, true, true);
- if (sym) {
- *addr = sym->value;
- preempt_enable();
- return 0;
- }
- preempt_enable();
+ int i, cnt, vmlinux, ret;
+ char objname[MODULE_NAME_LEN];
+ char symname[KSYM_NAME_LEN];
+ char *strtab = pmod->core_kallsyms.strtab;
+ Elf_Rela *relas;
+ Elf_Sym *sym;
+ unsigned long sympos, addr;
/*
- * Check if it's in another .o within the patch module. This also
- * checks that the external symbol is unique.
+ * Since the field widths for objname and symname in the sscanf()
+ * call are hard-coded and correspond to MODULE_NAME_LEN and
+ * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
+ * and KSYM_NAME_LEN have the values we expect them to have.
+ *
+ * Because the value of MODULE_NAME_LEN can differ among architectures,
+ * we use the smallest/strictest upper bound possible (56, based on
+ * the current definition of MODULE_NAME_LEN) to prevent overflows.
*/
- return klp_find_object_symbol(pmod->name, name, 0, addr);
+ BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+
+ relas = (Elf_Rela *) relasec->sh_addr;
+ /* For each rela in this klp relocation section */
+ for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
+ sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+ if (sym->st_shndx != SHN_LIVEPATCH) {
+ pr_err("symbol %s is not marked as a livepatch symbol",
+ strtab + sym->st_name);
+ return -EINVAL;
+ }
+
+ /* Format: .klp.sym.objname.symname,sympos */
+ cnt = sscanf(strtab + sym->st_name,
+ ".klp.sym.%55[^.].%127[^,],%lu",
+ objname, symname, &sympos);
+ if (cnt != 3) {
+ pr_err("symbol %s has an incorrectly formatted name",
+ strtab + sym->st_name);
+ return -EINVAL;
+ }
+
+ /* klp_find_object_symbol() treats a NULL objname as vmlinux */
+ vmlinux = !strcmp(objname, "vmlinux");
+ ret = klp_find_object_symbol(vmlinux ? NULL : objname,
+ symname, sympos, &addr);
+ if (ret)
+ return ret;
+
+ sym->st_value = addr;
+ }
+
+ return 0;
}
static int klp_write_object_relocations(struct module *pmod,
struct klp_object *obj)
{
- int ret = 0;
- unsigned long val;
- struct klp_reloc *reloc;
+ int i, cnt, ret = 0;
+ const char *objname, *secname;
+ char sec_objname[MODULE_NAME_LEN];
+ Elf_Shdr *sec;
if (WARN_ON(!klp_is_object_loaded(obj)))
return -EINVAL;
- if (WARN_ON(!obj->relocs))
- return -EINVAL;
+ objname = klp_is_module(obj) ? obj->name : "vmlinux";
module_disable_ro(pmod);
+ /* For each klp relocation section */
+ for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
+ sec = pmod->klp_info->sechdrs + i;
+ secname = pmod->klp_info->secstrings + sec->sh_name;
+ if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+ continue;
- for (reloc = obj->relocs; reloc->name; reloc++) {
- /* discover the address of the referenced symbol */
- if (reloc->external) {
- if (reloc->sympos > 0) {
- pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
- reloc->name);
- ret = -EINVAL;
- goto out;
- }
- ret = klp_find_external_symbol(pmod, reloc->name, &val);
- } else
- ret = klp_find_object_symbol(obj->name,
- reloc->name,
- reloc->sympos,
- &val);
+ /*
+ * Format: .klp.rela.sec_objname.section_name
+ * See comment in klp_resolve_symbols() for an explanation
+ * of the selected field width value.
+ */
+ cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
+ if (cnt != 1) {
+ pr_err("section %s has an incorrectly formatted name",
+ secname);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (strcmp(objname, sec_objname))
+ continue;
+
+ ret = klp_resolve_symbols(sec, pmod);
if (ret)
- goto out;
+ break;
- ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
- val + reloc->addend);
- if (ret) {
- pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
- reloc->name, val, ret);
- goto out;
- }
+ ret = apply_relocate_add(pmod->klp_info->sechdrs,
+ pmod->core_kallsyms.strtab,
+ pmod->klp_info->symndx, i, pmod);
+ if (ret)
+ break;
}
-out:
module_enable_ro(pmod);
return ret;
}
@@ -298,6 +334,19 @@ unlock:
rcu_read_unlock();
}
+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+ return faddr;
+}
+#endif
+
static void klp_disable_func(struct klp_func *func)
{
struct klp_ops *ops;
@@ -312,8 +361,14 @@ static void klp_disable_func(struct klp_func *func)
return;
if (list_is_singular(&ops->func_stack)) {
+ unsigned long ftrace_loc;
+
+ ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ if (WARN_ON(!ftrace_loc))
+ return;
+
WARN_ON(unregister_ftrace_function(&ops->fops));
- WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+ WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
list_del_rcu(&func->stack_node);
list_del(&ops->node);
@@ -338,6 +393,15 @@ static int klp_enable_func(struct klp_func *func)
ops = klp_find_ops(func->old_addr);
if (!ops) {
+ unsigned long ftrace_loc;
+
+ ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ if (!ftrace_loc) {
+ pr_err("failed to find location for function '%s'\n",
+ func->old_name);
+ return -EINVAL;
+ }
+
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
@@ -352,7 +416,7 @@ static int klp_enable_func(struct klp_func *func)
INIT_LIST_HEAD(&ops->func_stack);
list_add_rcu(&func->stack_node, &ops->func_stack);
- ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+ ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
if (ret) {
pr_err("failed to set ftrace filter for function '%s' (%d)\n",
func->old_name, ret);
@@ -363,7 +427,7 @@ static int klp_enable_func(struct klp_func *func)
if (ret) {
pr_err("failed to register ftrace handler for function '%s' (%d)\n",
func->old_name, ret);
- ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
goto err;
}
@@ -683,6 +747,9 @@ static void klp_free_patch(struct klp_patch *patch)
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
+ if (!func->old_name || !func->new_func)
+ return -EINVAL;
+
INIT_LIST_HEAD(&func->stack_node);
func->state = KLP_DISABLED;
@@ -703,11 +770,9 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
- if (obj->relocs) {
- ret = klp_write_object_relocations(patch->mod, obj);
- if (ret)
- return ret;
- }
+ ret = klp_write_object_relocations(patch->mod, obj);
+ if (ret)
+ return ret;
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
@@ -842,12 +907,18 @@ int klp_register_patch(struct klp_patch *patch)
{
int ret;
- if (!klp_initialized())
- return -ENODEV;
-
if (!patch || !patch->mod)
return -EINVAL;
+ if (!is_livepatch_module(patch->mod)) {
+ pr_err("module %s is not marked as a livepatch module",
+ patch->mod->name);
+ return -EINVAL;
+ }
+
+ if (!klp_initialized())
+ return -ENODEV;
+
/*
* A reference is taken on the patch module to prevent it from being
* unloaded. Right now, we don't allow patch modules to unload since
diff --git a/kernel/module.c b/kernel/module.c
index 041200ca4a2d..5f71aa63ed2a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1973,6 +1973,83 @@ static void module_enable_nx(const struct module *mod) { }
static void module_disable_nx(const struct module *mod) { }
#endif
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Persist Elf information about a module. Copy the Elf header,
+ * section header table, section string table, and symtab section
+ * index from info to mod->klp_info.
+ */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+ unsigned int size, symndx;
+ int ret;
+
+ size = sizeof(*mod->klp_info);
+ mod->klp_info = kmalloc(size, GFP_KERNEL);
+ if (mod->klp_info == NULL)
+ return -ENOMEM;
+
+ /* Elf header */
+ size = sizeof(mod->klp_info->hdr);
+ memcpy(&mod->klp_info->hdr, info->hdr, size);
+
+ /* Elf section header table */
+ size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
+ mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
+ if (mod->klp_info->sechdrs == NULL) {
+ ret = -ENOMEM;
+ goto free_info;
+ }
+ memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
+
+ /* Elf section name string table */
+ size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
+ mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
+ if (mod->klp_info->secstrings == NULL) {
+ ret = -ENOMEM;
+ goto free_sechdrs;
+ }
+ memcpy(mod->klp_info->secstrings, info->secstrings, size);
+
+ /* Elf symbol section index */
+ symndx = info->index.sym;
+ mod->klp_info->symndx = symndx;
+
+ /*
+ * For livepatch modules, core_kallsyms.symtab is a complete
+ * copy of the original symbol table. Adjust sh_addr to point
+ * to core_kallsyms.symtab since the copy of the symtab in module
+ * init memory is freed at the end of do_init_module().
+ */
+ mod->klp_info->sechdrs[symndx].sh_addr = \
+ (unsigned long) mod->core_kallsyms.symtab;
+
+ return 0;
+
+free_sechdrs:
+ kfree(mod->klp_info->sechdrs);
+free_info:
+ kfree(mod->klp_info);
+ return ret;
+}
+
+static void free_module_elf(struct module *mod)
+{
+ kfree(mod->klp_info->sechdrs);
+ kfree(mod->klp_info->secstrings);
+ kfree(mod->klp_info);
+}
+#else /* !CONFIG_LIVEPATCH */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+ return 0;
+}
+
+static void free_module_elf(struct module *mod)
+{
+}
+#endif /* CONFIG_LIVEPATCH */
+
void __weak module_memfree(void *module_region)
{
vfree(module_region);
@@ -2011,6 +2088,9 @@ static void free_module(struct module *mod)
/* Free any allocated parameters. */
destroy_params(mod->kp, mod->num_kp);
+ if (is_livepatch_module(mod))
+ free_module_elf(mod);
+
/* Now we can delete it from the lists */
mutex_lock(&module_mutex);
/* Unlink carefully: kallsyms could be walking list. */
@@ -2126,6 +2206,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
(long)sym[i].st_value);
break;
+ case SHN_LIVEPATCH:
+ /* Livepatch symbols are resolved by livepatch */
+ break;
+
case SHN_UNDEF:
ksym = resolve_symbol_wait(mod, info, name);
/* Ok if resolved. */
@@ -2174,6 +2258,10 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;
+ /* Livepatch relocation sections are applied by livepatch */
+ if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
+ continue;
+
if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod);
@@ -2469,7 +2557,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
/* Compute total space required for the core symbols' strtab. */
for (ndst = i = 0; i < nsrc; i++) {
- if (i == 0 ||
+ if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
strtab_size += strlen(&info->strtab[src[i].st_name])+1;
@@ -2528,7 +2616,7 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
src = mod->kallsyms->symtab;
for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
- if (i == 0 ||
+ if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
dst[ndst] = src[i];
@@ -2667,6 +2755,26 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l
return 0;
}
+#ifdef CONFIG_LIVEPATCH
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+ mod->klp = get_modinfo(info, "livepatch") ? true : false;
+
+ return 0;
+}
+#else /* !CONFIG_LIVEPATCH */
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+ if (get_modinfo(info, "livepatch")) {
+ pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
+ mod->name);
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_LIVEPATCH */
+
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
struct load_info *info)
@@ -2821,6 +2929,10 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
"is unknown, you have been warned.\n", mod->name);
}
+ err = find_livepatch_modinfo(mod, info);
+ if (err)
+ return err;
+
/* Set up license info based on the info section */
set_license(mod, get_modinfo(info, "license"));
@@ -3494,6 +3606,12 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err < 0)
goto coming_cleanup;
+ if (is_livepatch_module(mod)) {
+ err = copy_module_elf(mod, info);
+ if (err < 0)
+ goto sysfs_cleanup;
+ }
+
/* Get rid of temporary copy. */
free_copy(info);
@@ -3502,11 +3620,12 @@ static int load_module(struct load_info *info, const char __user *uargs,
return do_init_module(mod);
+ sysfs_cleanup:
+ mod_sysfs_teardown(mod);
coming_cleanup:
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
-
bug_cleanup:
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1870fbd2b67..7e8d792da963 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1530,7 +1530,19 @@ static int ftrace_cmp_recs(const void *a, const void *b)
return 0;
}
-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ * if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ * to check.
+ *
+ * Returns rec->ip if the related ftrace location is at least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;