Diffstat (limited to 'kernel')
 kernel/bpf/btf.c                           |  8
 kernel/bpf/lpm_trie.c                      |  5
 kernel/bpf/verifier.c                      | 10
 kernel/cgroup/cgroup-v1.c                  |  4
 kernel/cgroup/cpuset.c                     |  5
 kernel/configs/android-recommended.config  |  2
 kernel/configs/tiny.config                 |  4
 kernel/debug/kdb/kdb_main.c                | 13
 kernel/events/ring_buffer.c                |  3
 kernel/events/uprobes.c                    |  3
 kernel/fail_function.c                     |  2
 kernel/fork.c                              | 10
 kernel/gcov/Kconfig                        | 17
 kernel/gcov/Makefile                       |  2
 kernel/kcov.c                              | 21
 kernel/kexec_core.c                        |  4
 kernel/kexec_file.c                        |  2
 kernel/locking/locktorture.c               | 14
 kernel/module.c                            |  4
 kernel/panic.c                             |  2
 kernel/power/swap.c                        |  6
 kernel/rcu/rcutorture.c                    |  5
 kernel/relay.c                             |  5
 kernel/sched/core.c                        |  4
 kernel/sched/fair.c                        |  4
 kernel/sched/rt.c                          |  4
 kernel/sched/topology.c                    |  2
 kernel/sysctl.c                            |  3
 kernel/trace/ftrace.c                      | 28
 kernel/trace/trace.c                       | 12
 kernel/trace/trace_events_filter.c         |  6
 kernel/trace/tracing_map.c                 |  2
 kernel/user_namespace.c                    |  5
 kernel/workqueue.c                         |  2
 34 files changed, 132 insertions(+), 91 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 8653ab004c73..2d49d18b793a 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -608,7 +608,7 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
new_size = min_t(u32, BTF_MAX_TYPE,
btf->types_size + expand_by);
- new_types = kvzalloc(new_size * sizeof(*new_types),
+ new_types = kvcalloc(new_size, sizeof(*new_types),
GFP_KERNEL | __GFP_NOWARN);
if (!new_types)
return -ENOMEM;
@@ -698,17 +698,17 @@ static int env_resolve_init(struct btf_verifier_env *env)
u8 *visit_states = NULL;
/* +1 for btf_void */
- resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
+ resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_sizes)
goto nomem;
- resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
+ resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_ids)
goto nomem;
- visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
+ visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
GFP_KERNEL | __GFP_NOWARN);
if (!visit_states)
goto nomem;
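
The pattern above recurs throughout this series: kvzalloc(n * size, ...) can silently wrap when the multiplication overflows, yielding an undersized buffer that later writes overrun. kvcalloc() (and kcalloc()/kmalloc_array() in the hunks below) performs a checked multiply first. A minimal sketch of the idea, assuming the check_mul_overflow() helper from include/linux/overflow.h; this is illustrative only, not the kernel's actual implementation:

/*
 * Illustrative sketch only (not the real implementation): kvcalloc()
 * is effectively a checked-multiply front end to kvzalloc(),
 * returning NULL instead of allocating a truncated buffer.
 */
static inline void *kvcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;	/* n * size wrapped; refuse to allocate */

	return kvzalloc(bytes, flags);
}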
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index b4b5b81e7251..1603492c9cc7 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -623,8 +623,9 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
if (!key || key->prefixlen > trie->max_prefixlen)
goto find_leftmost;
- node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
- GFP_ATOMIC | __GFP_NOWARN);
+ node_stack = kmalloc_array(trie->max_prefixlen,
+ sizeof(struct lpm_trie_node *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!node_stack)
return -ENOMEM;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cced0c1e63e2..9e2bf834f13a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5206,7 +5206,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
if (cnt == 1)
return 0;
- new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+ new_data = vzalloc(array_size(prog_len,
+ sizeof(struct bpf_insn_aux_data)));
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
@@ -5447,7 +5448,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
insn->imm = 1;
}
- func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
+ func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
return -ENOMEM;
@@ -5870,8 +5871,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
return -ENOMEM;
log = &env->log;
- env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
- (*prog)->len);
+ env->insn_aux_data =
+ vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
+ (*prog)->len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
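
vmalloc() and vzalloc() have no *calloc() counterparts, so these callers switch to the array_size() helper instead. It saturates to SIZE_MAX on overflow, and an allocation of SIZE_MAX then fails cleanly. Roughly, again assuming check_mul_overflow() (a sketch of the semantics, not the actual macro):

/* Sketch of array_size() semantics: saturate, never wrap. */
static inline size_t array_size_sketch(size_t a, size_t b)
{
	size_t bytes;

	if (check_mul_overflow(a, b, &bytes))
		return SIZE_MAX;	/* vzalloc(SIZE_MAX) fails cleanly */

	return bytes;
}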
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index e06c97f3ed1a..8b4f0768efd6 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -195,9 +195,9 @@ struct cgroup_pidlist {
static void *pidlist_allocate(int count)
{
if (PIDLIST_TOO_LARGE(count))
- return vmalloc(count * sizeof(pid_t));
+ return vmalloc(array_size(count, sizeof(pid_t)));
else
- return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+ return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}
static void pidlist_free(void *p)
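
pidlist_allocate() open-codes a kmalloc-or-vmalloc split that kvmalloc_array() (available since v4.12) already provides, overflow check included. A hypothetical simplification, not part of this patch, might be:

/* Hypothetical alternative (not in this patch): kvmalloc_array()
 * combines the overflow check with the kmalloc -> vmalloc fallback;
 * the result is freed with kvfree(). */
static void *pidlist_allocate(int count)
{
	return kvmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}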
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index b42037e6e81d..d8b12e0d39cd 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -683,7 +683,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
goto done;
}
- csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
+ csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
@@ -753,7 +753,8 @@ restart:
* The rest of the code, including the scheduler, can deal with
* dattr==NULL case. No need to abort if alloc fails.
*/
- dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
+ dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
+ GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 946fb92418f7..81e9af7dcec2 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -12,7 +12,7 @@ CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
CONFIG_CPU_SW_DOMAIN_PAN=y
CONFIG_DM_CRYPT=y
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index 9bfdffc100da..7fa0c4ae6394 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -10,7 +10,3 @@ CONFIG_OPTIMIZE_INLINING=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
-CONFIG_CC_STACKPROTECTOR_NONE=y
-# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
-# CONFIG_CC_STACKPROTECTOR_STRONG is not set
-# CONFIG_CC_STACKPROTECTOR_AUTO is not set
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index e405677ee08d..2ddfce8f1e8f 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -691,7 +691,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
}
if (!s->usable)
return KDB_NOTIMP;
- s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+ s->command = kcalloc(s->count + 1, sizeof(*(s->command)), GFP_KDB);
if (!s->command) {
kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
cmdstr);
@@ -729,8 +729,8 @@ static int kdb_defcmd(int argc, const char **argv)
kdb_printf("Command only available during kdb_init()\n");
return KDB_NOTIMP;
}
- defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
- GFP_KDB);
+ defcmd_set = kmalloc_array(defcmd_set_count + 1, sizeof(*defcmd_set),
+ GFP_KDB);
if (!defcmd_set)
goto fail_defcmd;
memcpy(defcmd_set, save_defcmd_set,
@@ -2706,8 +2706,11 @@ int kdb_register_flags(char *cmd,
}
if (i >= kdb_max_commands) {
- kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
- kdb_command_extend) * sizeof(*new), GFP_KDB);
+ kdbtab_t *new = kmalloc_array(kdb_max_commands -
+ KDB_BASE_CMD_MAX +
+ kdb_command_extend,
+ sizeof(*new),
+ GFP_KDB);
if (!new) {
kdb_printf("Could not allocate new kdb_command "
"table\n");
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 1d8ca9ea9979..045a37e9ddee 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -614,7 +614,8 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
}
}
- rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
+ rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ node);
if (!rb->aux_pages)
return -ENOMEM;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1725b902983f..ccc579a7d32e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1184,7 +1184,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (unlikely(!area))
goto out;
- area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
+ area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
+ GFP_KERNEL);
if (!area->bitmap)
goto free_area;
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 1d5632d8bbcc..5349c91c2298 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -258,7 +258,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
/* cut off if it is too long */
if (count > KSYM_NAME_LEN)
count = KSYM_NAME_LEN;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+ buf = kmalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/kernel/fork.c b/kernel/fork.c
index 08c6e5e217a0..9440d61b925c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -440,6 +440,14 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
continue;
}
charge = 0;
+ /*
+ * Don't duplicate many vmas if we've been oom-killed (for
+ * example).
+ */
+ if (fatal_signal_pending(current)) {
+ retval = -EINTR;
+ goto out;
+ }
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned long len = vma_pages(mpnt);
@@ -811,7 +819,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 1276aabaab55..1e3823fa799b 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -53,23 +53,16 @@ config GCOV_PROFILE_ALL
choice
prompt "Specify GCOV format"
depends on GCOV_KERNEL
- default GCOV_FORMAT_AUTODETECT
---help---
- The gcov format is usually determined by the GCC version, but there are
+ The gcov format is usually determined by the GCC version, and the
+ default is chosen according to your GCC version. However, there are
exceptions where format changes are integrated in lower-version GCCs.
- In such a case use this option to adjust the format used in the kernel
- accordingly.
-
- If unsure, choose "Autodetect".
-
-config GCOV_FORMAT_AUTODETECT
- bool "Autodetect"
- ---help---
- Select this option to use the format that corresponds to your GCC
- version.
+ In such a case, change this option to adjust the format used in the
+ kernel accordingly.
config GCOV_FORMAT_3_4
bool "GCC 3.4 format"
+ depends on CC_IS_GCC && GCC_VERSION < 40700
---help---
Select this option to use the format defined by GCC 3.4.
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index c6c50e5c680e..ff06d64df397 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -4,5 +4,3 @@ ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
obj-y := base.o fs.o
obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
-obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
- gcc_3_4.o, gcc_4_7.o)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 2c16f1ab5e10..3ebd09efe72a 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -58,7 +58,7 @@ struct kcov {
static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
- enum kcov_mode mode;
+ unsigned int mode;
/*
* We are interested in code coverage as a function of a syscall's inputs,
@@ -241,7 +241,8 @@ static void kcov_put(struct kcov *kcov)
void kcov_task_init(struct task_struct *t)
{
- t->kcov_mode = KCOV_MODE_DISABLED;
+ WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
+ barrier();
t->kcov_size = 0;
t->kcov_area = NULL;
t->kcov = NULL;
@@ -323,6 +324,21 @@ static int kcov_close(struct inode *inode, struct file *filep)
return 0;
}
+/*
+ * Fault in a lazily-faulted vmalloc area before it can be used by
+ * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
+ * vmalloc fault handling path is instrumented.
+ */
+static void kcov_fault_in_area(struct kcov *kcov)
+{
+ unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
+ unsigned long *area = kcov->area;
+ unsigned long offset;
+
+ for (offset = 0; offset < kcov->size; offset += stride)
+ READ_ONCE(area[offset]);
+}
+
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
{
@@ -371,6 +387,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
#endif
else
return -EINVAL;
+ kcov_fault_in_area(kcov);
/* Cache in task struct for performance. */
t->kcov_size = kcov->size;
t->kcov_area = kcov->area;
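
The pre-faulting matters because the vmalloc'd coverage buffer is written by __sanitizer_cov_trace_pc() from almost any kernel context; touching one word per page up front syncs the page tables so no vmalloc fault can recur inside instrumented fault-handling code. For context, a typical user-space enable sequence (abridged from Documentation/dev-tools/kcov.rst, error handling omitted):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_TRACE_PC	0
#define COVER_SIZE	(64 << 10)	/* entries, not bytes */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover;

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	/* ... exercise the code under test; cover[0] holds the count ... */
	return 0;
}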
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 20fef1a38602..23a83a4da38a 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -829,6 +829,8 @@ static int kimage_load_normal_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
+
+ cond_resched();
}
out:
return result;
@@ -893,6 +895,8 @@ static int kimage_load_crash_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
+
+ cond_resched();
}
out:
return result;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 75d8e7cf040e..c6a3b6851372 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -793,7 +793,7 @@ static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
* The section headers in kexec_purgatory are read-only. In order to
* have them modifiable, make a temporary copy.
*/
- sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+ sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
if (!sechdrs)
return -ENOMEM;
memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 6850ffd69125..8402b3349dca 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -913,7 +913,9 @@ static int __init lock_torture_init(void)
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
+ sizeof(*cxt.lwsa),
+ GFP_KERNEL);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
@@ -942,7 +944,9 @@ static int __init lock_torture_init(void)
if (nreaders_stress) {
lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
+ sizeof(*cxt.lrsa),
+ GFP_KERNEL);
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
@@ -985,7 +989,8 @@ static int __init lock_torture_init(void)
}
if (nwriters_stress) {
- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+ writer_tasks = kcalloc(cxt.nrealwriters_stress,
+ sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
@@ -995,7 +1000,8 @@ static int __init lock_torture_init(void)
}
if (cxt.cur_ops->readlock) {
- reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
+ reader_tasks = kcalloc(cxt.nrealreaders_stress,
+ sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
diff --git a/kernel/module.c b/kernel/module.c
index 68469b37d61a..f475f30eed8c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -274,9 +274,7 @@ static void module_assert_mutex_or_preempt(void)
}
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
-#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
-#endif /* !CONFIG_MODULE_SIG_FORCE */
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
@@ -2785,7 +2783,7 @@ static int module_sig_check(struct load_info *info, int flags)
}
/* Not having a signature is only an error if we're strict. */
- if (err == -ENOKEY && !sig_enforce)
+ if (err == -ENOKEY && !is_module_sig_enforced())
err = 0;
return err;
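
With the #ifndef gone, sig_enforce is always exposed as a parameter (bool_enable_only means it can be turned on at runtime but never back off), and the signature check reads it through an accessor so other subsystems can query the same state. Presumably the accessor is a trivial wrapper; a sketch under that assumption:

/* Sketch, assuming is_module_sig_enforced() simply reports the flag. */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}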
diff --git a/kernel/panic.c b/kernel/panic.c
index 42e487488554..8b2e002d52eb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -623,7 +623,7 @@ static __init int register_warn_debugfs(void)
device_initcall(register_warn_debugfs);
#endif
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
/*
* Called when gcc's -fstack-protector feature is used, and
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 1efcb5b0c3ed..c2bcf97d24c8 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -698,7 +698,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
@@ -1183,14 +1183,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
- page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
if (!page) {
pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index e628fcfd1bde..42fcb7f05fac 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -831,8 +831,9 @@ rcu_torture_cbflood(void *arg)
cbflood_intra_holdoff > 0 &&
cur_ops->call &&
cur_ops->cb_barrier) {
- rhp = vmalloc(sizeof(*rhp) *
- cbflood_n_burst * cbflood_n_per_burst);
+ rhp = vmalloc(array3_size(cbflood_n_burst,
+ cbflood_n_per_burst,
+ sizeof(*rhp)));
err = !rhp;
}
if (err) {
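
Three-factor products use array3_size(), which chains two saturating multiplies. Illustratively (same caveat: a sketch of the semantics, not the real macro):

/* Sketch of array3_size(): any intermediate overflow saturates. */
static inline size_t array3_size_sketch(size_t a, size_t b, size_t c)
{
	size_t bytes;

	if (check_mul_overflow(a, b, &bytes))
		return SIZE_MAX;
	if (check_mul_overflow(bytes, c, &bytes))
		return SIZE_MAX;

	return bytes;
}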
diff --git a/kernel/relay.c b/kernel/relay.c
index c955b10c973c..04f248644e06 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -39,7 +39,7 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
/*
* fault() vm_op implementation for relay file mapping.
*/
-static int relay_buf_fault(struct vm_fault *vmf)
+static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
{
struct page *page;
struct rchan_buf *buf = vmf->vma->vm_private_data;
@@ -169,7 +169,8 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
return NULL;
- buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
+ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
+ GFP_KERNEL);
if (!buf->padding)
goto free_buf;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a98d54cd5535..78d8facba456 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10,6 +10,8 @@
#include <linux/kthread.h>
#include <linux/nospec.h>
+#include <linux/kcov.h>
+
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -2633,6 +2635,7 @@ static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+ kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
@@ -2702,6 +2705,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_task(prev);
finish_lock_switch(rq);
finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
fire_sched_in_preempt_notifiers(current);
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e497c05aab7f..1866e64792a7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10215,10 +10215,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
struct cfs_rq *cfs_rq;
int i;
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
+ tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
+ tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
if (!tg->se)
goto err;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ef3c4e6f5345..47556b0c9a95 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -183,10 +183,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
struct sched_rt_entity *rt_se;
int i;
- tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
+ tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
if (!tg->rt_rq)
goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
+ tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
if (!tg->rt_se)
goto err;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 61a1125c1ae4..05a831427bc7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1750,7 +1750,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
int i;
cpumask_var_t *doms;
- doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+ doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6a78cf70761d..2d9837c0aff4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -3047,7 +3047,8 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
- tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long),
+ tmp_bitmap = kcalloc(BITS_TO_LONGS(bitmap_len),
+ sizeof(unsigned long),
GFP_KERNEL);
if (!tmp_bitmap) {
kfree(kbuf);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8d83bcf9ef69..efed9c1cfb7e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -728,7 +728,7 @@ static int ftrace_profile_init_cpu(int cpu)
*/
size = FTRACE_PROFILE_HASH_SIZE;
- stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+ stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
if (!stat->hash)
return -ENOMEM;
@@ -6830,9 +6830,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
struct task_struct *g, *t;
for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
- ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack_list[i] =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack_list[i]) {
start = 0;
end = i;
@@ -6904,9 +6905,9 @@ static int start_graph_tracing(void)
struct ftrace_ret_stack **ret_stack_list;
int ret, cpu;
- ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
- sizeof(struct ftrace_ret_stack *),
- GFP_KERNEL);
+ ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
+ sizeof(struct ftrace_ret_stack *),
+ GFP_KERNEL);
if (!ret_stack_list)
return -ENOMEM;
@@ -7088,9 +7089,10 @@ void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
ret_stack = per_cpu(idle_ret_stack, cpu);
if (!ret_stack) {
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
per_cpu(idle_ret_stack, cpu) = ret_stack;
@@ -7109,9 +7111,9 @@ void ftrace_graph_init_task(struct task_struct *t)
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
graph_init_task(t, ret_stack);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 108ce3e1dc13..c9336e98ac59 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1751,12 +1751,13 @@ static inline void set_cmdline(int idx, const char *cmdline)
static int allocate_cmdlines_buffer(unsigned int val,
struct saved_cmdlines_buffer *s)
{
- s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
- GFP_KERNEL);
+ s->map_cmdline_to_pid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_pid),
+ GFP_KERNEL);
if (!s->map_cmdline_to_pid)
return -ENOMEM;
- s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+ s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
if (!s->saved_cmdlines) {
kfree(s->map_cmdline_to_pid);
return -ENOMEM;
@@ -4360,7 +4361,8 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_TGID) {
if (!tgid_map)
- tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
+ tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
+ sizeof(*tgid_map),
GFP_KERNEL);
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
@@ -5063,7 +5065,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
* where the head holds the module and length of array, and the
* tail holds a pointer to the next list.
*/
- map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
+ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
if (!map_array) {
pr_warn("Unable to allocate trace eval mapping\n");
return;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0171407d231f..e1c818dbc0d7 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -436,15 +436,15 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
nr_preds += 2; /* For TRUE and FALSE */
- op_stack = kmalloc(sizeof(*op_stack) * nr_parens, GFP_KERNEL);
+ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL);
if (!op_stack)
return ERR_PTR(-ENOMEM);
- prog_stack = kmalloc(sizeof(*prog_stack) * nr_preds, GFP_KERNEL);
+ prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL);
if (!prog_stack) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
}
- inverts = kmalloc(sizeof(*inverts) * nr_preds, GFP_KERNEL);
+ inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL);
if (!inverts) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 5cadb1b8b5fe..752d8042bad4 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -1075,7 +1075,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
struct tracing_map_sort_entry *sort_entry, **entries;
int i, n_entries, ret;
- entries = vmalloc(map->max_elts * sizeof(sort_entry));
+ entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
if (!entries)
return -ENOMEM;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 492c255e6c5a..c3d7583fcd21 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -764,8 +764,9 @@ static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
struct uid_gid_extent *forward;
/* Allocate memory for 340 mappings. */
- forward = kmalloc(sizeof(struct uid_gid_extent) *
- UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
+ forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL);
if (!forward)
return -ENOMEM;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 465a28b4cd32..78b192071ef7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5638,7 +5638,7 @@ static void __init wq_numa_init(void)
* available. Build one from cpu_to_node() which should have been
* fully initialized by now.
*/
- tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
+ tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
BUG_ON(!tbl);
for_each_node(node)