author     Stefan Schmidt <stefan@datenfreihafen.org>   2021-02-24 13:36:58 +0100
committer  Stefan Schmidt <stefan@datenfreihafen.org>   2021-02-24 13:36:58 +0100
commit     cdd38c5f1ce4398ec58fec95904b75824daab7b5 (patch)
tree       639cf51fe8ee120a13e61b13d448aeaf4d044c74 /kernel
parent     04052a318fb93491f1f3b4d282cb806f588e9326 (diff)
parent     fcb3007371e1a4afb03280af1b336a83287fe115 (diff)
Merge remote-tracking branch 'net/master'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.preempt | 19
-rw-r--r--  kernel/Makefile | 5
-rw-r--r--  kernel/acct.c | 7
-rw-r--r--  kernel/audit.c | 9
-rw-r--r--  kernel/audit_fsnotify.c | 2
-rw-r--r--  kernel/audit_tree.c | 2
-rw-r--r--  kernel/audit_watch.c | 2
-rw-r--r--  kernel/auditsc.c | 42
-rw-r--r--  kernel/bpf/Makefile | 7
-rw-r--r--  kernel/bpf/arraymap.c | 30
-rw-r--r--  kernel/bpf/bpf_inode_storage.c | 15
-rw-r--r--  kernel/bpf/bpf_iter.c | 16
-rw-r--r--  kernel/bpf/bpf_local_storage.c | 20
-rw-r--r--  kernel/bpf/bpf_lru_list.c | 7
-rw-r--r--  kernel/bpf/bpf_lsm.c | 162
-rw-r--r--  kernel/bpf/bpf_struct_ops.c | 19
-rw-r--r--  kernel/bpf/bpf_task_storage.c | 318
-rw-r--r--  kernel/bpf/btf.c | 586
-rw-r--r--  kernel/bpf/cgroup.c | 130
-rw-r--r--  kernel/bpf/core.c | 133
-rw-r--r--  kernel/bpf/cpumap.c | 83
-rw-r--r--  kernel/bpf/devmap.c | 29
-rw-r--r--  kernel/bpf/disasm.c | 43
-rw-r--r--  kernel/bpf/hashtab.c | 224
-rw-r--r--  kernel/bpf/helpers.c | 31
-rw-r--r--  kernel/bpf/local_storage.c | 44
-rw-r--r--  kernel/bpf/lpm_trie.c | 19
-rw-r--r--  kernel/bpf/preload/Kconfig | 1
-rw-r--r--  kernel/bpf/preload/Makefile | 5
-rw-r--r--  kernel/bpf/preload/iterators/iterators.c | 2
-rw-r--r--  kernel/bpf/queue_stack_maps.c | 16
-rw-r--r--  kernel/bpf/reuseport_array.c | 12
-rw-r--r--  kernel/bpf/ringbuf.c | 35
-rw-r--r--  kernel/bpf/stackmap.c | 163
-rw-r--r--  kernel/bpf/syscall.c | 359
-rw-r--r--  kernel/bpf/sysfs_btf.c | 2
-rw-r--r--  kernel/bpf/task_iter.c | 355
-rw-r--r--  kernel/bpf/trampoline.c | 77
-rw-r--r--  kernel/bpf/verifier.c | 1597
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 5
-rw-r--r--  kernel/cgroup/cgroup.c | 48
-rw-r--r--  kernel/cgroup/cpuset.c | 33
-rw-r--r--  kernel/cgroup/namespace.c | 2
-rw-r--r--  kernel/configs/android-recommended.config | 2
-rw-r--r--  kernel/cpu.c | 22
-rw-r--r--  kernel/crash_core.c | 3
-rw-r--r--  kernel/debug/debug_core.c | 6
-rw-r--r--  kernel/dma/Kconfig | 18
-rw-r--r--  kernel/dma/Makefile | 2
-rw-r--r--  kernel/dma/contiguous.c | 2
-rw-r--r--  kernel/dma/direct.c | 1
-rw-r--r--  kernel/dma/map_benchmark.c | 365
-rw-r--r--  kernel/dma/mapping.c | 12
-rw-r--r--  kernel/dma/pool.c | 3
-rw-r--r--  kernel/dma/swiotlb.c | 42
-rw-r--r--  kernel/dma/virt.c | 61
-rw-r--r--  kernel/elfcore.c | 26
-rw-r--r--  kernel/entry/Makefile | 2
-rw-r--r--  kernel/entry/common.c | 217
-rw-r--r--  kernel/entry/common.h | 7
-rw-r--r--  kernel/entry/kvm.c | 3
-rw-r--r--  kernel/entry/syscall_user_dispatch.c | 104
-rw-r--r--  kernel/events/core.c | 473
-rw-r--r--  kernel/events/internal.h | 16
-rw-r--r--  kernel/events/ring_buffer.c | 20
-rw-r--r--  kernel/events/uprobes.c | 82
-rw-r--r--  kernel/exit.c | 23
-rw-r--r--  kernel/fail_function.c | 11
-rw-r--r--  kernel/fork.c | 62
-rw-r--r--  kernel/futex.c | 259
-rw-r--r--  kernel/gcov/Kconfig | 2
-rw-r--r--  kernel/gcov/gcc_4_7.c | 10
-rw-r--r--  kernel/hung_task.c | 3
-rw-r--r--  kernel/irq/Kconfig | 6
-rw-r--r--  kernel/irq/chip.c | 29
-rw-r--r--  kernel/irq/generic-chip.c | 2
-rw-r--r--  kernel/irq/irqdesc.c | 96
-rw-r--r--  kernel/irq/irqdomain.c | 58
-rw-r--r--  kernel/irq/manage.c | 108
-rw-r--r--  kernel/irq/msi.c | 46
-rw-r--r--  kernel/irq/proc.c | 5
-rw-r--r--  kernel/irq/resend.c | 4
-rw-r--r--  kernel/irq_work.c | 45
-rw-r--r--  kernel/jump_label.c | 8
-rw-r--r--  kernel/kcmp.c | 59
-rw-r--r--  kernel/kcov.c | 2
-rw-r--r--  kernel/kcsan/core.c | 26
-rw-r--r--  kernel/kcsan/encoding.h | 20
-rw-r--r--  kernel/kcsan/selftest.c | 3
-rw-r--r--  kernel/kexec_core.c | 5
-rw-r--r--  kernel/kexec_file.c | 2
-rw-r--r--  kernel/kprobes.c | 333
-rw-r--r--  kernel/kthread.c | 82
-rw-r--r--  kernel/livepatch/Kconfig | 2
-rw-r--r--  kernel/livepatch/patch.c | 15
-rw-r--r--  kernel/locking/Makefile | 1
-rw-r--r--  kernel/locking/irqflag-debug.c | 13
-rw-r--r--  kernel/locking/lock_events_list.h | 6
-rw-r--r--  kernel/locking/lockdep.c | 240
-rw-r--r--  kernel/locking/locktorture.c | 37
-rw-r--r--  kernel/locking/mutex.c | 10
-rw-r--r--  kernel/locking/rtmutex.c | 74
-rw-r--r--  kernel/locking/rtmutex_common.h | 3
-rw-r--r--  kernel/locking/rwsem.c | 383
-rw-r--r--  kernel/locking/rwsem.h | 0
-rw-r--r--  kernel/module.c | 236
-rw-r--r--  kernel/nsproxy.c | 13
-rw-r--r--  kernel/panic.c | 3
-rw-r--r--  kernel/params.c | 12
-rw-r--r--  kernel/pid.c | 6
-rw-r--r--  kernel/pid_namespace.c | 13
-rw-r--r--  kernel/power/Kconfig | 12
-rw-r--r--  kernel/power/energy_model.c | 26
-rw-r--r--  kernel/power/hibernate.c | 2
-rw-r--r--  kernel/power/main.c | 2
-rw-r--r--  kernel/power/power.h | 2
-rw-r--r--  kernel/power/process.c | 4
-rw-r--r--  kernel/power/snapshot.c | 52
-rw-r--r--  kernel/power/suspend.c | 2
-rw-r--r--  kernel/power/swap.c | 2
-rw-r--r--  kernel/printk/printk.c | 303
-rw-r--r--  kernel/printk/printk_ringbuffer.c | 38
-rw-r--r--  kernel/ptrace.c | 34
-rw-r--r--  kernel/rcu/Kconfig | 25
-rw-r--r--  kernel/rcu/rcu.h | 32
-rw-r--r--  kernel/rcu/rcu_segcblist.c | 216
-rw-r--r--  kernel/rcu/rcu_segcblist.h | 57
-rw-r--r--  kernel/rcu/rcuscale.c | 37
-rw-r--r--  kernel/rcu/rcutorture.c | 443
-rw-r--r--  kernel/rcu/refscale.c | 34
-rw-r--r--  kernel/rcu/srcutiny.c | 77
-rw-r--r--  kernel/rcu/srcutree.c | 153
-rw-r--r--  kernel/rcu/tasks.h | 153
-rw-r--r--  kernel/rcu/tree.c | 359
-rw-r--r--  kernel/rcu/tree.h | 6
-rw-r--r--  kernel/rcu/tree_exp.h | 2
-rw-r--r--  kernel/rcu/tree_plugin.h | 400
-rw-r--r--  kernel/rcu/tree_stall.h | 88
-rw-r--r--  kernel/rcu/update.c | 4
-rw-r--r--  kernel/reboot.c | 246
-rw-r--r--  kernel/relay.c | 109
-rw-r--r--  kernel/resource.c | 34
-rw-r--r--  kernel/resource_kunit.c | 152
-rw-r--r--  kernel/scftorture.c | 55
-rw-r--r--  kernel/sched/core.c | 1601
-rw-r--r--  kernel/sched/cpudeadline.c | 4
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 237
-rw-r--r--  kernel/sched/cpupri.c | 52
-rw-r--r--  kernel/sched/cpupri.h | 8
-rw-r--r--  kernel/sched/cputime.c | 36
-rw-r--r--  kernel/sched/deadline.c | 292
-rw-r--r--  kernel/sched/debug.c | 14
-rw-r--r--  kernel/sched/fair.c | 512
-rw-r--r--  kernel/sched/features.h | 2
-rw-r--r--  kernel/sched/idle.c | 36
-rw-r--r--  kernel/sched/membarrier.c | 224
-rw-r--r--  kernel/sched/rt.c | 101
-rw-r--r--  kernel/sched/sched.h | 199
-rw-r--r--  kernel/sched/stop_task.c | 5
-rw-r--r--  kernel/sched/topology.c | 160
-rw-r--r--  kernel/sched/wait.c | 17
-rw-r--r--  kernel/scs.c | 71
-rw-r--r--  kernel/seccomp.c | 307
-rw-r--r--  kernel/signal.c | 105
-rw-r--r--  kernel/smp.c | 56
-rw-r--r--  kernel/smpboot.c | 1
-rw-r--r--  kernel/softirq.c | 129
-rw-r--r--  kernel/static_call.c | 60
-rw-r--r--  kernel/stop_machine.c | 29
-rw-r--r--  kernel/sys.c | 6
-rw-r--r--  kernel/sys_ni.c | 2
-rw-r--r--  kernel/sysctl.c | 11
-rw-r--r--  kernel/task_work.c | 13
-rw-r--r--  kernel/taskstats.c | 2
-rw-r--r--  kernel/time/Kconfig | 20
-rw-r--r--  kernel/time/Makefile | 1
-rw-r--r--  kernel/time/alarmtimer.c | 8
-rw-r--r--  kernel/time/clocksource.c | 8
-rw-r--r--  kernel/time/hrtimer.c | 7
-rw-r--r--  kernel/time/itimer.c | 4
-rw-r--r--  kernel/time/jiffies.c | 3
-rw-r--r--  kernel/time/namespace.c | 21
-rw-r--r--  kernel/time/ntp.c | 229
-rw-r--r--  kernel/time/ntp_internal.h | 7
-rw-r--r--  kernel/time/sched_clock.c | 4
-rw-r--r--  kernel/time/tick-broadcast.c | 25
-rw-r--r--  kernel/time/tick-common.c | 22
-rw-r--r--  kernel/time/tick-internal.h | 1
-rw-r--r--  kernel/time/tick-legacy.c | 37
-rw-r--r--  kernel/time/tick-sched.c | 135
-rw-r--r--  kernel/time/timeconv.c | 6
-rw-r--r--  kernel/time/timekeeping.c | 129
-rw-r--r--  kernel/time/timekeeping.h | 3
-rw-r--r--  kernel/time/timer.c | 76
-rw-r--r--  kernel/time/timer_list.c | 66
-rw-r--r--  kernel/torture.c | 173
-rw-r--r--  kernel/trace/Kconfig | 74
-rw-r--r--  kernel/trace/Makefile | 1
-rw-r--r--  kernel/trace/blktrace.c | 189
-rw-r--r--  kernel/trace/bpf_trace.c | 70
-rw-r--r--  kernel/trace/fgraph.c | 5
-rw-r--r--  kernel/trace/ftrace.c | 79
-rw-r--r--  kernel/trace/ring_buffer.c | 299
-rw-r--r--  kernel/trace/synth_event_gen_test.c | 2
-rw-r--r--  kernel/trace/trace.c | 76
-rw-r--r--  kernel/trace/trace.h | 162
-rw-r--r--  kernel/trace/trace_benchmark.c | 6
-rw-r--r--  kernel/trace/trace_boot.c | 2
-rw-r--r--  kernel/trace/trace_dynevent.c | 2
-rw-r--r--  kernel/trace/trace_dynevent.h | 6
-rw-r--r--  kernel/trace/trace_entries.h | 6
-rw-r--r--  kernel/trace/trace_event_perf.c | 15
-rw-r--r--  kernel/trace/trace_events.c | 20
-rw-r--r--  kernel/trace/trace_events_filter.c | 23
-rw-r--r--  kernel/trace/trace_events_hist.c | 2
-rw-r--r--  kernel/trace/trace_events_synth.c | 21
-rw-r--r--  kernel/trace/trace_export.c | 2
-rw-r--r--  kernel/trace/trace_functions.c | 23
-rw-r--r--  kernel/trace/trace_functions_graph.c | 2
-rw-r--r--  kernel/trace/trace_hwlat.c | 6
-rw-r--r--  kernel/trace/trace_irqsoff.c | 6
-rw-r--r--  kernel/trace/trace_kprobe.c | 24
-rw-r--r--  kernel/trace/trace_output.c | 6
-rw-r--r--  kernel/trace/trace_output.h | 1
-rw-r--r--  kernel/trace/trace_recursion_record.c | 236
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 2
-rw-r--r--  kernel/trace/trace_selftest.c | 38
-rw-r--r--  kernel/trace/trace_stack.c | 3
-rw-r--r--  kernel/trace/tracing_map.c | 6
-rw-r--r--  kernel/trace/tracing_map.h | 2
-rw-r--r--  kernel/tracepoint.c | 6
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/user_namespace.c | 4
-rw-r--r--  kernel/utsname.c | 7
-rw-r--r--  kernel/watchdog.c | 4
-rw-r--r--  kernel/workqueue.c | 38
236 files changed, 12670 insertions, 5827 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index bf82259cff96..416017301660 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -40,6 +40,7 @@ config PREEMPT
depends on !ARCH_NO_PREEMPT
select PREEMPTION
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ select PREEMPT_DYNAMIC if HAVE_PREEMPT_DYNAMIC
help
This option reduces the latency of the kernel by making
all kernel code (that is not executing in a critical section)
@@ -80,3 +81,21 @@ config PREEMPT_COUNT
config PREEMPTION
bool
select PREEMPT_COUNT
+
+config PREEMPT_DYNAMIC
+ bool
+ help
+ This option allows the preemption model to be selected on the
+ kernel command line, overriding the default preemption model
+ chosen at compile time.
+
+ The feature is primarily interesting for Linux distributions which
+ provide a pre-built kernel binary to reduce the number of kernel
+ flavors they offer while still covering different use cases.
+
+ The runtime overhead is negligible with HAVE_STATIC_CALL_INLINE enabled
+ but if runtime patching is not available for the specific architecture
+ then the potential overhead should be considered.
+
+ This is useful when the same pre-built kernel should be used for
+ both server and desktop workloads.
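
A minimal usage sketch of what this option enables (assuming the preempt= boot parameter this option is meant to pair with; the three values mirror the existing compile-time models):

	preempt=none		# behave like CONFIG_PREEMPT_NONE
	preempt=voluntary	# behave like CONFIG_PREEMPT_VOLUNTARY
	preempt=full		# behave like CONFIG_PREEMPT

With HAVE_STATIC_CALL_INLINE the switch is done by patching the preemption call sites, which is why the help text above calls the runtime overhead negligible.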
diff --git a/kernel/Makefile b/kernel/Makefile
index af601b9bda0e..aa7368c7eabf 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -34,8 +34,11 @@ KCOV_INSTRUMENT_extable.o := n
KCOV_INSTRUMENT_stacktrace.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
+# If sanitizers detect any issues in kcov, it may lead to recursion
+# via printk, etc.
KASAN_SANITIZE_kcov.o := n
KCSAN_SANITIZE_kcov.o := n
+UBSAN_SANITIZE_kcov.o := n
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
obj-y += sched/
@@ -97,7 +100,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_ELFCORE) += elfcore.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_TRACE_CLOCK) += trace/
@@ -123,6 +125,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o
obj-$(CONFIG_RSEQ) += rseq.o
obj-$(CONFIG_WATCH_QUEUE) += watch_queue.o
+obj-$(CONFIG_RESOURCE_KUNIT_TEST) += resource_kunit.o
obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
CFLAGS_stackleak.o += $(DISABLE_STACKLEAK_PLUGIN)
diff --git a/kernel/acct.c b/kernel/acct.c
index f175df8f6aa4..a64102be2bb0 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -381,9 +381,7 @@ static comp2_t encode_comp2_t(u64 value)
return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
}
}
-#endif
-
-#if ACCT_VERSION == 3
+#elif ACCT_VERSION == 3
/*
* encode an u64 into a 32 bit IEEE float
*/
@@ -500,8 +498,7 @@ static void do_acct_process(struct bsd_acct_struct *acct)
/* backward-compatible 16 bit fields */
ac.ac_uid16 = ac.ac_uid;
ac.ac_gid16 = ac.ac_gid;
-#endif
-#if ACCT_VERSION == 3
+#elif ACCT_VERSION == 3
{
struct pid_namespace *ns = acct->ns;
diff --git a/kernel/audit.c b/kernel/audit.c
index 68cee3bc8cfe..1ffc2e059027 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -67,7 +67,7 @@
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
-static int audit_initialized;
+static int audit_initialized = AUDIT_UNINITIALIZED;
u32 audit_enabled = AUDIT_OFF;
bool audit_ever_enabled = !!AUDIT_OFF;
@@ -523,7 +523,7 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net)
}
/**
- * kauditd_print_skb - Print the audit record to the ring buffer
+ * kauditd_printk_skb - Print the audit record to the ring buffer
* @skb: audit record
*
* Whatever the reason, this packet may not make it to the auditd connection
@@ -1779,7 +1779,7 @@ unsigned int audit_serial(void)
{
static atomic_t serial = ATOMIC_INIT(0);
- return atomic_add_return(1, &serial);
+ return atomic_inc_return(&serial);
}
static inline void audit_get_stamp(struct audit_context *ctx,
@@ -1865,6 +1865,9 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
}
audit_get_stamp(ab->ctx, &t, &serial);
+ /* cancel dummy context to enable supporting records */
+ if (ctx)
+ ctx->dummy = 0;
audit_log_format(ab, "audit(%llu.%03lu:%u): ",
(unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index bfcfcd61adb6..5b3f01da172b 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -154,7 +154,7 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark)
/* Update mark data in audit rules based on fsnotify events. */
static int audit_mark_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
struct inode *inode, struct inode *dir,
- const struct qstr *dname)
+ const struct qstr *dname, u32 cookie)
{
struct audit_fsnotify_mark *audit_mark;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 83e1c07fc99e..6c91902f4f45 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1037,7 +1037,7 @@ static void evict_chunk(struct audit_chunk *chunk)
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
struct inode *inode, struct inode *dir,
- const struct qstr *file_name)
+ const struct qstr *file_name, u32 cookie)
{
return 0;
}
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 246e5ba704c0..2acf7ca49154 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -466,7 +466,7 @@ void audit_remove_watch_rule(struct audit_krule *krule)
/* Update watch data in audit rules based on fsnotify events. */
static int audit_watch_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
struct inode *inode, struct inode *dir,
- const struct qstr *dname)
+ const struct qstr *dname, u32 cookie)
{
struct audit_parent *parent;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8dba8f0983b5..ce8c9e2279ba 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -102,8 +102,6 @@ struct audit_aux_data {
int type;
};
-#define AUDIT_AUX_IPCPERM 0
-
/* Number of target pids per aux struct. */
#define AUDIT_AUX_PIDS 16
@@ -552,11 +550,11 @@ static int audit_filter_rules(struct task_struct *tsk,
break;
case AUDIT_EXIT:
- if (ctx && ctx->return_valid)
+ if (ctx && ctx->return_valid != AUDITSC_INVALID)
result = audit_comparator(ctx->return_code, f->op, f->val);
break;
case AUDIT_SUCCESS:
- if (ctx && ctx->return_valid) {
+ if (ctx && ctx->return_valid != AUDITSC_INVALID) {
if (f->val)
result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS);
else
@@ -929,6 +927,8 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state)
context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
INIT_LIST_HEAD(&context->killed_trees);
INIT_LIST_HEAD(&context->names_list);
+ context->fds[0] = -1;
+ context->return_valid = AUDITSC_INVALID;
return context;
}
@@ -952,7 +952,7 @@ int audit_alloc(struct task_struct *tsk)
state = audit_filter_task(tsk, &key);
if (state == AUDIT_DISABLED) {
- clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
+ clear_task_syscall_work(tsk, SYSCALL_AUDIT);
return 0;
}
@@ -964,7 +964,7 @@ int audit_alloc(struct task_struct *tsk)
context->filterkey = key;
audit_set_context(tsk, context);
- set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
+ set_task_syscall_work(tsk, SYSCALL_AUDIT);
return 0;
}
@@ -1367,7 +1367,10 @@ static void audit_log_name(struct audit_context *context, struct audit_names *n,
/* name was specified as a relative path and the
* directory component is the cwd
*/
- audit_log_d_path(ab, " name=", &context->pwd);
+ if (context->pwd.dentry && context->pwd.mnt)
+ audit_log_d_path(ab, " name=", &context->pwd);
+ else
+ audit_log_format(ab, " name=(null)");
break;
default:
/* log the name's directory component */
@@ -1435,9 +1438,6 @@ static void audit_log_proctitle(void)
struct audit_context *context = audit_context();
struct audit_buffer *ab;
- if (!context || context->dummy)
- return;
-
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
if (!ab)
return; /* audit_panic or being filtered */
@@ -1487,7 +1487,7 @@ static void audit_log_exit(void)
context->arch, context->major);
if (context->personality != PER_LINUX)
audit_log_format(ab, " per=%lx", context->personality);
- if (context->return_valid)
+ if (context->return_valid != AUDITSC_INVALID)
audit_log_format(ab, " success=%s exit=%ld",
(context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
context->return_code);
@@ -1624,7 +1624,7 @@ void __audit_free(struct task_struct *tsk)
* need to log via audit_log_exit().
*/
if (tsk == current && !context->dummy && context->in_syscall) {
- context->return_valid = 0;
+ context->return_valid = AUDITSC_INVALID;
context->return_code = 0;
audit_filter_syscall(tsk, context,
@@ -1866,6 +1866,8 @@ static struct audit_names *audit_alloc_name(struct audit_context *context,
list_add_tail(&aname->list, &context->names_list);
context->name_count++;
+ if (!context->pwd.dentry)
+ get_fs_pwd(current->fs, &context->pwd);
return aname;
}
@@ -1894,20 +1896,6 @@ __audit_reusename(const __user char *uptr)
return NULL;
}
-inline void _audit_getcwd(struct audit_context *context)
-{
- if (!context->pwd.dentry)
- get_fs_pwd(current->fs, &context->pwd);
-}
-
-void __audit_getcwd(void)
-{
- struct audit_context *context = audit_context();
-
- if (context->in_syscall)
- _audit_getcwd(context);
-}
-
/**
* __audit_getname - add a name to the list
* @name: name to add
@@ -1931,8 +1919,6 @@ void __audit_getname(struct filename *name)
n->name_len = AUDIT_NAME_FULL;
name->aname = n;
name->refcnt++;
-
- _audit_getcwd(context);
}
static inline int audit_copy_fcaps(struct audit_names *name,
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index bdc8cd1b6767..d1249340fd6b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,11 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := core.o
-CFLAGS_core.o += $(call cc-disable-warning, override-init)
+ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
+# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
+cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
+endif
+CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
+obj-${CONFIG_BPF_LSM} += bpf_task_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index c6c81eceb68f..1f8453343bf2 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -34,8 +34,8 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
int i;
for (i = 0; i < array->map.max_entries; i++) {
- ptr = __alloc_percpu_gfp(array->elem_size, 8,
- GFP_USER | __GFP_NOWARN);
+ ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
+ GFP_USER | __GFP_NOWARN);
if (!ptr) {
bpf_array_free_percpu(array);
return -ENOMEM;
@@ -81,11 +81,10 @@ int array_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
- int ret, numa_node = bpf_map_attr_numa_node(attr);
+ int numa_node = bpf_map_attr_numa_node(attr);
u32 elem_size, index_mask, max_entries;
bool bypass_spec_v1 = bpf_bypass_spec_v1();
- u64 cost, array_size, mask64;
- struct bpf_map_memory mem;
+ u64 array_size, mask64;
struct bpf_array *array;
elem_size = round_up(attr->value_size, 8);
@@ -126,44 +125,29 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
}
}
- /* make sure there is no u32 overflow later in round_up() */
- cost = array_size;
- if (percpu)
- cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-
- ret = bpf_map_charge_init(&mem, cost);
- if (ret < 0)
- return ERR_PTR(ret);
-
/* allocate all map elements and zero-initialize them */
if (attr->map_flags & BPF_F_MMAPABLE) {
void *data;
/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
data = bpf_map_area_mmapable_alloc(array_size, numa_node);
- if (!data) {
- bpf_map_charge_finish(&mem);
+ if (!data)
return ERR_PTR(-ENOMEM);
- }
array = data + PAGE_ALIGN(sizeof(struct bpf_array))
- offsetof(struct bpf_array, value);
} else {
array = bpf_map_area_alloc(array_size, numa_node);
}
- if (!array) {
- bpf_map_charge_finish(&mem);
+ if (!array)
return ERR_PTR(-ENOMEM);
- }
array->index_mask = index_mask;
array->map.bypass_spec_v1 = bypass_spec_v1;
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- bpf_map_charge_move(&array->map.memory, &mem);
array->elem_size = elem_size;
if (percpu && bpf_array_alloc_percpu(array)) {
- bpf_map_charge_finish(&array->map.memory);
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
@@ -1018,7 +1002,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
struct bpf_array_aux *aux;
struct bpf_map *map;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
if (!aux)
return ERR_PTR(-ENOMEM);
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 6edff97ad594..6639640523c0 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -125,8 +125,12 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
fd = *(int *)key;
f = fget_raw(fd);
- if (!f || !inode_storage_ptr(f->f_inode))
+ if (!f)
+ return -EBADF;
+ if (!inode_storage_ptr(f->f_inode)) {
+ fput(f);
return -EBADF;
+ }
sdata = bpf_local_storage_update(f->f_inode,
(struct bpf_local_storage_map *)map,
@@ -176,14 +180,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
- if (!inode_storage_ptr(inode))
+ if (!inode || !inode_storage_ptr(inode))
return (unsigned long)NULL;
sdata = inode_storage_lookup(inode, map, true);
if (sdata)
return (unsigned long)sdata->data;
- /* This helper must only called from where the inode is gurranteed
+ /* This helper must only called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +204,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
BPF_CALL_2(bpf_inode_storage_delete,
struct bpf_map *, map, struct inode *, inode)
{
- /* This helper must only called from where the inode is gurranteed
+ if (!inode)
+ return -EINVAL;
+
+ /* This helper must only called from where the inode is guaranteed
* to have a refcount and cannot be freed.
*/
return inode_storage_delete(inode, map);
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 8f10e30ea0b0..a0d9eade9c80 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -67,6 +67,15 @@ static void bpf_iter_done_stop(struct seq_file *seq)
iter_priv->done_stop = true;
}
+static bool bpf_iter_support_resched(struct seq_file *seq)
+{
+ struct bpf_iter_priv_data *iter_priv;
+
+ iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
+ target_private);
+ return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
+}
+
/* maximum visited objects before bailing out */
#define MAX_ITER_OBJECTS 1000000
@@ -83,6 +92,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
struct seq_file *seq = file->private_data;
size_t n, offs, copied = 0;
int err = 0, num_objs = 0;
+ bool can_resched;
void *p;
mutex_lock(&seq->lock);
@@ -135,6 +145,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
goto done;
}
+ can_resched = bpf_iter_support_resched(seq);
while (1) {
loff_t pos = seq->index;
@@ -180,6 +191,9 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
}
break;
}
+
+ if (can_resched)
+ cond_resched();
}
stop:
offs = seq->count;
@@ -273,7 +287,7 @@ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
{
struct bpf_iter_target_info *tinfo;
- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
return -ENOMEM;
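
The cond_resched() added above is gated on a new BPF_ITER_RESCHED feature flag, so only iterator targets that declare they can be rescheduled get it. A minimal kernel-side sketch of opting in (the target name and seq_info symbol are hypothetical; the .feature line is the point):

	static const struct bpf_iter_reg demo_iter_reg_info = {
		.target		= "demo",		/* hypothetical target name */
		.feature	= BPF_ITER_RESCHED,	/* allow cond_resched() in bpf_seq_read() */
		.seq_info	= &demo_seq_info,	/* hypothetical struct bpf_iter_seq_info */
	};

	static int __init demo_iter_init(void)
	{
		return bpf_iter_reg_target(&demo_iter_reg_info);
	}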
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 5d3a7af9ba9b..dd5aedee99e7 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -67,7 +67,8 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
if (charge_mem && mem_charge(smap, owner, smap->elem_size))
return NULL;
- selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+ selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
+ GFP_ATOMIC | __GFP_NOWARN);
if (selem) {
if (value)
memcpy(SDATA(selem)->data, value, smap->map.value_size);
@@ -264,7 +265,8 @@ int bpf_local_storage_alloc(void *owner,
if (err)
return err;
- storage = kzalloc(sizeof(*storage), GFP_ATOMIC | __GFP_NOWARN);
+ storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!storage) {
err = -ENOMEM;
goto uncharge;
@@ -543,10 +545,8 @@ struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
struct bpf_local_storage_map *smap;
unsigned int i;
u32 nbuckets;
- u64 cost;
- int ret;
- smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
+ smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
if (!smap)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&smap->map, attr);
@@ -555,18 +555,10 @@ struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
nbuckets = max_t(u32, 2, nbuckets);
smap->bucket_log = ilog2(nbuckets);
- cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
-
- ret = bpf_map_charge_init(&smap->map.memory, cost);
- if (ret < 0) {
- kfree(smap);
- return ERR_PTR(ret);
- }
smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
- GFP_USER | __GFP_NOWARN);
+ GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
if (!smap->buckets) {
- bpf_map_charge_finish(&smap->map.memory);
kfree(smap);
return ERR_PTR(-ENOMEM);
}
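
The charging code removed here (and in the other bpf files of this merge) is replaced by memcg-based accounting: per-map allocations are charged to the memory cgroup that created the map rather than tracked through the old bpf_map_charge_*() machinery. A minimal sketch of the replacement pattern, following the hunks above:

	/* element allocation charged against the map's memcg */
	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				GFP_ATOMIC | __GFP_NOWARN);

	/* allocations made before the map exists pass __GFP_ACCOUNT directly */
	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);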
diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
index 1b6b9349cb85..d99e89f113c4 100644
--- a/kernel/bpf/bpf_lru_list.c
+++ b/kernel/bpf/bpf_lru_list.c
@@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
static void bpf_common_lru_push_free(struct bpf_lru *lru,
struct bpf_lru_node *node)
{
+ u8 node_type = READ_ONCE(node->type);
unsigned long flags;
- if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
- WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
+ if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
+ WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
return;
- if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
+ if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
struct bpf_lru_locallist *loc_l;
loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 78ea8a7bd27f..1622a44d1617 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -7,12 +7,15 @@
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/btf.h>
+#include <linux/binfmts.h>
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
#include <linux/bpf_verifier.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
+#include <linux/btf_ids.h>
+#include <linux/ima.h>
/* For every LSM hook that allows attachment of BPF programs, declare a nop
* function where a BPF program can be attached.
@@ -26,7 +29,11 @@ noinline RET bpf_lsm_##NAME(__VA_ARGS__) \
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
-#define BPF_LSM_SYM_PREFX "bpf_lsm_"
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
+BTF_SET_START(bpf_lsm_hooks)
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+BTF_SET_END(bpf_lsm_hooks)
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
@@ -37,8 +44,7 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
return -EINVAL;
}
- if (strncmp(BPF_LSM_SYM_PREFX, prog->aux->attach_func_name,
- sizeof(BPF_LSM_SYM_PREFX) - 1)) {
+ if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) {
bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
prog->aux->attach_btf_id, prog->aux->attach_func_name);
return -EINVAL;
@@ -47,6 +53,52 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
return 0;
}
+/* Mask for all the currently supported BPRM option flags */
+#define BPF_F_BRPM_OPTS_MASK BPF_F_BPRM_SECUREEXEC
+
+BPF_CALL_2(bpf_bprm_opts_set, struct linux_binprm *, bprm, u64, flags)
+{
+ if (flags & ~BPF_F_BRPM_OPTS_MASK)
+ return -EINVAL;
+
+ bprm->secureexec = (flags & BPF_F_BPRM_SECUREEXEC);
+ return 0;
+}
+
+BTF_ID_LIST_SINGLE(bpf_bprm_opts_set_btf_ids, struct, linux_binprm)
+
+const static struct bpf_func_proto bpf_bprm_opts_set_proto = {
+ .func = bpf_bprm_opts_set,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_bprm_opts_set_btf_ids[0],
+ .arg2_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_ima_inode_hash, struct inode *, inode, void *, dst, u32, size)
+{
+ return ima_inode_hash(inode, dst, size);
+}
+
+static bool bpf_ima_inode_hash_allowed(const struct bpf_prog *prog)
+{
+ return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
+}
+
+BTF_ID_LIST_SINGLE(bpf_ima_inode_hash_btf_ids, struct, inode)
+
+const static struct bpf_func_proto bpf_ima_inode_hash_proto = {
+ .func = bpf_ima_inode_hash,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_ima_inode_hash_btf_ids[0],
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .allowed = bpf_ima_inode_hash_allowed,
+};
+
static const struct bpf_func_proto *
bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -59,11 +111,115 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
+ case BPF_FUNC_spin_lock:
+ return &bpf_spin_lock_proto;
+ case BPF_FUNC_spin_unlock:
+ return &bpf_spin_unlock_proto;
+ case BPF_FUNC_task_storage_get:
+ return &bpf_task_storage_get_proto;
+ case BPF_FUNC_task_storage_delete:
+ return &bpf_task_storage_delete_proto;
+ case BPF_FUNC_bprm_opts_set:
+ return &bpf_bprm_opts_set_proto;
+ case BPF_FUNC_ima_inode_hash:
+ return prog->aux->sleepable ? &bpf_ima_inode_hash_proto : NULL;
default:
return tracing_prog_func_proto(func_id, prog);
}
}
+/* The set of hooks which are called without pagefaults disabled and are allowed
+ * to "sleep" and thus can be used for sleepable BPF programs.
+ */
+BTF_SET_START(sleepable_lsm_hooks)
+BTF_ID(func, bpf_lsm_bpf)
+BTF_ID(func, bpf_lsm_bpf_map)
+BTF_ID(func, bpf_lsm_bpf_map_alloc_security)
+BTF_ID(func, bpf_lsm_bpf_map_free_security)
+BTF_ID(func, bpf_lsm_bpf_prog)
+BTF_ID(func, bpf_lsm_bprm_check_security)
+BTF_ID(func, bpf_lsm_bprm_committed_creds)
+BTF_ID(func, bpf_lsm_bprm_committing_creds)
+BTF_ID(func, bpf_lsm_bprm_creds_for_exec)
+BTF_ID(func, bpf_lsm_bprm_creds_from_file)
+BTF_ID(func, bpf_lsm_capget)
+BTF_ID(func, bpf_lsm_capset)
+BTF_ID(func, bpf_lsm_cred_prepare)
+BTF_ID(func, bpf_lsm_file_ioctl)
+BTF_ID(func, bpf_lsm_file_lock)
+BTF_ID(func, bpf_lsm_file_open)
+BTF_ID(func, bpf_lsm_file_receive)
+
+#ifdef CONFIG_SECURITY_NETWORK
+BTF_ID(func, bpf_lsm_inet_conn_established)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+BTF_ID(func, bpf_lsm_inode_create)
+BTF_ID(func, bpf_lsm_inode_free_security)
+BTF_ID(func, bpf_lsm_inode_getattr)
+BTF_ID(func, bpf_lsm_inode_getxattr)
+BTF_ID(func, bpf_lsm_inode_mknod)
+BTF_ID(func, bpf_lsm_inode_need_killpriv)
+BTF_ID(func, bpf_lsm_inode_post_setxattr)
+BTF_ID(func, bpf_lsm_inode_readlink)
+BTF_ID(func, bpf_lsm_inode_rename)
+BTF_ID(func, bpf_lsm_inode_rmdir)
+BTF_ID(func, bpf_lsm_inode_setattr)
+BTF_ID(func, bpf_lsm_inode_setxattr)
+BTF_ID(func, bpf_lsm_inode_symlink)
+BTF_ID(func, bpf_lsm_inode_unlink)
+BTF_ID(func, bpf_lsm_kernel_module_request)
+BTF_ID(func, bpf_lsm_kernfs_init_security)
+
+#ifdef CONFIG_KEYS
+BTF_ID(func, bpf_lsm_key_free)
+#endif /* CONFIG_KEYS */
+
+BTF_ID(func, bpf_lsm_mmap_file)
+BTF_ID(func, bpf_lsm_netlink_send)
+BTF_ID(func, bpf_lsm_path_notify)
+BTF_ID(func, bpf_lsm_release_secctx)
+BTF_ID(func, bpf_lsm_sb_alloc_security)
+BTF_ID(func, bpf_lsm_sb_eat_lsm_opts)
+BTF_ID(func, bpf_lsm_sb_kern_mount)
+BTF_ID(func, bpf_lsm_sb_mount)
+BTF_ID(func, bpf_lsm_sb_remount)
+BTF_ID(func, bpf_lsm_sb_set_mnt_opts)
+BTF_ID(func, bpf_lsm_sb_show_options)
+BTF_ID(func, bpf_lsm_sb_statfs)
+BTF_ID(func, bpf_lsm_sb_umount)
+BTF_ID(func, bpf_lsm_settime)
+
+#ifdef CONFIG_SECURITY_NETWORK
+BTF_ID(func, bpf_lsm_socket_accept)
+BTF_ID(func, bpf_lsm_socket_bind)
+BTF_ID(func, bpf_lsm_socket_connect)
+BTF_ID(func, bpf_lsm_socket_create)
+BTF_ID(func, bpf_lsm_socket_getpeername)
+BTF_ID(func, bpf_lsm_socket_getpeersec_dgram)
+BTF_ID(func, bpf_lsm_socket_getsockname)
+BTF_ID(func, bpf_lsm_socket_getsockopt)
+BTF_ID(func, bpf_lsm_socket_listen)
+BTF_ID(func, bpf_lsm_socket_post_create)
+BTF_ID(func, bpf_lsm_socket_recvmsg)
+BTF_ID(func, bpf_lsm_socket_sendmsg)
+BTF_ID(func, bpf_lsm_socket_shutdown)
+BTF_ID(func, bpf_lsm_socket_socketpair)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+BTF_ID(func, bpf_lsm_syslog)
+BTF_ID(func, bpf_lsm_task_alloc)
+BTF_ID(func, bpf_lsm_task_getsecid)
+BTF_ID(func, bpf_lsm_task_prctl)
+BTF_ID(func, bpf_lsm_task_setscheduler)
+BTF_ID(func, bpf_lsm_task_to_inode)
+BTF_SET_END(sleepable_lsm_hooks)
+
+bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return btf_id_set_contains(&sleepable_lsm_hooks, btf_id);
+}
+
const struct bpf_prog_ops lsm_prog_ops = {
};
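
A minimal BPF-side sketch of the two helpers added above (section names follow libbpf's lsm/ and lsm.s/ conventions; the chosen hooks, program names and buffer size are illustrative, not taken from this patch):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char LICENSE[] SEC("license") = "GPL";

	/* Non-sleepable hook: request secure-exec semantics for every exec. */
	SEC("lsm/bprm_creds_for_exec")
	int BPF_PROG(secure_exec, struct linux_binprm *bprm)
	{
		bpf_bprm_opts_set(bprm, BPF_F_BPRM_SECUREEXEC);
		return 0;
	}

	/* Sleepable hook ("lsm.s/"); bpf_ima_inode_hash() is only allowed
	 * from hooks in the sleepable set above.
	 */
	SEC("lsm.s/file_open")
	int BPF_PROG(hash_on_open, struct file *file)
	{
		__u8 digest[32];	/* sha256-sized buffer, illustrative */

		bpf_ima_inode_hash(file->f_inode, digest, sizeof(digest));
		return 0;
	}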
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 4c3b543bb33b..1a666a975416 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -548,12 +548,10 @@ static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
const struct bpf_struct_ops *st_ops;
- size_t map_total_size, st_map_size;
+ size_t st_map_size;
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
- struct bpf_map_memory mem;
struct bpf_map *map;
- int err;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -573,20 +571,11 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
* struct bpf_struct_ops_tcp_congestions_ops
*/
(vt->size - sizeof(struct bpf_struct_ops_value));
- map_total_size = st_map_size +
- /* uvalue */
- sizeof(vt->size) +
- /* struct bpf_progs **progs */
- btf_type_vlen(t) * sizeof(struct bpf_prog *);
- err = bpf_map_charge_init(&mem, map_total_size);
- if (err < 0)
- return ERR_PTR(err);
st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
- if (!st_map) {
- bpf_map_charge_finish(&mem);
+ if (!st_map)
return ERR_PTR(-ENOMEM);
- }
+
st_map->st_ops = st_ops;
map = &st_map->map;
@@ -597,14 +586,12 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!st_map->uvalue || !st_map->progs || !st_map->image) {
bpf_struct_ops_map_free(map);
- bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
}
mutex_init(&st_map->lock);
set_vm_flush_reset_perms(st_map->image);
bpf_map_init_from_attr(map, attr);
- bpf_map_charge_move(&map->memory, &mem);
return map;
}
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
new file mode 100644
index 000000000000..e0da0258b732
--- /dev/null
+++ b/kernel/bpf/bpf_task_storage.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <linux/bpf_local_storage.h>
+#include <linux/filter.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_lsm.h>
+#include <linux/btf_ids.h>
+#include <linux/fdtable.h>
+
+DEFINE_BPF_STORAGE_CACHE(task_cache);
+
+static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
+{
+ struct task_struct *task = owner;
+ struct bpf_storage_blob *bsb;
+
+ bsb = bpf_task(task);
+ if (!bsb)
+ return NULL;
+ return &bsb->storage;
+}
+
+static struct bpf_local_storage_data *
+task_storage_lookup(struct task_struct *task, struct bpf_map *map,
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage *task_storage;
+ struct bpf_local_storage_map *smap;
+ struct bpf_storage_blob *bsb;
+
+ bsb = bpf_task(task);
+ if (!bsb)
+ return NULL;
+
+ task_storage = rcu_dereference(bsb->storage);
+ if (!task_storage)
+ return NULL;
+
+ smap = (struct bpf_local_storage_map *)map;
+ return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
+}
+
+void bpf_task_storage_free(struct task_struct *task)
+{
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage *local_storage;
+ bool free_task_storage = false;
+ struct bpf_storage_blob *bsb;
+ struct hlist_node *n;
+
+ bsb = bpf_task(task);
+ if (!bsb)
+ return;
+
+ rcu_read_lock();
+
+ local_storage = rcu_dereference(bsb->storage);
+ if (!local_storage) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* Neither the bpf_prog nor the bpf-map's syscall
+ * could be modifying the local_storage->list now.
+ * Thus, no elem can be added-to or deleted-from the
+ * local_storage->list by the bpf_prog or by the bpf-map's syscall.
+ *
+ * It is racing with bpf_local_storage_map_free() alone
+ * when unlinking elem from the local_storage->list and
+ * the map's bucket->list.
+ */
+ raw_spin_lock_bh(&local_storage->lock);
+ hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
+ /* Always unlink from map before unlinking from
+ * local_storage.
+ */
+ bpf_selem_unlink_map(selem);
+ free_task_storage = bpf_selem_unlink_storage_nolock(
+ local_storage, selem, false);
+ }
+ raw_spin_unlock_bh(&local_storage->lock);
+ rcu_read_unlock();
+
+ /* free_task_storage should always be true as long as
+ * local_storage->list was non-empty.
+ */
+ if (free_task_storage)
+ kfree_rcu(local_storage, rcu);
+}
+
+static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_local_storage_data *sdata;
+ struct task_struct *task;
+ unsigned int f_flags;
+ struct pid *pid;
+ int fd, err;
+
+ fd = *(int *)key;
+ pid = pidfd_get_pid(fd, &f_flags);
+ if (IS_ERR(pid))
+ return ERR_CAST(pid);
+
+ /* We should be in an RCU read side critical section, it should be safe
+ * to call pid_task.
+ */
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ task = pid_task(pid, PIDTYPE_PID);
+ if (!task) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ sdata = task_storage_lookup(task, map, true);
+ put_pid(pid);
+ return sdata ? sdata->data : NULL;
+out:
+ put_pid(pid);
+ return ERR_PTR(err);
+}
+
+static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct bpf_local_storage_data *sdata;
+ struct task_struct *task;
+ unsigned int f_flags;
+ struct pid *pid;
+ int fd, err;
+
+ fd = *(int *)key;
+ pid = pidfd_get_pid(fd, &f_flags);
+ if (IS_ERR(pid))
+ return PTR_ERR(pid);
+
+ /* We should be in an RCU read side critical section, it should be safe
+ * to call pid_task.
+ */
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ task = pid_task(pid, PIDTYPE_PID);
+ if (!task || !task_storage_ptr(task)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ sdata = bpf_local_storage_update(
+ task, (struct bpf_local_storage_map *)map, value, map_flags);
+
+ err = PTR_ERR_OR_ZERO(sdata);
+out:
+ put_pid(pid);
+ return err;
+}
+
+static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
+{
+ struct bpf_local_storage_data *sdata;
+
+ sdata = task_storage_lookup(task, map, false);
+ if (!sdata)
+ return -ENOENT;
+
+ bpf_selem_unlink(SELEM(sdata));
+
+ return 0;
+}
+
+static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
+{
+ struct task_struct *task;
+ unsigned int f_flags;
+ struct pid *pid;
+ int fd, err;
+
+ fd = *(int *)key;
+ pid = pidfd_get_pid(fd, &f_flags);
+ if (IS_ERR(pid))
+ return PTR_ERR(pid);
+
+ /* We should be in an RCU read side critical section, it should be safe
+ * to call pid_task.
+ */
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ task = pid_task(pid, PIDTYPE_PID);
+ if (!task) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = task_storage_delete(task, map);
+out:
+ put_pid(pid);
+ return err;
+}
+
+BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
+ task, void *, value, u64, flags)
+{
+ struct bpf_local_storage_data *sdata;
+
+ if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
+ return (unsigned long)NULL;
+
+ /* explicitly check that the task_storage_ptr is not
+ * NULL as task_storage_lookup returns NULL in this case and
+ * bpf_local_storage_update expects the owner to have a
+ * valid storage pointer.
+ */
+ if (!task || !task_storage_ptr(task))
+ return (unsigned long)NULL;
+
+ sdata = task_storage_lookup(task, map, true);
+ if (sdata)
+ return (unsigned long)sdata->data;
+
+ /* This helper must only be called from places where the lifetime of the task
+ * is guaranteed. Either by being refcounted or by being protected
+ * by an RCU read-side critical section.
+ */
+ if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
+ sdata = bpf_local_storage_update(
+ task, (struct bpf_local_storage_map *)map, value,
+ BPF_NOEXIST);
+ return IS_ERR(sdata) ? (unsigned long)NULL :
+ (unsigned long)sdata->data;
+ }
+
+ return (unsigned long)NULL;
+}
+
+BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
+ task)
+{
+ if (!task)
+ return -EINVAL;
+
+ /* This helper must only be called from places where the lifetime of the task
+ * is guaranteed. Either by being refcounted or by being protected
+ * by an RCU read-side critical section.
+ */
+ return task_storage_delete(task, map);
+}
+
+static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ return -ENOTSUPP;
+}
+
+static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_local_storage_map *smap;
+
+ smap = bpf_local_storage_map_alloc(attr);
+ if (IS_ERR(smap))
+ return ERR_CAST(smap);
+
+ smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
+ return &smap->map;
+}
+
+static void task_storage_map_free(struct bpf_map *map)
+{
+ struct bpf_local_storage_map *smap;
+
+ smap = (struct bpf_local_storage_map *)map;
+ bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
+ bpf_local_storage_map_free(smap);
+}
+
+static int task_storage_map_btf_id;
+const struct bpf_map_ops task_storage_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = bpf_local_storage_map_alloc_check,
+ .map_alloc = task_storage_map_alloc,
+ .map_free = task_storage_map_free,
+ .map_get_next_key = notsupp_get_next_key,
+ .map_lookup_elem = bpf_pid_task_storage_lookup_elem,
+ .map_update_elem = bpf_pid_task_storage_update_elem,
+ .map_delete_elem = bpf_pid_task_storage_delete_elem,
+ .map_check_btf = bpf_local_storage_map_check_btf,
+ .map_btf_name = "bpf_local_storage_map",
+ .map_btf_id = &task_storage_map_btf_id,
+ .map_owner_storage_ptr = task_storage_ptr,
+};
+
+BTF_ID_LIST_SINGLE(bpf_task_storage_btf_ids, struct, task_struct)
+
+const struct bpf_func_proto bpf_task_storage_get_proto = {
+ .func = bpf_task_storage_get,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &bpf_task_storage_btf_ids[0],
+ .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg4_type = ARG_ANYTHING,
+};
+
+const struct bpf_func_proto bpf_task_storage_delete_proto = {
+ .func = bpf_task_storage_delete,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &bpf_task_storage_btf_ids[0],
+};
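
A minimal BPF-side sketch of the new map type (map name, value layout and the chosen hook are illustrative; the task storage helpers are only wired up for LSM programs at this point, per bpf_lsm_func_proto above, and the sketch assumes the bpf_get_current_task_btf() helper from the same series):

	struct {
		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u64);
	} exec_count SEC(".maps");

	SEC("lsm/bprm_committed_creds")
	int BPF_PROG(count_execs, struct linux_binprm *bprm)
	{
		__u64 *cnt;

		/* Create the per-task slot on first use, then bump it. */
		cnt = bpf_task_storage_get(&exec_count, bpf_get_current_task_btf(),
					   0, BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (cnt)
			__sync_fetch_and_add(cnt, 1);
		return 0;
	}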
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index ed7d02e8bc93..2efeb5f4b343 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -22,7 +22,8 @@
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
-#include <linux/btf_ids.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
#include <net/sock.h>
/* BTF (BPF Type Format) is the meta data format which describes
@@ -204,12 +205,19 @@ struct btf {
const char *strings;
void *nohdr_data;
struct btf_header hdr;
- u32 nr_types;
+ u32 nr_types; /* includes VOID for base BTF */
u32 types_size;
u32 data_size;
refcount_t refcnt;
u32 id;
struct rcu_head rcu;
+
+ /* split BTF support */
+ struct btf *base_btf;
+ u32 start_id; /* first type ID in this BTF (0 for base BTF) */
+ u32 start_str_off; /* first string offset (0 for base BTF) */
+ char name[MODULE_NAME_LEN];
+ bool kernel_btf;
};
enum verifier_phase {
@@ -450,14 +458,27 @@ static bool btf_type_is_datasec(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}
+u32 btf_nr_types(const struct btf *btf)
+{
+ u32 total = 0;
+
+ while (btf) {
+ total += btf->nr_types;
+ btf = btf->base_btf;
+ }
+
+ return total;
+}
+
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
const struct btf_type *t;
const char *tname;
- u32 i;
+ u32 i, total;
- for (i = 1; i <= btf->nr_types; i++) {
- t = btf->types[i];
+ total = btf_nr_types(btf);
+ for (i = 1; i < total; i++) {
+ t = btf_type_by_id(btf, i);
if (BTF_INFO_KIND(t->info) != kind)
continue;
@@ -600,8 +621,14 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
- return BTF_STR_OFFSET_VALID(offset) &&
- offset < btf->hdr.str_len;
+ if (!BTF_STR_OFFSET_VALID(offset))
+ return false;
+
+ while (offset < btf->start_str_off)
+ btf = btf->base_btf;
+
+ offset -= btf->start_str_off;
+ return offset < btf->hdr.str_len;
}
static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
@@ -615,10 +642,22 @@ static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
return true;
}
+static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
+{
+ while (offset < btf->start_str_off)
+ btf = btf->base_btf;
+
+ offset -= btf->start_str_off;
+ if (offset < btf->hdr.str_len)
+ return &btf->strings[offset];
+
+ return NULL;
+}
+
static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
/* offset must be valid */
- const char *src = &btf->strings[offset];
+ const char *src = btf_str_by_offset(btf, offset);
const char *src_limit;
if (!__btf_name_char_ok(*src, true, dot_ok))
@@ -651,27 +690,28 @@ static bool btf_name_valid_section(const struct btf *btf, u32 offset)
static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
+ const char *name;
+
if (!offset)
return "(anon)";
- else if (offset < btf->hdr.str_len)
- return &btf->strings[offset];
- else
- return "(invalid-name-offset)";
+
+ name = btf_str_by_offset(btf, offset);
+ return name ?: "(invalid-name-offset)";
}
const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
- if (offset < btf->hdr.str_len)
- return &btf->strings[offset];
-
- return NULL;
+ return btf_str_by_offset(btf, offset);
}
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
- if (type_id > btf->nr_types)
- return NULL;
+ while (type_id < btf->start_id)
+ btf = btf->base_btf;
+ type_id -= btf->start_id;
+ if (type_id >= btf->nr_types)
+ return NULL;
return btf->types[type_id];
}
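
A short worked example of the ID partitioning this lookup relies on (the numbers are made up for illustration):

	/*
	 * Suppose the base (vmlinux) BTF carries 80000 types (IDs 0..79999,
	 * including VOID) and a 1 MiB string section.  A module's split BTF
	 * is then initialized with:
	 *
	 *	btf->start_id      = 80000	(== base_btf->nr_types)
	 *	btf->start_str_off = 0x100000	(== base_btf->hdr.str_len)
	 *
	 * so btf_type_by_id(btf, 123) walks down to base_btf because
	 * 123 < start_id, while btf_type_by_id(btf, 80005) resolves locally
	 * to btf->types[80005 - 80000].
	 */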
@@ -1391,17 +1431,13 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
struct btf *btf = env->btf;
- /* < 2 because +1 for btf_void which is always in btf->types[0].
- * btf_void is not accounted in btf->nr_types because btf_void
- * does not come from the BTF file.
- */
- if (btf->types_size - btf->nr_types < 2) {
+ if (btf->types_size == btf->nr_types) {
/* Expand 'types' array */
struct btf_type **new_types;
u32 expand_by, new_size;
- if (btf->types_size == BTF_MAX_TYPE) {
+ if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
btf_verifier_log(env, "Exceeded max num of types");
return -E2BIG;
}
@@ -1415,18 +1451,23 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
if (!new_types)
return -ENOMEM;
- if (btf->nr_types == 0)
- new_types[0] = &btf_void;
- else
+ if (btf->nr_types == 0) {
+ if (!btf->base_btf) {
+ /* lazily init VOID type */
+ new_types[0] = &btf_void;
+ btf->nr_types++;
+ }
+ } else {
memcpy(new_types, btf->types,
- sizeof(*btf->types) * (btf->nr_types + 1));
+ sizeof(*btf->types) * btf->nr_types);
+ }
kvfree(btf->types);
btf->types = new_types;
btf->types_size = new_size;
}
- btf->types[++(btf->nr_types)] = t;
+ btf->types[btf->nr_types++] = t;
return 0;
}
@@ -1483,6 +1524,11 @@ static void btf_free_rcu(struct rcu_head *rcu)
btf_free(btf);
}
+void btf_get(struct btf *btf)
+{
+ refcount_inc(&btf->refcnt);
+}
+
void btf_put(struct btf *btf)
{
if (btf && refcount_dec_and_test(&btf->refcnt)) {
@@ -1499,18 +1545,17 @@ static int env_resolve_init(struct btf_verifier_env *env)
u32 *resolved_ids = NULL;
u8 *visit_states = NULL;
- /* +1 for btf_void */
- resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
+ resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_sizes)
goto nomem;
- resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
+ resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_ids)
goto nomem;
- visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
+ visit_states = kvcalloc(nr_types, sizeof(*visit_states),
GFP_KERNEL | __GFP_NOWARN);
if (!visit_states)
goto nomem;
@@ -1562,21 +1607,27 @@ static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
static bool env_type_is_resolved(const struct btf_verifier_env *env,
u32 type_id)
{
- return env->visit_states[type_id] == RESOLVED;
+ /* base BTF types should be resolved by now */
+ if (type_id < env->btf->start_id)
+ return true;
+
+ return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
}
static int env_stack_push(struct btf_verifier_env *env,
const struct btf_type *t, u32 type_id)
{
+ const struct btf *btf = env->btf;
struct resolve_vertex *v;
if (env->top_stack == MAX_RESOLVE_DEPTH)
return -E2BIG;
- if (env->visit_states[type_id] != NOT_VISITED)
+ if (type_id < btf->start_id
+ || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
return -EEXIST;
- env->visit_states[type_id] = VISITED;
+ env->visit_states[type_id - btf->start_id] = VISITED;
v = &env->stack[env->top_stack++];
v->t = t;
@@ -1606,6 +1657,7 @@ static void env_stack_pop_resolved(struct btf_verifier_env *env,
u32 type_id = env->stack[--(env->top_stack)].type_id;
struct btf *btf = env->btf;
+ type_id -= btf->start_id; /* adjust to local type id */
btf->resolved_sizes[type_id] = resolved_size;
btf->resolved_ids[type_id] = resolved_type_id;
env->visit_states[type_id] = RESOLVED;
@@ -1710,14 +1762,30 @@ btf_resolve_size(const struct btf *btf, const struct btf_type *type,
return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
}
+static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
+{
+ while (type_id < btf->start_id)
+ btf = btf->base_btf;
+
+ return btf->resolved_ids[type_id - btf->start_id];
+}
+
/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
u32 *type_id)
{
- *type_id = btf->resolved_ids[*type_id];
+ *type_id = btf_resolved_type_id(btf, *type_id);
return btf_type_by_id(btf, *type_id);
}
+static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
+{
+ while (type_id < btf->start_id)
+ btf = btf->base_btf;
+
+ return btf->resolved_sizes[type_id - btf->start_id];
+}
+
const struct btf_type *btf_type_id_size(const struct btf *btf,
u32 *type_id, u32 *ret_size)
{
@@ -1732,7 +1800,7 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
if (btf_type_has_size(size_type)) {
size = size_type->size;
} else if (btf_type_is_array(size_type)) {
- size = btf->resolved_sizes[size_type_id];
+ size = btf_resolved_type_size(btf, size_type_id);
} else if (btf_type_is_ptr(size_type)) {
size = sizeof(void *);
} else {
@@ -1740,14 +1808,14 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
!btf_type_is_var(size_type)))
return NULL;
- size_type_id = btf->resolved_ids[size_type_id];
+ size_type_id = btf_resolved_type_id(btf, size_type_id);
size_type = btf_type_by_id(btf, size_type_id);
if (btf_type_nosize_or_null(size_type))
return NULL;
else if (btf_type_has_size(size_type))
size = size_type->size;
else if (btf_type_is_array(size_type))
- size = btf->resolved_sizes[size_type_id];
+ size = btf_resolved_type_size(btf, size_type_id);
else if (btf_type_is_ptr(size_type))
size = sizeof(void *);
else
@@ -3472,11 +3540,6 @@ static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
- if (!btf_type_vlen(t)) {
- btf_verifier_log_type(env, t, "vlen == 0");
- return -EINVAL;
- }
-
if (!t->size) {
btf_verifier_log_type(env, t, "size == 0");
return -EINVAL;
@@ -3799,7 +3862,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
cur = btf->nohdr_data + hdr->type_off;
end = cur + hdr->type_len;
- env->log_type_id = 1;
+ env->log_type_id = btf->base_btf ? btf->start_id : 1;
while (cur < end) {
struct btf_type *t = cur;
s32 meta_size;
@@ -3826,8 +3889,8 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
return false;
if (btf_type_is_struct(t) || btf_type_is_datasec(t))
- return !btf->resolved_ids[type_id] &&
- !btf->resolved_sizes[type_id];
+ return !btf_resolved_type_id(btf, type_id) &&
+ !btf_resolved_type_size(btf, type_id);
if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
btf_type_is_var(t)) {
@@ -3847,7 +3910,7 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
return elem_type && !btf_type_is_modifier(elem_type) &&
(array->nelems * elem_size ==
- btf->resolved_sizes[type_id]);
+ btf_resolved_type_size(btf, type_id));
}
return false;
@@ -3889,7 +3952,8 @@ static int btf_resolve(struct btf_verifier_env *env,
static int btf_check_all_types(struct btf_verifier_env *env)
{
struct btf *btf = env->btf;
- u32 type_id;
+ const struct btf_type *t;
+ u32 type_id, i;
int err;
err = env_resolve_init(env);
@@ -3897,8 +3961,9 @@ static int btf_check_all_types(struct btf_verifier_env *env)
return err;
env->phase++;
- for (type_id = 1; type_id <= btf->nr_types; type_id++) {
- const struct btf_type *t = btf_type_by_id(btf, type_id);
+ for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
+ type_id = btf->start_id + i;
+ t = btf_type_by_id(btf, type_id);
env->log_type_id = type_id;
if (btf_type_needs_resolve(t) &&
@@ -3935,7 +4000,7 @@ static int btf_parse_type_sec(struct btf_verifier_env *env)
return -EINVAL;
}
- if (!hdr->type_len) {
+ if (!env->btf->base_btf && !hdr->type_len) {
btf_verifier_log(env, "No type found");
return -EINVAL;
}
@@ -3962,13 +4027,18 @@ static int btf_parse_str_sec(struct btf_verifier_env *env)
return -EINVAL;
}
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
- start[0] || end[-1]) {
+ btf->strings = start;
+
+ if (btf->base_btf && !hdr->str_len)
+ return 0;
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
+ btf_verifier_log(env, "Invalid string section");
+ return -EINVAL;
+ }
+ if (!btf->base_btf && start[0]) {
btf_verifier_log(env, "Invalid string section");
return -EINVAL;
}
-
- btf->strings = start;
return 0;
}
@@ -4097,7 +4167,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
return -ENOTSUPP;
}
- if (btf_data_size == hdr->hdr_len) {
+ if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
btf_verifier_log(env, "No data");
return -EINVAL;
}
@@ -4363,6 +4433,8 @@ struct btf *btf_parse_vmlinux(void)
btf->data = __start_BTF;
btf->data_size = __stop_BTF - __start_BTF;
+ btf->kernel_btf = true;
+ snprintf(btf->name, sizeof(btf->name), "vmlinux");
err = btf_parse_hdr(env);
if (err)
@@ -4388,6 +4460,81 @@ struct btf *btf_parse_vmlinux(void)
bpf_struct_ops_init(btf, log);
+ refcount_set(&btf->refcnt, 1);
+
+ err = btf_alloc_id(btf);
+ if (err)
+ goto errout;
+
+ btf_verifier_env_free(env);
+ return btf;
+
+errout:
+ btf_verifier_env_free(env);
+ if (btf) {
+ kvfree(btf->types);
+ kfree(btf);
+ }
+ return ERR_PTR(err);
+}
+
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+
+static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
+{
+ struct btf_verifier_env *env = NULL;
+ struct bpf_verifier_log *log;
+ struct btf *btf = NULL, *base_btf;
+ int err;
+
+ base_btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(base_btf))
+ return base_btf;
+ if (!base_btf)
+ return ERR_PTR(-EINVAL);
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
+ if (!env)
+ return ERR_PTR(-ENOMEM);
+
+ log = &env->log;
+ log->level = BPF_LOG_KERNEL;
+
+ btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
+ if (!btf) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ env->btf = btf;
+
+ btf->base_btf = base_btf;
+ btf->start_id = base_btf->nr_types;
+ btf->start_str_off = base_btf->hdr.str_len;
+ btf->kernel_btf = true;
+ snprintf(btf->name, sizeof(btf->name), "%s", module_name);
+
+ btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
+ if (!btf->data) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ memcpy(btf->data, data, data_size);
+ btf->data_size = data_size;
+
+ err = btf_parse_hdr(env);
+ if (err)
+ goto errout;
+
+ btf->nohdr_data = btf->data + btf->hdr.hdr_len;
+
+ err = btf_parse_str_sec(env);
+ if (err)
+ goto errout;
+
+ err = btf_check_all_metas(env);
+ if (err)
+ goto errout;
+
btf_verifier_env_free(env);
refcount_set(&btf->refcnt, 1);
return btf;
@@ -4395,21 +4542,23 @@ struct btf *btf_parse_vmlinux(void)
errout:
btf_verifier_env_free(env);
if (btf) {
+ kvfree(btf->data);
kvfree(btf->types);
kfree(btf);
}
return ERR_PTR(err);
}
+#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
+
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{
struct bpf_prog *tgt_prog = prog->aux->dst_prog;
- if (tgt_prog) {
+ if (tgt_prog)
return tgt_prog->aux->btf;
- } else {
- return btf_vmlinux;
- }
+ else
+ return prog->aux->attach_btf;
}
static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
@@ -4550,6 +4699,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (ctx_arg_info->offset == off) {
info->reg_type = ctx_arg_info->reg_type;
+ info->btf = btf_vmlinux;
info->btf_id = ctx_arg_info->btf_id;
return true;
}
@@ -4566,6 +4716,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
if (ret > 0) {
+ info->btf = btf_vmlinux;
info->btf_id = ret;
return true;
} else {
@@ -4573,6 +4724,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
}
}
+ info->btf = btf;
info->btf_id = t->type;
t = btf_type_by_id(btf, t->type);
/* skip modifiers */
@@ -4599,7 +4751,7 @@ enum bpf_struct_walk_result {
WALK_STRUCT,
};
-static int btf_struct_walk(struct bpf_verifier_log *log,
+static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
u32 *next_btf_id)
{
@@ -4610,7 +4762,7 @@ static int btf_struct_walk(struct bpf_verifier_log *log,
u32 vlen, elem_id, mid;
again:
- tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
+ tname = __btf_name_by_offset(btf, t->name_off);
if (!btf_type_is_struct(t)) {
bpf_log(log, "Type '%s' is not a struct\n", tname);
return -EINVAL;
@@ -4627,7 +4779,7 @@ again:
goto error;
member = btf_type_member(t) + vlen - 1;
- mtype = btf_type_skip_modifiers(btf_vmlinux, member->type,
+ mtype = btf_type_skip_modifiers(btf, member->type,
NULL);
if (!btf_type_is_array(mtype))
goto error;
@@ -4643,7 +4795,7 @@ again:
/* Only allow structure for now, can be relaxed for
* other types later.
*/
- t = btf_type_skip_modifiers(btf_vmlinux, array_elem->type,
+ t = btf_type_skip_modifiers(btf, array_elem->type,
NULL);
if (!btf_type_is_struct(t))
goto error;
@@ -4701,10 +4853,10 @@ error:
/* type of the field */
mid = member->type;
- mtype = btf_type_by_id(btf_vmlinux, member->type);
- mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
+ mtype = btf_type_by_id(btf, member->type);
+ mname = __btf_name_by_offset(btf, member->name_off);
- mtype = __btf_resolve_size(btf_vmlinux, mtype, &msize,
+ mtype = __btf_resolve_size(btf, mtype, &msize,
&elem_type, &elem_id, &total_nelems,
&mid);
if (IS_ERR(mtype)) {
@@ -4799,7 +4951,7 @@ error:
mname, moff, tname, off, size);
return -EACCES;
}
- stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
+ stype = btf_type_skip_modifiers(btf, mtype->type, &id);
if (btf_type_is_struct(stype)) {
*next_btf_id = id;
return WALK_PTR;
@@ -4825,7 +4977,7 @@ error:
return -EINVAL;
}
-int btf_struct_access(struct bpf_verifier_log *log,
+int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype __maybe_unused,
u32 *next_btf_id)
@@ -4834,7 +4986,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
u32 id;
do {
- err = btf_struct_walk(log, t, off, size, &id);
+ err = btf_struct_walk(log, btf, t, off, size, &id);
switch (err) {
case WALK_PTR:
@@ -4850,7 +5002,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
* by diving into it. At this point the offset is
* aligned with the new type, so set it to 0.
*/
- t = btf_type_by_id(btf_vmlinux, id);
+ t = btf_type_by_id(btf, id);
off = 0;
break;
default:
@@ -4866,21 +5018,37 @@ int btf_struct_access(struct bpf_verifier_log *log,
return -EINVAL;
}
+/* Check that two BTF types, each specified as a BTF object + id, are exactly
+ * the same. Trivial ID check is not enough due to module BTFs, because we can
+ * end up with two different module BTFs, but IDs point to the common type in
+ * vmlinux BTF.
+ */
+static bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2)
+{
+ if (id1 != id2)
+ return false;
+ if (btf1 == btf2)
+ return true;
+ return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
+}
+
bool btf_struct_ids_match(struct bpf_verifier_log *log,
- int off, u32 id, u32 need_type_id)
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id)
{
const struct btf_type *type;
int err;
/* Are we already done? */
- if (need_type_id == id && off == 0)
+ if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
return true;
again:
- type = btf_type_by_id(btf_vmlinux, id);
+ type = btf_type_by_id(btf, id);
if (!type)
return false;
- err = btf_struct_walk(log, type, off, 1, &id);
+ err = btf_struct_walk(log, btf, type, off, 1, &id);
if (err != WALK_STRUCT)
return false;
@@ -4889,7 +5057,7 @@ again:
* continue the search with offset 0 in the new
* type.
*/
- if (need_type_id != id) {
+ if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
off = 0;
goto again;
}
@@ -4909,7 +5077,7 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
while (t && btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (!t) {
- *bad_type = btf->types[0];
+ *bad_type = btf_type_by_id(btf, 0);
return -EINVAL;
}
if (btf_type_is_ptr(t))
@@ -5123,15 +5291,16 @@ int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *pr
* Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
*/
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg)
+ struct bpf_reg_state *regs)
{
struct bpf_verifier_log *log = &env->log;
struct bpf_prog *prog = env->prog;
struct btf *btf = prog->aux->btf;
const struct btf_param *args;
- const struct btf_type *t;
- u32 i, nargs, btf_id;
+ const struct btf_type *t, *ref_t;
+ u32 i, nargs, btf_id, type_size;
const char *tname;
+ bool is_global;
if (!prog->aux->func_info)
return -EINVAL;
@@ -5165,38 +5334,57 @@ int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
goto out;
}
+
+ is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
/* check that BTF function arguments match actual types that the
* verifier sees.
*/
for (i = 0; i < nargs; i++) {
+ struct bpf_reg_state *reg = &regs[i + 1];
+
t = btf_type_by_id(btf, args[i].type);
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (btf_type_is_int(t) || btf_type_is_enum(t)) {
- if (reg[i + 1].type == SCALAR_VALUE)
+ if (reg->type == SCALAR_VALUE)
continue;
bpf_log(log, "R%d is not a scalar\n", i + 1);
goto out;
}
if (btf_type_is_ptr(t)) {
- if (reg[i + 1].type == SCALAR_VALUE) {
- bpf_log(log, "R%d is not a pointer\n", i + 1);
- goto out;
- }
/* If function expects ctx type in BTF check that caller
* is passing PTR_TO_CTX.
*/
if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
- if (reg[i + 1].type != PTR_TO_CTX) {
+ if (reg->type != PTR_TO_CTX) {
bpf_log(log,
"arg#%d expected pointer to ctx, but got %s\n",
i, btf_kind_str[BTF_INFO_KIND(t->info)]);
goto out;
}
- if (check_ctx_reg(env, &reg[i + 1], i + 1))
+ if (check_ctx_reg(env, reg, i + 1))
goto out;
continue;
}
+
+ if (!is_global)
+ goto out;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ ref_t = btf_resolve_size(btf, t, &type_size);
+ if (IS_ERR(ref_t)) {
+ bpf_log(log,
+ "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
+ i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
+ PTR_ERR(ref_t));
+ goto out;
+ }
+
+ if (check_mem_reg(env, reg, i + 1, type_size))
+ goto out;
+
+ continue;
}
bpf_log(log, "Unrecognized arg#%d type %s\n",
i, btf_kind_str[BTF_INFO_KIND(t->info)]);
@@ -5220,14 +5408,14 @@ out:
* (either PTR_TO_CTX or SCALAR_VALUE).
*/
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg)
+ struct bpf_reg_state *regs)
{
struct bpf_verifier_log *log = &env->log;
struct bpf_prog *prog = env->prog;
enum bpf_prog_type prog_type = prog->type;
struct btf *btf = prog->aux->btf;
const struct btf_param *args;
- const struct btf_type *t;
+ const struct btf_type *t, *ref_t;
u32 i, nargs, btf_id;
const char *tname;
@@ -5291,16 +5479,35 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
* Only PTR_TO_CTX and SCALAR are supported atm.
*/
for (i = 0; i < nargs; i++) {
+ struct bpf_reg_state *reg = &regs[i + 1];
+
t = btf_type_by_id(btf, args[i].type);
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (btf_type_is_int(t) || btf_type_is_enum(t)) {
- reg[i + 1].type = SCALAR_VALUE;
+ reg->type = SCALAR_VALUE;
continue;
}
- if (btf_type_is_ptr(t) &&
- btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
- reg[i + 1].type = PTR_TO_CTX;
+ if (btf_type_is_ptr(t)) {
+ if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
+ reg->type = PTR_TO_CTX;
+ continue;
+ }
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ ref_t = btf_resolve_size(btf, t, &reg->mem_size);
+ if (IS_ERR(ref_t)) {
+ bpf_log(log,
+ "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
+ i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
+ PTR_ERR(ref_t));
+ return -EINVAL;
+ }
+
+ reg->type = PTR_TO_MEM_OR_NULL;
+ reg->id = ++env->id_gen;
+
continue;
}
bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
@@ -5487,7 +5694,9 @@ int btf_get_info_by_fd(const struct btf *btf,
struct bpf_btf_info info;
u32 info_copy, btf_copy;
void __user *ubtf;
- u32 uinfo_len;
+ char __user *uname;
+ u32 uinfo_len, uname_len, name_len;
+ int ret = 0;
uinfo = u64_to_user_ptr(attr->info.info);
uinfo_len = attr->info.info_len;
@@ -5504,11 +5713,37 @@ int btf_get_info_by_fd(const struct btf *btf,
return -EFAULT;
info.btf_size = btf->data_size;
+ info.kernel_btf = btf->kernel_btf;
+
+ uname = u64_to_user_ptr(info.name);
+ uname_len = info.name_len;
+ if (!uname ^ !uname_len)
+ return -EINVAL;
+
+ name_len = strlen(btf->name);
+ info.name_len = name_len;
+
+ if (uname) {
+ if (uname_len >= name_len + 1) {
+ if (copy_to_user(uname, btf->name, name_len + 1))
+ return -EFAULT;
+ } else {
+ char zero = '\0';
+
+ if (copy_to_user(uname, btf->name, uname_len - 1))
+ return -EFAULT;
+ if (put_user(zero, uname + uname_len - 1))
+ return -EFAULT;
+ /* let user-space know about too short buffer */
+ ret = -ENOSPC;
+ }
+ }
+
if (copy_to_user(uinfo, &info, info_copy) ||
put_user(info_copy, &uattr->info.info_len))
return -EFAULT;
- return 0;
+ return ret;
}
int btf_get_fd_by_id(u32 id)
@@ -5532,11 +5767,21 @@ int btf_get_fd_by_id(u32 id)
return fd;
}
-u32 btf_id(const struct btf *btf)
+u32 btf_obj_id(const struct btf *btf)
{
return btf->id;
}
+bool btf_is_kernel(const struct btf *btf)
+{
+ return btf->kernel_btf;
+}
+
+bool btf_is_module(const struct btf *btf)
+{
+ return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
+}
+
static int btf_id_cmp_func(const void *a, const void *b)
{
const int *pa = a, *pb = b;
@@ -5548,3 +5793,148 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{
return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
}
+
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+struct btf_module {
+ struct list_head list;
+ struct module *module;
+ struct btf *btf;
+ struct bin_attribute *sysfs_attr;
+};
+
+static LIST_HEAD(btf_modules);
+static DEFINE_MUTEX(btf_module_mutex);
+
+static ssize_t
+btf_module_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
+{
+ const struct btf *btf = bin_attr->private;
+
+ memcpy(buf, btf->data + off, len);
+ return len;
+}
+
+static int btf_module_notify(struct notifier_block *nb, unsigned long op,
+ void *module)
+{
+ struct btf_module *btf_mod, *tmp;
+ struct module *mod = module;
+ struct btf *btf;
+ int err = 0;
+
+ if (mod->btf_data_size == 0 ||
+ (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
+ goto out;
+
+ switch (op) {
+ case MODULE_STATE_COMING:
+ btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
+ if (!btf_mod) {
+ err = -ENOMEM;
+ goto out;
+ }
+ btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
+ if (IS_ERR(btf)) {
+ pr_warn("failed to validate module [%s] BTF: %ld\n",
+ mod->name, PTR_ERR(btf));
+ kfree(btf_mod);
+ err = PTR_ERR(btf);
+ goto out;
+ }
+ err = btf_alloc_id(btf);
+ if (err) {
+ btf_free(btf);
+ kfree(btf_mod);
+ goto out;
+ }
+
+ mutex_lock(&btf_module_mutex);
+ btf_mod->module = module;
+ btf_mod->btf = btf;
+ list_add(&btf_mod->list, &btf_modules);
+ mutex_unlock(&btf_module_mutex);
+
+ if (IS_ENABLED(CONFIG_SYSFS)) {
+ struct bin_attribute *attr;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ goto out;
+
+ sysfs_bin_attr_init(attr);
+ attr->attr.name = btf->name;
+ attr->attr.mode = 0444;
+ attr->size = btf->data_size;
+ attr->private = btf;
+ attr->read = btf_module_read;
+
+ err = sysfs_create_bin_file(btf_kobj, attr);
+ if (err) {
+ pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
+ mod->name, err);
+ kfree(attr);
+ err = 0;
+ goto out;
+ }
+
+ btf_mod->sysfs_attr = attr;
+ }
+
+ break;
+ case MODULE_STATE_GOING:
+ mutex_lock(&btf_module_mutex);
+ list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
+ if (btf_mod->module != module)
+ continue;
+
+ list_del(&btf_mod->list);
+ if (btf_mod->sysfs_attr)
+ sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
+ btf_put(btf_mod->btf);
+ kfree(btf_mod->sysfs_attr);
+ kfree(btf_mod);
+ break;
+ }
+ mutex_unlock(&btf_module_mutex);
+ break;
+ }
+out:
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block btf_module_nb = {
+ .notifier_call = btf_module_notify,
+};
+
+static int __init btf_module_init(void)
+{
+ register_module_notifier(&btf_module_nb);
+ return 0;
+}
+
+fs_initcall(btf_module_init);
+#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
+
+struct module *btf_try_get_module(const struct btf *btf)
+{
+ struct module *res = NULL;
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ struct btf_module *btf_mod, *tmp;
+
+ mutex_lock(&btf_module_mutex);
+ list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
+ if (btf_mod->btf != btf)
+ continue;
+
+ if (try_module_get(btf_mod->module))
+ res = btf_mod->module;
+
+ break;
+ }
+ mutex_unlock(&btf_module_mutex);
+#endif
+
+ return res;
+}
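The btf.c changes above wire up split BTF: a module's BTF object only owns the types and strings it adds, and defers to the vmlinux BTF recorded in base_btf for type IDs below start_id and string offsets below start_str_off. A conceptual sketch of how a type-ID lookup behaves under that scheme, using simplified demo types rather than the kernel's real struct btf:

struct demo_btf {
	const struct demo_btf *base_btf;  /* vmlinux BTF when this is a module BTF */
	u32 start_id;                     /* first type ID owned by this object */
	const struct btf_type **types;    /* only the types owned by this object */
};

/* IDs below start_id belong to the base object; everything else is local. */
static const struct btf_type *demo_type_by_id(const struct demo_btf *btf, u32 id)
{
	while (btf->base_btf && id < btf->start_id)
		btf = btf->base_btf;
	return btf->types[id - btf->start_id];
}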
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 6ec088a96302..b567ca46555c 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -19,7 +19,7 @@
#include "../cgroup/cgroup-internal.h"
-DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -128,7 +128,7 @@ static void cgroup_bpf_release(struct work_struct *work)
if (pl->link)
bpf_cgroup_link_auto_detach(pl->link);
kfree(pl);
- static_branch_dec(&cgroup_bpf_enabled_key);
+ static_branch_dec(&cgroup_bpf_enabled_key[type]);
}
old_array = rcu_dereference_protected(
cgrp->bpf.effective[type],
@@ -499,7 +499,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
if (old_prog)
bpf_prog_put(old_prog);
else
- static_branch_inc(&cgroup_bpf_enabled_key);
+ static_branch_inc(&cgroup_bpf_enabled_key[type]);
bpf_cgroup_storages_link(new_storage, cgrp, type);
return 0;
@@ -698,7 +698,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
cgrp->bpf.flags[type] = 0;
if (old_prog)
bpf_prog_put(old_prog);
- static_branch_dec(&cgroup_bpf_enabled_key);
+ static_branch_dec(&cgroup_bpf_enabled_key[type]);
return 0;
cleanup:
@@ -1055,6 +1055,8 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
* @uaddr: sockaddr struct provided by user
* @type: The type of program to be executed
* @t_ctx: Pointer to attach type specific context
+ * @flags: Pointer to u32 which contains higher bits of BPF program
+ * return value (OR'ed together).
*
* socket is expected to be of type INET or INET6.
*
@@ -1064,7 +1066,8 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
enum bpf_attach_type type,
- void *t_ctx)
+ void *t_ctx,
+ u32 *flags)
{
struct bpf_sock_addr_kern ctx = {
.sk = sk,
@@ -1087,7 +1090,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
}
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
+ ret = BPF_PROG_RUN_ARRAY_FLAGS(cgrp->bpf.effective[type], &ctx,
+ BPF_PROG_RUN, flags);
return ret == 1 ? 0 : -EPERM;
}
@@ -1298,7 +1302,8 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
return empty;
}
-static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
+static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
+ struct bpf_sockopt_buf *buf)
{
if (unlikely(max_optlen < 0))
return -EINVAL;
@@ -1310,6 +1315,15 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
max_optlen = PAGE_SIZE;
}
+ if (max_optlen <= sizeof(buf->data)) {
+ /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
+ * bytes, avoid the cost of kzalloc.
+ */
+ ctx->optval = buf->data;
+ ctx->optval_end = ctx->optval + max_optlen;
+ return max_optlen;
+ }
+
ctx->optval = kzalloc(max_optlen, GFP_USER);
if (!ctx->optval)
return -ENOMEM;
@@ -1319,16 +1333,26 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
return max_optlen;
}
-static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
+static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
+ struct bpf_sockopt_buf *buf)
{
+ if (ctx->optval == buf->data)
+ return;
kfree(ctx->optval);
}
+static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
+ struct bpf_sockopt_buf *buf)
+{
+ return ctx->optval != buf->data;
+}
+
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
int *optname, char __user *optval,
int *optlen, char **kernel_optval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_sockopt_buf buf = {};
struct bpf_sockopt_kern ctx = {
.sk = sk,
.level = *level,
@@ -1340,8 +1364,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
* attached to the hook so we don't waste time allocating
* memory and locking the socket.
*/
- if (!cgroup_bpf_enabled ||
- __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
+ if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
return 0;
/* Allocate a bit more than the initial user buffer for
@@ -1350,7 +1373,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
*/
max_optlen = max_t(int, 16, *optlen);
- max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
if (max_optlen < 0)
return max_optlen;
@@ -1390,13 +1413,31 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
*/
if (ctx.optlen != 0) {
*optlen = ctx.optlen;
- *kernel_optval = ctx.optval;
+ /* We've used bpf_sockopt_kern->buf as intermediary
+ * storage, but the BPF program indicates that we need
+ * to pass this data to the kernel setsockopt handler.
+ * No way to export on-stack buf, have to allocate a
+ * new buffer.
+ */
+ if (!sockopt_buf_allocated(&ctx, &buf)) {
+ void *p = kmalloc(ctx.optlen, GFP_USER);
+
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(p, ctx.optval, ctx.optlen);
+ *kernel_optval = p;
+ } else {
+ *kernel_optval = ctx.optval;
+ }
+ /* export and don't free sockopt buf */
+ return 0;
}
}
out:
- if (ret)
- sockopt_free_buf(&ctx);
+ sockopt_free_buf(&ctx, &buf);
return ret;
}
@@ -1406,6 +1447,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int retval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_sockopt_buf buf = {};
struct bpf_sockopt_kern ctx = {
.sk = sk,
.level = level,
@@ -1418,13 +1460,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
* attached to the hook so we don't waste time allocating
* memory and locking the socket.
*/
- if (!cgroup_bpf_enabled ||
- __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
+ if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
return retval;
ctx.optlen = max_optlen;
- max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
if (max_optlen < 0)
return max_optlen;
@@ -1441,6 +1482,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}
+ if (ctx.optlen < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
if (copy_from_user(ctx.optval, optval,
min(ctx.optlen, max_optlen)) != 0) {
ret = -EFAULT;
@@ -1458,7 +1504,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}
- if (ctx.optlen > max_optlen) {
+ if (ctx.optlen > max_optlen || ctx.optlen < 0) {
ret = -EFAULT;
goto out;
}
@@ -1482,9 +1528,55 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
ret = ctx.retval;
out:
- sockopt_free_buf(&ctx);
+ sockopt_free_buf(&ctx, &buf);
return ret;
}
+
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_sockopt_kern ctx = {
+ .sk = sk,
+ .level = level,
+ .optname = optname,
+ .retval = retval,
+ .optlen = *optlen,
+ .optval = optval,
+ .optval_end = optval + *optlen,
+ };
+ int ret;
+
+ /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
+ * user data back into the BPF buffer when retval != 0. This is
+ * done as an optimization to avoid an extra copy, assuming the
+ * kernel won't populate the data in case of an error.
+ * Here we always pass the data and memset() should
+ * be called if that data shouldn't be "exported".
+ */
+
+ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
+ &ctx, BPF_PROG_RUN);
+ if (!ret)
+ return -EPERM;
+
+ if (ctx.optlen > *optlen)
+ return -EFAULT;
+
+ /* BPF programs are only allowed to set retval to 0, not some
+ * arbitrary value.
+ */
+ if (ctx.retval != 0 && ctx.retval != retval)
+ return -EFAULT;
+
+ /* BPF programs can shrink the buffer, export the modifications.
+ */
+ if (ctx.optlen != 0)
+ *optlen = ctx.optlen;
+
+ return ctx.retval;
+}
#endif
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
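The sockopt changes above trade a guaranteed kzalloc() for an on-stack bpf_sockopt_buf when the option value is small, and copy the data out to the heap only when it has to outlive the call (the setsockopt export path). A stripped-down sketch of that buffer-selection pattern, with demo_* names standing in for the real helpers:

#define DEMO_BUF_SIZE 32

struct demo_buf { u8 data[DEMO_BUF_SIZE]; };

/* Point at the caller's on-stack buffer when the payload fits,
 * fall back to the heap otherwise (mirrors sockopt_alloc_buf()).
 */
static void *demo_get_buf(struct demo_buf *stack, size_t len)
{
	if (len <= sizeof(stack->data))
		return stack->data;
	return kzalloc(len, GFP_USER);
}

/* Free only what was actually heap-allocated (mirrors sockopt_free_buf()). */
static void demo_put_buf(struct demo_buf *stack, void *p)
{
	if (p != stack->data)
		kfree(p);
}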
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9268d77898b7..0ae015ad1e05 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -77,7 +77,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog_aux *aux;
struct bpf_prog *fp;
@@ -86,11 +86,17 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
if (fp == NULL)
return NULL;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
if (aux == NULL) {
vfree(fp);
return NULL;
}
+ fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+ if (!fp->active) {
+ vfree(fp);
+ kfree(aux);
+ return NULL;
+ }
fp->pages = size / PAGE_SIZE;
fp->aux = aux;
@@ -106,7 +112,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog *prog;
int cpu;
@@ -114,8 +120,9 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
if (!prog)
return NULL;
- prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
- if (!prog->aux->stats) {
+ prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
+ if (!prog->stats) {
+ free_percpu(prog->active);
kfree(prog->aux);
vfree(prog);
return NULL;
@@ -124,7 +131,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
for_each_possible_cpu(cpu) {
struct bpf_prog_stats *pstats;
- pstats = per_cpu_ptr(prog->aux->stats, cpu);
+ pstats = per_cpu_ptr(prog->stats, cpu);
u64_stats_init(&pstats->syncp);
}
return prog;
@@ -138,7 +145,7 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
sizeof(*prog->aux->jited_linfo),
- GFP_KERNEL | __GFP_NOWARN);
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!prog->aux->jited_linfo)
return -ENOMEM;
@@ -219,25 +226,17 @@ void bpf_prog_free_linfo(struct bpf_prog *prog)
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog *fp;
- u32 pages, delta;
- int ret;
+ u32 pages;
size = round_up(size, PAGE_SIZE);
pages = size / PAGE_SIZE;
if (pages <= fp_old->pages)
return fp_old;
- delta = pages - fp_old->pages;
- ret = __bpf_prog_charge(fp_old->aux->user, delta);
- if (ret)
- return NULL;
-
fp = __vmalloc(size, gfp_flags);
- if (fp == NULL) {
- __bpf_prog_uncharge(fp_old->aux->user, delta);
- } else {
+ if (fp) {
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = pages;
fp->aux->prog = fp;
@@ -246,6 +245,8 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
* reallocated structure.
*/
fp_old->aux = NULL;
+ fp_old->stats = NULL;
+ fp_old->active = NULL;
__bpf_prog_free(fp_old);
}
@@ -257,10 +258,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
if (fp->aux) {
mutex_destroy(&fp->aux->used_maps_mutex);
mutex_destroy(&fp->aux->dst_mutex);
- free_percpu(fp->aux->stats);
kfree(fp->aux->poke_tab);
kfree(fp->aux);
}
+ free_percpu(fp->stats);
+ free_percpu(fp->active);
vfree(fp);
}
@@ -1317,8 +1319,8 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_3(STX, MEM, H), \
INSN_3(STX, MEM, W), \
INSN_3(STX, MEM, DW), \
- INSN_3(STX, XADD, W), \
- INSN_3(STX, XADD, DW), \
+ INSN_3(STX, ATOMIC, W), \
+ INSN_3(STX, ATOMIC, DW), \
/* Immediate based. */ \
INSN_3(ST, MEM, B), \
INSN_3(ST, MEM, H), \
@@ -1369,7 +1371,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
*
* Decode and execute eBPF instructions.
*/
-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
@@ -1626,13 +1628,59 @@ out:
LDX_PROBE(DW, 8)
#undef LDX_PROBE
- STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
- atomic_add((u32) SRC, (atomic_t *)(unsigned long)
- (DST + insn->off));
- CONT;
- STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
- atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
- (DST + insn->off));
+#define ATOMIC_ALU_OP(BOP, KOP) \
+ case BOP: \
+ if (BPF_SIZE(insn->code) == BPF_W) \
+ atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
+ (DST + insn->off)); \
+ else \
+ atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
+ (DST + insn->off)); \
+ break; \
+ case BOP | BPF_FETCH: \
+ if (BPF_SIZE(insn->code) == BPF_W) \
+ SRC = (u32) atomic_fetch_##KOP( \
+ (u32) SRC, \
+ (atomic_t *)(unsigned long) (DST + insn->off)); \
+ else \
+ SRC = (u64) atomic64_fetch_##KOP( \
+ (u64) SRC, \
+ (atomic64_t *)(unsigned long) (DST + insn->off)); \
+ break;
+
+ STX_ATOMIC_DW:
+ STX_ATOMIC_W:
+ switch (IMM) {
+ ATOMIC_ALU_OP(BPF_ADD, add)
+ ATOMIC_ALU_OP(BPF_AND, and)
+ ATOMIC_ALU_OP(BPF_OR, or)
+ ATOMIC_ALU_OP(BPF_XOR, xor)
+#undef ATOMIC_ALU_OP
+
+ case BPF_XCHG:
+ if (BPF_SIZE(insn->code) == BPF_W)
+ SRC = (u32) atomic_xchg(
+ (atomic_t *)(unsigned long) (DST + insn->off),
+ (u32) SRC);
+ else
+ SRC = (u64) atomic64_xchg(
+ (atomic64_t *)(unsigned long) (DST + insn->off),
+ (u64) SRC);
+ break;
+ case BPF_CMPXCHG:
+ if (BPF_SIZE(insn->code) == BPF_W)
+ BPF_R0 = (u32) atomic_cmpxchg(
+ (atomic_t *)(unsigned long) (DST + insn->off),
+ (u32) BPF_R0, (u32) SRC);
+ else
+ BPF_R0 = (u64) atomic64_cmpxchg(
+ (atomic64_t *)(unsigned long) (DST + insn->off),
+ (u64) BPF_R0, (u64) SRC);
+ break;
+
+ default:
+ goto default_label;
+ }
CONT;
default_label:
@@ -1642,7 +1690,8 @@ out:
*
* Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
*/
- pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+ pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
+ insn->code, insn->imm);
BUG_ON(1);
return 0;
}
@@ -2127,6 +2176,28 @@ static void bpf_free_used_maps(struct bpf_prog_aux *aux)
kfree(aux->used_maps);
}
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ struct btf_mod_pair *btf_mod;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ btf_mod = &used_btfs[i];
+ if (btf_mod->module)
+ module_put(btf_mod->module);
+ btf_put(btf_mod->btf);
+ }
+#endif
+}
+
+static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
+{
+ __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
+ kfree(aux->used_btfs);
+}
+
static void bpf_prog_free_deferred(struct work_struct *work)
{
struct bpf_prog_aux *aux;
@@ -2134,6 +2205,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
aux = container_of(work, struct bpf_prog_aux, work);
bpf_free_used_maps(aux);
+ bpf_free_used_btfs(aux);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
@@ -2211,6 +2283,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
+const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
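The interpreter hunk above replaces the two XADD cases with a BPF_ATOMIC mode in which insn->imm selects the operation: plain add/and/or/xor, their BPF_FETCH variants that return the old value through the source register, BPF_XCHG, and BPF_CMPXCHG which compares against and returns through R0. A small illustrative decoder of that imm encoding, assuming the UAPI constants referenced in the diff; this helper is hypothetical, not kernel code:

#include <linux/bpf.h>	/* BPF_ADD, BPF_FETCH, BPF_XCHG, BPF_CMPXCHG, ... */

static const char *demo_atomic_op_name(int imm)
{
	switch (imm) {
	case BPF_ADD:			return "add";
	case BPF_ADD | BPF_FETCH:	return "fetch_add";
	case BPF_AND:			return "and";
	case BPF_AND | BPF_FETCH:	return "fetch_and";
	case BPF_OR:			return "or";
	case BPF_OR | BPF_FETCH:	return "fetch_or";
	case BPF_XOR:			return "xor";
	case BPF_XOR | BPF_FETCH:	return "fetch_xor";
	case BPF_XCHG:			return "xchg";		/* implies fetch */
	case BPF_CMPXCHG:		return "cmpxchg";	/* uses R0 */
	default:			return "unknown";
	}
}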
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index c61a23b564aa..5d1469de6921 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -84,8 +84,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
u32 value_size = attr->value_size;
struct bpf_cpu_map *cmap;
int err = -ENOMEM;
- u64 cost;
- int ret;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -97,7 +95,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
attr->map_flags & ~BPF_F_NUMA_NODE)
return ERR_PTR(-EINVAL);
- cmap = kzalloc(sizeof(*cmap), GFP_USER);
+ cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
if (!cmap)
return ERR_PTR(-ENOMEM);
@@ -109,26 +107,14 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
goto free_cmap;
}
- /* make sure page count doesn't overflow */
- cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-
- /* Notice returns -EPERM on if map size is larger than memlock limit */
- ret = bpf_map_charge_init(&cmap->map.memory, cost);
- if (ret) {
- err = ret;
- goto free_cmap;
- }
-
/* Alloc array for possible remote "destination" CPUs */
cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
sizeof(struct bpf_cpu_map_entry *),
cmap->map.numa_node);
if (!cmap->cpu_map)
- goto free_charge;
+ goto free_cmap;
return &cmap->map;
-free_charge:
- bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
kfree(cmap);
return ERR_PTR(err);
@@ -155,49 +141,6 @@ static void cpu_map_kthread_stop(struct work_struct *work)
kthread_stop(rcpu->kthread);
}
-static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
- struct sk_buff *skb)
-{
- unsigned int hard_start_headroom;
- unsigned int frame_size;
- void *pkt_data_start;
-
- /* Part of headroom was reserved to xdpf */
- hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
-
- /* Memory size backing xdp_frame data already have reserved
- * room for build_skb to place skb_shared_info in tailroom.
- */
- frame_size = xdpf->frame_sz;
-
- pkt_data_start = xdpf->data - hard_start_headroom;
- skb = build_skb_around(skb, pkt_data_start, frame_size);
- if (unlikely(!skb))
- return NULL;
-
- skb_reserve(skb, hard_start_headroom);
- __skb_put(skb, xdpf->len);
- if (xdpf->metasize)
- skb_metadata_set(skb, xdpf->metasize);
-
- /* Essential SKB info: protocol and skb->dev */
- skb->protocol = eth_type_trans(skb, xdpf->dev_rx);
-
- /* Optional SKB info, currently missing:
- * - HW checksum info (skb->ip_summed)
- * - HW RX hash (skb_set_hash)
- * - RX ring dev queue index (skb_record_rx_queue)
- */
-
- /* Until page_pool get SKB return path, release DMA here */
- xdp_release_frame(xdpf);
-
- /* Allow SKB to reuse area used by xdp_frame */
- xdp_scrub_frame(xdpf);
-
- return skb;
-}
-
static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
/* The tear-down procedure should have made sure that queue is
@@ -364,7 +307,8 @@ static int cpu_map_kthread_run(void *data)
struct sk_buff *skb = skbs[i];
int ret;
- skb = cpu_map_build_skb(xdpf, skb);
+ skb = __xdp_build_skb_from_frame(xdpf, skb,
+ xdpf->dev_rx);
if (!skb) {
xdp_return_frame(xdpf);
continue;
@@ -412,7 +356,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
}
static struct bpf_cpu_map_entry *
-__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
+__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ u32 cpu)
{
int numa, err, i, fd = value->bpf_prog.fd;
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
@@ -422,13 +367,13 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
/* Have map->numa_node, but choose node of redirect target CPU */
numa = cpu_to_node(cpu);
- rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
+ rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
if (!rcpu)
return NULL;
/* Alloc percpu bulkq */
- rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
- sizeof(void *), gfp);
+ rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
+ sizeof(void *), gfp);
if (!rcpu->bulkq)
goto free_rcu;
@@ -438,7 +383,8 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
}
/* Alloc queue */
- rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
+ rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
+ numa);
if (!rcpu->queue)
goto free_bulkq;
@@ -447,7 +393,7 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
goto free_queue;
rcpu->cpu = cpu;
- rcpu->map_id = map_id;
+ rcpu->map_id = map->id;
rcpu->value.qsize = value->qsize;
if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
@@ -455,7 +401,8 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
/* Setup kthread */
rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
- "cpumap/%d/map:%d", cpu, map_id);
+ "cpumap/%d/map:%d", cpu,
+ map->id);
if (IS_ERR(rcpu->kthread))
goto free_prog;
@@ -571,7 +518,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
rcpu = NULL; /* Same as deleting */
} else {
/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
- rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
+ rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
if (!rcpu)
return -ENOMEM;
rcpu->cmap = cmap;
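The cpumap changes above are part of switching BPF map memory accounting from the old memlock charging (bpf_map_charge_init/finish) to memory cgroup accounting: the map struct is allocated with __GFP_ACCOUNT, and per-entry allocations go through bpf_map_kmalloc_node()/bpf_map_alloc_percpu(). A rough sketch of what such a wrapper is expected to do, assuming a memcg reference stored in the map at creation time and the set_active_memcg() helper; the real kernel implementation may differ in detail:

static void *demo_map_kmalloc_node(struct bpf_map *map, size_t size,
				   gfp_t flags, int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	/* Charge the allocation to the cgroup that created the map
	 * (assumed to be cached in map->memcg), not to whichever task
	 * happens to trigger the update.
	 */
	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}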
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 2b5ca93c17de..85d9d1b72a33 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -109,8 +109,6 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
u32 valsize = attr->value_size;
- u64 cost = 0;
- int err;
/* check sanity of attributes. 2 value sizes supported:
* 4 bytes: ifindex
@@ -135,21 +133,13 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
if (!dtab->n_buckets) /* Overflow check */
return -EINVAL;
- cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
- } else {
- cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
}
- /* if map size is larger than memlock limit, reject it */
- err = bpf_map_charge_init(&dtab->map.memory, cost);
- if (err)
- return -EINVAL;
-
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
dtab->map.numa_node);
if (!dtab->dev_index_head)
- goto free_charge;
+ return -ENOMEM;
spin_lock_init(&dtab->index_lock);
} else {
@@ -157,14 +147,10 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
- goto free_charge;
+ return -ENOMEM;
}
return 0;
-
-free_charge:
- bpf_map_charge_finish(&dtab->map.memory);
- return -ENOMEM;
}
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -175,7 +161,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
- dtab = kzalloc(sizeof(*dtab), GFP_USER);
+ dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
if (!dtab)
return ERR_PTR(-ENOMEM);
@@ -602,8 +588,9 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
struct bpf_prog *prog = NULL;
struct bpf_dtab_netdev *dev;
- dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
- dtab->map.numa_node);
+ dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
+ GFP_ATOMIC | __GFP_NOWARN,
+ dtab->map.numa_node);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -815,9 +802,7 @@ static int dev_map_notification(struct notifier_block *notifier,
break;
/* will be freed in free_netdev() */
- netdev->xdp_bulkq =
- __alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
- sizeof(void *), GFP_ATOMIC);
+ netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
if (!netdev->xdp_bulkq)
return NOTIFY_BAD;
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index b44d8c447afd..3acc7e0b6916 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -80,6 +80,13 @@ const char *const bpf_alu_string[16] = {
[BPF_END >> 4] = "endian",
};
+static const char *const bpf_atomic_alu_string[16] = {
+ [BPF_ADD >> 4] = "add",
+ [BPF_AND >> 4] = "and",
+ [BPF_OR >> 4] = "or",
+ [BPF_XOR >> 4] = "xor",
+};
+
static const char *const bpf_ldst_string[] = {
[BPF_W >> 3] = "u32",
[BPF_H >> 3] = "u16",
@@ -153,14 +160,44 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
- else if (BPF_MODE(insn->code) == BPF_XADD)
- verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
+ else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
+ insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
+ verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
+ insn->code,
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->dst_reg, insn->off,
+ bpf_alu_string[BPF_OP(insn->imm) >> 4],
+ insn->src_reg);
+ } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ (insn->imm == (BPF_ADD | BPF_FETCH) ||
+ insn->imm == (BPF_AND | BPF_FETCH) ||
+ insn->imm == (BPF_OR | BPF_FETCH) ||
+ insn->imm == (BPF_XOR | BPF_FETCH))) {
+ verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
+ insn->code, insn->src_reg,
+ BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
+ bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->dst_reg, insn->off, insn->src_reg);
+ } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ insn->imm == BPF_CMPXCHG) {
+ verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n",
insn->code,
+ BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
- else
+ } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ insn->imm == BPF_XCHG) {
+ verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n",
+ insn->code, insn->src_reg,
+ BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->dst_reg, insn->off, insn->src_reg);
+ } else {
verbose(cbs->private_data, "BUG_%02x\n", insn->code);
+ }
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 1815e97d4c9c..d63912e73ad9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -86,6 +86,9 @@ struct bucket {
};
};
+#define HASHTAB_MAP_LOCK_COUNT 8
+#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
+
struct bpf_htab {
struct bpf_map map;
struct bucket *buckets;
@@ -99,6 +102,8 @@ struct bpf_htab {
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
u32 hashrnd;
+ struct lock_class_key lockdep_key;
+ int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};
/* each htab element is struct htab_elem + key + value */
@@ -138,33 +143,54 @@ static void htab_init_buckets(struct bpf_htab *htab)
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
- if (htab_use_raw_lock(htab))
+ if (htab_use_raw_lock(htab)) {
raw_spin_lock_init(&htab->buckets[i].raw_lock);
- else
+ lockdep_set_class(&htab->buckets[i].raw_lock,
+ &htab->lockdep_key);
+ } else {
spin_lock_init(&htab->buckets[i].lock);
+ lockdep_set_class(&htab->buckets[i].lock,
+ &htab->lockdep_key);
+ }
+ cond_resched();
}
}
-static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
- struct bucket *b)
+static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ struct bucket *b, u32 hash,
+ unsigned long *pflags)
{
unsigned long flags;
+ hash = hash & HASHTAB_MAP_LOCK_MASK;
+
+ migrate_disable();
+ if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ __this_cpu_dec(*(htab->map_locked[hash]));
+ migrate_enable();
+ return -EBUSY;
+ }
+
if (htab_use_raw_lock(htab))
raw_spin_lock_irqsave(&b->raw_lock, flags);
else
spin_lock_irqsave(&b->lock, flags);
- return flags;
+ *pflags = flags;
+
+ return 0;
}
static inline void htab_unlock_bucket(const struct bpf_htab *htab,
- struct bucket *b,
+ struct bucket *b, u32 hash,
unsigned long flags)
{
+ hash = hash & HASHTAB_MAP_LOCK_MASK;
if (htab_use_raw_lock(htab))
raw_spin_unlock_irqrestore(&b->raw_lock, flags);
else
spin_unlock_irqrestore(&b->lock, flags);
+ __this_cpu_dec(*(htab->map_locked[hash]));
+ migrate_enable();
}
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
@@ -199,7 +225,7 @@ static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
- return (struct htab_elem *) (htab->elems + i * htab->elem_size);
+ return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
@@ -255,7 +281,7 @@ static int prealloc_init(struct bpf_htab *htab)
if (!htab_is_percpu(htab) && !htab_is_lru(htab))
num_entries += num_possible_cpus();
- htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
+ htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
htab->map.numa_node);
if (!htab->elems)
return -ENOMEM;
@@ -267,7 +293,8 @@ static int prealloc_init(struct bpf_htab *htab)
u32 size = round_up(htab->map.value_size, 8);
void __percpu *pptr;
- pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
+ pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
+ GFP_USER | __GFP_NOWARN);
if (!pptr)
goto free_elems;
htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
@@ -321,8 +348,8 @@ static int alloc_extra_elems(struct bpf_htab *htab)
struct pcpu_freelist_node *l;
int cpu;
- pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
- GFP_USER | __GFP_NOWARN);
+ pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
+ GFP_USER | __GFP_NOWARN);
if (!pptr)
return -ENOMEM;
@@ -390,17 +417,11 @@ static int htab_map_alloc_check(union bpf_attr *attr)
attr->value_size == 0)
return -EINVAL;
- if (attr->key_size > MAX_BPF_STACK)
- /* eBPF programs initialize keys on stack, so they cannot be
- * larger than max stack size
- */
- return -E2BIG;
-
- if (attr->value_size >= KMALLOC_MAX_SIZE -
- MAX_BPF_STACK - sizeof(struct htab_elem))
- /* if value_size is bigger, the user space won't be able to
- * access the elements via bpf syscall. This check also makes
- * sure that the elem_size doesn't overflow and it's
+ if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
+ sizeof(struct htab_elem))
+ /* if key_size + value_size is bigger, the user space won't be
+ * able to access the elements via bpf syscall. This check
+ * also makes sure that the elem_size doesn't overflow and it's
* kmalloc-able later in htab_map_update_elem()
*/
return -E2BIG;
@@ -422,13 +443,14 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
- u64 cost;
- int err;
+ int err, i;
- htab = kzalloc(sizeof(*htab), GFP_USER);
+ htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
if (!htab)
return ERR_PTR(-ENOMEM);
+ lockdep_register_key(&htab->lockdep_key);
+
bpf_map_init_from_attr(&htab->map, attr);
if (percpu_lru) {
@@ -459,26 +481,21 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
htab->n_buckets > U32_MAX / sizeof(struct bucket))
goto free_htab;
- cost = (u64) htab->n_buckets * sizeof(struct bucket) +
- (u64) htab->elem_size * htab->map.max_entries;
-
- if (percpu)
- cost += (u64) round_up(htab->map.value_size, 8) *
- num_possible_cpus() * htab->map.max_entries;
- else
- cost += (u64) htab->elem_size * num_possible_cpus();
-
- /* if map size is larger than memlock limit, reject it */
- err = bpf_map_charge_init(&htab->map.memory, cost);
- if (err)
- goto free_htab;
-
err = -ENOMEM;
htab->buckets = bpf_map_area_alloc(htab->n_buckets *
sizeof(struct bucket),
htab->map.numa_node);
if (!htab->buckets)
- goto free_charge;
+ goto free_htab;
+
+ for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
+ htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
+ sizeof(int),
+ sizeof(int),
+ GFP_USER);
+ if (!htab->map_locked[i])
+ goto free_map_locked;
+ }
if (htab->map.map_flags & BPF_F_ZERO_SEED)
htab->hashrnd = 0;
@@ -490,7 +507,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (prealloc) {
err = prealloc_init(htab);
if (err)
- goto free_buckets;
+ goto free_map_locked;
if (!percpu && !lru) {
/* lru itself can remove the least used element, so
@@ -506,11 +523,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
free_prealloc:
prealloc_destroy(htab);
-free_buckets:
+free_map_locked:
+ for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+ free_percpu(htab->map_locked[i]);
bpf_map_area_free(htab->buckets);
-free_charge:
- bpf_map_charge_finish(&htab->map.memory);
free_htab:
+ lockdep_unregister_key(&htab->lockdep_key);
kfree(htab);
return ERR_PTR(err);
}
@@ -687,12 +705,15 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
struct hlist_nulls_node *n;
unsigned long flags;
struct bucket *b;
+ int ret;
tgt_l = container_of(node, struct htab_elem, lru_node);
b = __select_bucket(htab, tgt_l->hash);
head = &b->head;
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+ if (ret)
+ return false;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
@@ -700,7 +721,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
break;
}
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, tgt_l->hash, flags);
return l == tgt_l;
}
@@ -821,6 +842,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
}
}
+static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
+ void *value, bool onallcpus)
+{
+ /* When using prealloc and not setting the initial value on all cpus,
+ * zero-fill element values for the other cpus (just as happens when
+ * not using prealloc). Otherwise, the bpf program has no way to ensure
+ * known initial values for cpus other than the current one
+ * (onallcpus=false always when coming from a bpf prog).
+ */
+ if (htab_is_prealloc(htab) && !onallcpus) {
+ u32 size = round_up(htab->map.value_size, 8);
+ int current_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == current_cpu)
+ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
+ size);
+ else
+ memset(per_cpu_ptr(pptr, cpu), 0, size);
+ }
+ } else {
+ pcpu_copy_value(htab, pptr, value, onallcpus);
+ }
+}
+
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
@@ -865,8 +912,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new = ERR_PTR(-E2BIG);
goto dec_count;
}
- l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
- htab->map.numa_node);
+ l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
+ GFP_ATOMIC | __GFP_NOWARN,
+ htab->map.numa_node);
if (!l_new) {
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
@@ -882,8 +930,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
/* alloc_percpu zero-fills */
- pptr = __alloc_percpu_gfp(size, 8,
- GFP_ATOMIC | __GFP_NOWARN);
+ pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
+ GFP_ATOMIC | __GFP_NOWARN);
if (!pptr) {
kfree(l_new);
l_new = ERR_PTR(-ENOMEM);
@@ -891,7 +939,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
}
}
- pcpu_copy_value(htab, pptr, value, onallcpus);
+ pcpu_init_value(htab, pptr, value, onallcpus);
if (!prealloc)
htab_elem_set_ptr(l_new, key_size, pptr);
@@ -972,7 +1020,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
*/
}
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1013,7 +1063,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
}
ret = 0;
err:
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
return ret;
}
@@ -1051,7 +1101,9 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
return -ENOMEM;
memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1070,7 +1122,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
ret = 0;
err:
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
if (ret)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1096,7 +1148,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1105,7 +1157,9 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
b = __select_bucket(htab, hash);
head = &b->head;
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1128,7 +1182,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
}
ret = 0;
err:
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
return ret;
}
@@ -1148,7 +1202,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1168,7 +1222,9 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
return -ENOMEM;
}
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1183,14 +1239,14 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
value, onallcpus);
} else {
- pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
+ pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
value, onallcpus);
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
l_new = NULL;
}
ret = 0;
err:
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
if (l_new)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
return ret;
@@ -1218,7 +1274,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
struct htab_elem *l;
unsigned long flags;
u32 hash, key_size;
- int ret = -ENOENT;
+ int ret;
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
@@ -1228,17 +1284,20 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
hlist_nulls_del_rcu(&l->hash_node);
free_htab_elem(htab, l);
- ret = 0;
+ } else {
+ ret = -ENOENT;
}
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
return ret;
}
@@ -1250,7 +1309,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
struct htab_elem *l;
unsigned long flags;
u32 hash, key_size;
- int ret = -ENOENT;
+ int ret;
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
@@ -1260,16 +1319,18 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- flags = htab_lock_bucket(htab, b);
+ ret = htab_lock_bucket(htab, b, hash, &flags);
+ if (ret)
+ return ret;
l = lookup_elem_raw(head, hash, key, key_size);
- if (l) {
+ if (l)
hlist_nulls_del_rcu(&l->hash_node);
- ret = 0;
- }
+ else
+ ret = -ENOENT;
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, hash, flags);
if (l)
bpf_lru_push_free(&htab->lru, &l->lru_node);
return ret;
@@ -1295,6 +1356,7 @@ static void delete_all_elements(struct bpf_htab *htab)
static void htab_map_free(struct bpf_map *map)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ int i;
/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
* bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1312,6 +1374,9 @@ static void htab_map_free(struct bpf_map *map)
free_percpu(htab->extra_elems);
bpf_map_area_free(htab->buckets);
+ for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+ free_percpu(htab->map_locked[i]);
+ lockdep_unregister_key(&htab->lockdep_key);
kfree(htab);
}
@@ -1348,7 +1413,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
void __user *uvalues = u64_to_user_ptr(attr->batch.values);
void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
- void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
+ void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
u32 batch, max_count, size, bucket_size;
struct htab_elem *node_to_free = NULL;
u64 elem_map_flags, map_flags;
@@ -1415,8 +1480,11 @@ again_nocopy:
b = &htab->buckets[batch];
head = &b->head;
/* do not grab the lock unless we need it (bucket_cnt > 0). */
- if (locked)
- flags = htab_lock_bucket(htab, b);
+ if (locked) {
+ ret = htab_lock_bucket(htab, b, batch, &flags);
+ if (ret)
+ goto next_batch;
+ }
bucket_cnt = 0;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1433,7 +1501,7 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the lock was grabbed, so release it.
*/
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, batch, flags);
rcu_read_unlock();
bpf_enable_instrumentation();
goto after_loop;
@@ -1444,7 +1512,7 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the lock was grabbed, so release it.
*/
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, batch, flags);
rcu_read_unlock();
bpf_enable_instrumentation();
kvfree(keys);
@@ -1497,7 +1565,7 @@ again_nocopy:
dst_val += value_size;
}
- htab_unlock_bucket(htab, b, flags);
+ htab_unlock_bucket(htab, b, batch, flags);
locked = false;
while (node_to_free) {
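The hashtab hunks above make htab_lock_bucket() fallible: every caller now checks its return value and backs off instead of assuming the bucket lock was taken, and the new per-map lockdep key plus the map_locked per-CPU counters freed in htab_map_free() point at a re-entrancy guard for the case where the same CPU enters the map again while already holding a bucket lock of the same class. A minimal userspace sketch of that idea, with a thread-local depth counter standing in for the per-CPU counters; the names are invented and this is not the kernel code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define LOCK_COUNT 8				/* power of two, like the per-map lock classes */

static pthread_mutex_t bucket_lock[LOCK_COUNT];
static __thread int lock_depth[LOCK_COUNT];	/* stand-in for the per-CPU map_locked counters */

static int demo_lock_bucket(unsigned int hash)
{
	unsigned int idx = hash & (LOCK_COUNT - 1);

	if (lock_depth[idx]++) {		/* re-entry from the same thread: refuse */
		lock_depth[idx]--;
		return -EBUSY;
	}
	pthread_mutex_lock(&bucket_lock[idx]);
	return 0;
}

static void demo_unlock_bucket(unsigned int hash)
{
	unsigned int idx = hash & (LOCK_COUNT - 1);

	pthread_mutex_unlock(&bucket_lock[idx]);
	lock_depth[idx]--;
}

int main(void)
{
	for (int i = 0; i < LOCK_COUNT; i++)
		pthread_mutex_init(&bucket_lock[i], NULL);

	if (!demo_lock_bucket(42)) {
		/* a nested attempt from the same thread fails instead of hanging */
		printf("nested lock -> %d\n", demo_lock_bucket(42));
		demo_unlock_bucket(42);
	}
	return 0;
}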
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 25520f5eeaf6..308427fe03a3 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
}
const struct bpf_func_proto bpf_map_peek_elem_proto = {
- .func = bpf_map_pop_elem,
+ .func = bpf_map_peek_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
@@ -167,6 +167,17 @@ const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_0(bpf_ktime_get_coarse_ns)
+{
+ return ktime_get_coarse_ns();
+}
+
+const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
+ .func = bpf_ktime_get_coarse_ns,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
+
BPF_CALL_0(bpf_get_current_pid_tgid)
{
struct task_struct *task = current;
@@ -685,6 +696,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_ringbuf_output:
return &bpf_ringbuf_output_proto;
case BPF_FUNC_ringbuf_reserve:
@@ -707,19 +720,11 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_spin_lock_proto;
case BPF_FUNC_spin_unlock:
return &bpf_spin_unlock_proto;
- case BPF_FUNC_trace_printk:
- if (!perfmon_capable())
- return NULL;
- return bpf_get_trace_printk_proto();
- case BPF_FUNC_snprintf_btf:
- if (!perfmon_capable())
- return NULL;
- return &bpf_snprintf_btf_proto;
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
- case BPF_FUNC_bpf_per_cpu_ptr:
+ case BPF_FUNC_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
- case BPF_FUNC_bpf_this_cpu_ptr:
+ case BPF_FUNC_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
break;
@@ -729,6 +734,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return NULL;
switch (func_id) {
+ case BPF_FUNC_trace_printk:
+ return bpf_get_trace_printk_proto();
case BPF_FUNC_get_current_task:
return &bpf_get_current_task_proto;
case BPF_FUNC_probe_read_user:
@@ -739,6 +746,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
return &bpf_probe_read_kernel_str_proto;
+ case BPF_FUNC_snprintf_btf:
+ return &bpf_snprintf_btf_proto;
default:
return NULL;
}
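The new bpf_ktime_get_coarse_ns() helper exposes the coarse monotonic clock to BPF programs as a cheaper alternative to bpf_ktime_get_ns(). A hedged BPF-C usage sketch; it assumes the usual libbpf headers and the program/section names are only examples:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int stamp_execve(void *ctx)
{
	__u64 coarse = bpf_ktime_get_coarse_ns();	/* coarse monotonic clock, cheap to read */
	__u64 precise = bpf_ktime_get_ns();		/* full-resolution monotonic clock */

	bpf_printk("coarse=%llu precise=%llu", coarse, precise);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";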
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 571bb351ed3b..2d4f9ac12377 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -164,10 +164,10 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
return 0;
}
- new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
- map->value_size,
- __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
- map->numa_node);
+ new = bpf_map_kmalloc_node(map, sizeof(struct bpf_storage_buffer) +
+ map->value_size,
+ __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
+ map->numa_node);
if (!new)
return -ENOMEM;
@@ -287,8 +287,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_cgroup_storage_map *map;
- struct bpf_map_memory mem;
- int ret;
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
attr->key_size != sizeof(__u64))
@@ -308,18 +306,10 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
/* max_entries is not used and enforced to be 0 */
return ERR_PTR(-EINVAL);
- ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
- if (ret < 0)
- return ERR_PTR(ret);
-
map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
- __GFP_ZERO | GFP_USER, numa_node);
- if (!map) {
- bpf_map_charge_finish(&mem);
+ __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
+ if (!map)
return ERR_PTR(-ENOMEM);
- }
-
- bpf_map_charge_move(&map->map.memory, &mem);
/* copy mandatory map attributes */
bpf_map_init_from_attr(&map->map, attr);
@@ -496,9 +486,9 @@ static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype)
{
+ const gfp_t gfp = __GFP_ZERO | GFP_USER;
struct bpf_cgroup_storage *storage;
struct bpf_map *map;
- gfp_t flags;
size_t size;
u32 pages;
@@ -508,23 +498,19 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
size = bpf_cgroup_storage_calculate_size(map, &pages);
- if (bpf_map_charge_memlock(map, pages))
- return ERR_PTR(-EPERM);
-
- storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
- __GFP_ZERO | GFP_USER, map->numa_node);
+ storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
+ gfp, map->numa_node);
if (!storage)
goto enomem;
- flags = __GFP_ZERO | GFP_USER;
-
if (stype == BPF_CGROUP_STORAGE_SHARED) {
- storage->buf = kmalloc_node(size, flags, map->numa_node);
+ storage->buf = bpf_map_kmalloc_node(map, size, gfp,
+ map->numa_node);
if (!storage->buf)
goto enomem;
check_and_init_map_lock(map, storage->buf->data);
} else {
- storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
+ storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
if (!storage->percpu_buf)
goto enomem;
}
@@ -534,7 +520,6 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
return storage;
enomem:
- bpf_map_uncharge_memlock(map, pages);
kfree(storage);
return ERR_PTR(-ENOMEM);
}
@@ -561,16 +546,11 @@ void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
enum bpf_cgroup_storage_type stype;
struct bpf_map *map;
- u32 pages;
if (!storage)
return;
map = &storage->map->map;
-
- bpf_cgroup_storage_calculate_size(map, &pages);
- bpf_map_uncharge_memlock(map, pages);
-
stype = cgroup_storage_type(map);
if (stype == BPF_CGROUP_STORAGE_SHARED)
call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
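Like the other map types converted in this series, cgroup storage now allocates through the bpf_map_*() wrappers with __GFP_ACCOUNT, so the memory is charged to the memory cgroup of the map's creator instead of being tracked against RLIMIT_MEMLOCK. A rough way to observe the new accounting from userspace with the raw bpf(2) syscall; the map type, sizes and cgroup path are illustrative and assume sufficient privileges:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = 4;
	attr.value_size = 64;
	attr.max_entries = 1 << 16;	/* preallocated, so the charge shows up immediately */

	fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}
	/* compare /sys/fs/cgroup/<group>/memory.current before and after */
	printf("map fd %d created, press Enter to exit\n", fd);
	getchar();
	return 0;
}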
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 00e32f2ec3e6..cec792a17e5f 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -282,8 +282,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
if (value)
size += trie->map.value_size;
- node = kmalloc_node(size, GFP_ATOMIC | __GFP_NOWARN,
- trie->map.numa_node);
+ node = bpf_map_kmalloc_node(&trie->map, size, GFP_ATOMIC | __GFP_NOWARN,
+ trie->map.numa_node);
if (!node)
return NULL;
@@ -540,8 +540,6 @@ out:
static struct bpf_map *trie_alloc(union bpf_attr *attr)
{
struct lpm_trie *trie;
- u64 cost = sizeof(*trie), cost_per_node;
- int ret;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -557,7 +555,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
attr->value_size > LPM_VAL_SIZE_MAX)
return ERR_PTR(-EINVAL);
- trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN);
+ trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
if (!trie)
return ERR_PTR(-ENOMEM);
@@ -567,20 +565,9 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8;
- cost_per_node = sizeof(struct lpm_trie_node) +
- attr->value_size + trie->data_size;
- cost += (u64) attr->max_entries * cost_per_node;
-
- ret = bpf_map_charge_init(&trie->map.memory, cost);
- if (ret)
- goto out_err;
-
spin_lock_init(&trie->lock);
return &trie->map;
-out_err:
- kfree(trie);
- return ERR_PTR(ret);
}
static void trie_free(struct bpf_map *map)
diff --git a/kernel/bpf/preload/Kconfig b/kernel/bpf/preload/Kconfig
index ace49111d3a3..26bced262473 100644
--- a/kernel/bpf/preload/Kconfig
+++ b/kernel/bpf/preload/Kconfig
@@ -6,6 +6,7 @@ config USERMODE_DRIVER
menuconfig BPF_PRELOAD
bool "Preload BPF file system with kernel specific program and map iterators"
depends on BPF
+ depends on BPF_SYSCALL
# The dependency on !COMPILE_TEST prevents it from being enabled
# in allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile
index 23ee310b6eb4..1951332dd15f 100644
--- a/kernel/bpf/preload/Makefile
+++ b/kernel/bpf/preload/Makefile
@@ -4,8 +4,11 @@ LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_A = $(obj)/libbpf.a
LIBBPF_OUT = $(abspath $(obj))
+# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
+# in tools/scripts/Makefile.include always succeeds when building the kernel
+# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
$(LIBBPF_A):
- $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
+ $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
-I $(srctree)/tools/lib/ -Wno-unused-result
diff --git a/kernel/bpf/preload/iterators/iterators.c b/kernel/bpf/preload/iterators/iterators.c
index b7ff87939172..5d872a705470 100644
--- a/kernel/bpf/preload/iterators/iterators.c
+++ b/kernel/bpf/preload/iterators/iterators.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include <argp.h>
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 0ee2347ba510..f9c734aaa990 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -66,29 +66,21 @@ static int queue_stack_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
- int ret, numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_map_memory mem = {0};
+ int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_queue_stack *qs;
- u64 size, queue_size, cost;
+ u64 size, queue_size;
size = (u64) attr->max_entries + 1;
- cost = queue_size = sizeof(*qs) + size * attr->value_size;
-
- ret = bpf_map_charge_init(&mem, cost);
- if (ret < 0)
- return ERR_PTR(ret);
+ queue_size = sizeof(*qs) + size * attr->value_size;
qs = bpf_map_area_alloc(queue_size, numa_node);
- if (!qs) {
- bpf_map_charge_finish(&mem);
+ if (!qs)
return ERR_PTR(-ENOMEM);
- }
memset(qs, 0, sizeof(*qs));
bpf_map_init_from_attr(&qs->map, attr);
- bpf_map_charge_move(&qs->map.memory, &mem);
qs->size = size;
raw_spin_lock_init(&qs->lock);
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index a55cd542f2ce..4838922f723d 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -150,9 +150,8 @@ static void reuseport_array_free(struct bpf_map *map)
static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
- int err, numa_node = bpf_map_attr_numa_node(attr);
+ int numa_node = bpf_map_attr_numa_node(attr);
struct reuseport_array *array;
- struct bpf_map_memory mem;
u64 array_size;
if (!bpf_capable())
@@ -161,20 +160,13 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
array_size = sizeof(*array);
array_size += (u64)attr->max_entries * sizeof(struct sock *);
- err = bpf_map_charge_init(&mem, array_size);
- if (err)
- return ERR_PTR(err);
-
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size, numa_node);
- if (!array) {
- bpf_map_charge_finish(&mem);
+ if (!array)
return ERR_PTR(-ENOMEM);
- }
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- bpf_map_charge_move(&array->map.memory, &mem);
return &array->map;
}
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 31cb04a4dd2d..f25b719ac786 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -48,7 +48,6 @@ struct bpf_ringbuf {
struct bpf_ringbuf_map {
struct bpf_map map;
- struct bpf_map_memory memory;
struct bpf_ringbuf *rb;
};
@@ -60,8 +59,8 @@ struct bpf_ringbuf_hdr {
static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
- const gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
- __GFP_ZERO;
+ const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN | __GFP_ZERO;
int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
int nr_data_pages = data_sz >> PAGE_SHIFT;
int nr_pages = nr_meta_pages + nr_data_pages;
@@ -88,10 +87,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
* user-space implementations significantly.
*/
array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
- if (array_size > PAGE_SIZE)
- pages = vmalloc_node(array_size, numa_node);
- else
- pages = kmalloc_node(array_size, flags, numa_node);
+ pages = bpf_map_area_alloc(array_size, numa_node);
if (!pages)
return NULL;
@@ -134,7 +130,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
if (!rb)
- return ERR_PTR(-ENOMEM);
+ return NULL;
spin_lock_init(&rb->spinlock);
init_waitqueue_head(&rb->waitq);
@@ -150,8 +146,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
struct bpf_ringbuf_map *rb_map;
- u64 cost;
- int err;
if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
@@ -167,32 +161,19 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
return ERR_PTR(-E2BIG);
#endif
- rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
+ rb_map = kzalloc(sizeof(*rb_map), GFP_USER | __GFP_ACCOUNT);
if (!rb_map)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&rb_map->map, attr);
- cost = sizeof(struct bpf_ringbuf_map) +
- sizeof(struct bpf_ringbuf) +
- attr->max_entries;
- err = bpf_map_charge_init(&rb_map->map.memory, cost);
- if (err)
- goto err_free_map;
-
rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
- if (IS_ERR(rb_map->rb)) {
- err = PTR_ERR(rb_map->rb);
- goto err_uncharge;
+ if (!rb_map->rb) {
+ kfree(rb_map);
+ return ERR_PTR(-ENOMEM);
}
return &rb_map->map;
-
-err_uncharge:
- bpf_map_charge_finish(&rb_map->map.memory);
-err_free_map:
- kfree(rb_map);
- return ERR_PTR(err);
}
static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
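Beyond the accounting change, the ringbuf data area is still sized by max_entries, which for this map type is a byte count. A hedged BPF-C sketch of producing into such a map with the usual reserve/submit helpers; the map name, event layout and attach point are only examples:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* data area size in bytes */
} rb SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tracepoint/sched/sched_process_exec")
int on_exec(void *ctx)
{
	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

	if (!e)		/* ring full: drop the sample */
		return 0;
	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit(e, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";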
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 06065fa27124..be35bfb7fb13 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,10 +7,9 @@
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
-#include <linux/elf.h>
-#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
+#include <linux/buildid.h>
#include "percpu_freelist.h"
#define STACK_CREATE_FLAG_MASK \
@@ -90,7 +89,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
u32 value_size = attr->value_size;
struct bpf_stack_map *smap;
- struct bpf_map_memory mem;
u64 cost, n_buckets;
int err;
@@ -116,18 +114,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
+ if (!n_buckets)
+ return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
- err = bpf_map_charge_init(&mem, cost);
- if (err)
- return ERR_PTR(err);
-
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
- if (!smap) {
- bpf_map_charge_finish(&mem);
+ if (!smap)
return ERR_PTR(-ENOMEM);
- }
bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
@@ -135,158 +129,21 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
err = get_callchain_buffers(sysctl_perf_event_max_stack);
if (err)
- goto free_charge;
+ goto free_smap;
err = prealloc_elems_and_freelist(smap);
if (err)
goto put_buffers;
- bpf_map_charge_move(&smap->map.memory, &mem);
-
return &smap->map;
put_buffers:
put_callchain_buffers();
-free_charge:
- bpf_map_charge_finish(&mem);
+free_smap:
bpf_map_area_free(smap);
return ERR_PTR(err);
}
-#define BPF_BUILD_ID 3
-/*
- * Parse build id from the note segment. This logic can be shared between
- * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
- * identical.
- */
-static inline int stack_map_parse_build_id(void *page_addr,
- unsigned char *build_id,
- void *note_start,
- Elf32_Word note_size)
-{
- Elf32_Word note_offs = 0, new_offs;
-
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
-
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
-
- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
-
- if (nhdr->n_type == BPF_BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU") &&
- nhdr->n_descsz > 0 &&
- nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
- memcpy(build_id,
- note_start + note_offs +
- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- nhdr->n_descsz);
- memset(build_id + nhdr->n_descsz, 0,
- BPF_BUILD_ID_SIZE - nhdr->n_descsz);
- return 0;
- }
- new_offs = note_offs + sizeof(Elf32_Nhdr) +
- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
- if (new_offs <= note_offs) /* overflow */
- break;
- note_offs = new_offs;
- }
- return -EINVAL;
-}
-
-/* Parse build ID from 32-bit ELF */
-static int stack_map_get_build_id_32(void *page_addr,
- unsigned char *build_id)
-{
- Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
- Elf32_Phdr *phdr;
- int i;
-
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
- return -EINVAL;
-
- phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
-
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !stack_map_parse_build_id(page_addr, build_id,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
- return 0;
- }
- return -EINVAL;
-}
-
-/* Parse build ID from 64-bit ELF */
-static int stack_map_get_build_id_64(void *page_addr,
- unsigned char *build_id)
-{
- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
- Elf64_Phdr *phdr;
- int i;
-
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
- return -EINVAL;
-
- phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
-
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !stack_map_parse_build_id(page_addr, build_id,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
- return 0;
- }
- return -EINVAL;
-}
-
-/* Parse build ID of ELF file mapped to vma */
-static int stack_map_get_build_id(struct vm_area_struct *vma,
- unsigned char *build_id)
-{
- Elf32_Ehdr *ehdr;
- struct page *page;
- void *page_addr;
- int ret;
-
- /* only works for page backed storage */
- if (!vma->vm_file)
- return -EINVAL;
-
- page = find_get_page(vma->vm_file->f_mapping, 0);
- if (!page)
- return -EFAULT; /* page not mapped */
-
- ret = -EINVAL;
- page_addr = kmap_atomic(page);
- ehdr = (Elf32_Ehdr *)page_addr;
-
- /* compare magic x7f "ELF" */
- if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
- goto out;
-
- /* only support executable file and shared object file */
- if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
- goto out;
-
- if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
- ret = stack_map_get_build_id_32(page_addr, build_id);
- else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
- ret = stack_map_get_build_id_64(page_addr, build_id);
-out:
- kunmap_atomic(page_addr);
- put_page(page);
- return ret;
-}
-
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
u64 *ips, u32 trace_nr, bool user)
{
@@ -298,7 +155,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
if (irqs_disabled()) {
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
work = this_cpu_ptr(&up_read_work);
- if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+ if (irq_work_is_busy(&work->irq_work)) {
/* cannot queue more up_read, fallback */
irq_work_busy = true;
}
@@ -327,18 +184,18 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
- memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
+ memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
}
return;
}
for (i = 0; i < trace_nr; i++) {
vma = find_vma(current->mm, ips[i]);
- if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
+ if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
- memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
+ memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
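The per-map build ID parser deleted above is replaced by the generic build_id_parse() behind the new linux/buildid.h include, with BUILD_ID_SIZE_MAX as the buffer size. For reference, the same ELF-note walk in a compact userspace form, printing the GNU build ID of the running binary; it skips the kernel's one-page constraints and is purely illustrative:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/proc/self/exe", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	unsigned char *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	Elf64_Ehdr *eh = (Elf64_Ehdr *)map;
	Elf64_Phdr *ph = (Elf64_Phdr *)(map + eh->e_phoff);

	for (int i = 0; i < eh->e_phnum; i++) {
		if (ph[i].p_type != PT_NOTE)
			continue;

		unsigned char *p = map + ph[i].p_offset;
		unsigned char *end = p + ph[i].p_filesz;

		while (p + sizeof(Elf64_Nhdr) < end) {
			Elf64_Nhdr *nh = (Elf64_Nhdr *)p;
			unsigned char *desc = p + sizeof(*nh) + ((nh->n_namesz + 3) & ~3U);

			if (nh->n_type == NT_GNU_BUILD_ID &&
			    nh->n_namesz == sizeof("GNU") &&
			    !memcmp(p + sizeof(*nh), "GNU", 4)) {
				for (unsigned int j = 0; j < nh->n_descsz; j++)
					printf("%02x", desc[j]);
				putchar('\n');
				return 0;
			}
			p = desc + ((nh->n_descsz + 3) & ~3U);
		}
	}
	return 1;	/* no build ID note (binary may be built without one) */
}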
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8f50c9c19f1b..c859bc46d06c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
@@ -31,6 +30,7 @@
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
+#include <linux/memcontrol.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -127,7 +127,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
return map;
}
-static u32 bpf_map_value_size(struct bpf_map *map)
+static u32 bpf_map_value_size(const struct bpf_map *map)
{
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
@@ -267,6 +267,10 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
return err;
}
+/* Please do not use this function outside of the map creation path
+ * (e.g. in the map update path) without taking care of setting the active
+ * memory cgroup (see bpf_map_kmalloc_node() for an example).
+ */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
/* We really just want to fail instead of triggering OOM killer
@@ -279,7 +283,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
* __GFP_RETRY_MAYFAIL to avoid such situations.
*/
- const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
+ const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
unsigned int flags = 0;
unsigned long align = 1;
void *area;
@@ -341,77 +345,6 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
map->numa_node = bpf_map_attr_numa_node(attr);
}
-static int bpf_charge_memlock(struct user_struct *user, u32 pages)
-{
- unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
- atomic_long_sub(pages, &user->locked_vm);
- return -EPERM;
- }
- return 0;
-}
-
-static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
-{
- if (user)
- atomic_long_sub(pages, &user->locked_vm);
-}
-
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
-{
- u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
- struct user_struct *user;
- int ret;
-
- if (size >= U32_MAX - PAGE_SIZE)
- return -E2BIG;
-
- user = get_current_user();
- ret = bpf_charge_memlock(user, pages);
- if (ret) {
- free_uid(user);
- return ret;
- }
-
- mem->pages = pages;
- mem->user = user;
-
- return 0;
-}
-
-void bpf_map_charge_finish(struct bpf_map_memory *mem)
-{
- bpf_uncharge_memlock(mem->user, mem->pages);
- free_uid(mem->user);
-}
-
-void bpf_map_charge_move(struct bpf_map_memory *dst,
- struct bpf_map_memory *src)
-{
- *dst = *src;
-
- /* Make sure src will not be used for the redundant uncharging. */
- memset(src, 0, sizeof(struct bpf_map_memory));
-}
-
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
-{
- int ret;
-
- ret = bpf_charge_memlock(map->memory.user, pages);
- if (ret)
- return ret;
- map->memory.pages += pages;
- return ret;
-}
-
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
-{
- bpf_uncharge_memlock(map->memory.user, pages);
- map->memory.pages -= pages;
-}
-
static int bpf_map_alloc_id(struct bpf_map *map)
{
int id;
@@ -456,17 +389,74 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
__release(&map_idr_lock);
}
+#ifdef CONFIG_MEMCG_KMEM
+static void bpf_map_save_memcg(struct bpf_map *map)
+{
+ map->memcg = get_mem_cgroup_from_mm(current->mm);
+}
+
+static void bpf_map_release_memcg(struct bpf_map *map)
+{
+ mem_cgroup_put(map->memcg);
+}
+
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node)
+{
+ struct mem_cgroup *old_memcg;
+ void *ptr;
+
+ old_memcg = set_active_memcg(map->memcg);
+ ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
+ set_active_memcg(old_memcg);
+
+ return ptr;
+}
+
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+{
+ struct mem_cgroup *old_memcg;
+ void *ptr;
+
+ old_memcg = set_active_memcg(map->memcg);
+ ptr = kzalloc(size, flags | __GFP_ACCOUNT);
+ set_active_memcg(old_memcg);
+
+ return ptr;
+}
+
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags)
+{
+ struct mem_cgroup *old_memcg;
+ void __percpu *ptr;
+
+ old_memcg = set_active_memcg(map->memcg);
+ ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
+ set_active_memcg(old_memcg);
+
+ return ptr;
+}
+
+#else
+static void bpf_map_save_memcg(struct bpf_map *map)
+{
+}
+
+static void bpf_map_release_memcg(struct bpf_map *map)
+{
+}
+#endif
+
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
- struct bpf_map_memory mem;
- bpf_map_charge_move(&mem, &map->memory);
security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
/* implementation dependent freeing */
map->ops->map_free(map);
- bpf_map_charge_finish(&mem);
}
static void bpf_map_put_uref(struct bpf_map *map)
@@ -527,6 +517,19 @@ static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
}
#ifdef CONFIG_PROC_FS
+/* Provides an approximation of the map's memory footprint.
+ * Used only to provide backward compatibility and to display
+ * reasonable "memlock" info.
+ */
+static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
+{
+ unsigned long size;
+
+ size = round_up(map->key_size + bpf_map_value_size(map), 8);
+
+ return round_up(map->max_entries * size, PAGE_SIZE);
+}
+
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_map *map = filp->private_data;
@@ -545,7 +548,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
"value_size:\t%u\n"
"max_entries:\t%u\n"
"map_flags:\t%#x\n"
- "memlock:\t%llu\n"
+ "memlock:\t%lu\n"
"map_id:\t%u\n"
"frozen:\t%u\n",
map->map_type,
@@ -553,7 +556,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
map->value_size,
map->max_entries,
map->map_flags,
- map->memory.pages * 1ULL << PAGE_SHIFT,
+ bpf_map_memory_footprint(map),
map->id,
READ_ONCE(map->frozen));
if (type) {
@@ -773,7 +776,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
map->map_type != BPF_MAP_TYPE_ARRAY &&
map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
- map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
+ map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
+ map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
return -ENOTSUPP;
if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
map->value_size) {
@@ -795,7 +799,6 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
static int map_create(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_map_memory mem;
struct bpf_map *map;
int f_flags;
int err;
@@ -874,6 +877,8 @@ static int map_create(union bpf_attr *attr)
if (err)
goto free_map_sec;
+ bpf_map_save_memcg(map);
+
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
/* failed to allocate fd.
@@ -892,9 +897,7 @@ free_map_sec:
security_bpf_map_free(map);
free_map:
btf_put(map->btf);
- bpf_map_charge_move(&mem, &map->memory);
map->ops->map_free(map);
- bpf_map_charge_finish(&mem);
return err;
}
@@ -1628,51 +1631,6 @@ static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
audit_log_end(ab);
}
-int __bpf_prog_charge(struct user_struct *user, u32 pages)
-{
- unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- unsigned long user_bufs;
-
- if (user) {
- user_bufs = atomic_long_add_return(pages, &user->locked_vm);
- if (user_bufs > memlock_limit) {
- atomic_long_sub(pages, &user->locked_vm);
- return -EPERM;
- }
- }
-
- return 0;
-}
-
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
-{
- if (user)
- atomic_long_sub(pages, &user->locked_vm);
-}
-
-static int bpf_prog_charge_memlock(struct bpf_prog *prog)
-{
- struct user_struct *user = get_current_user();
- int ret;
-
- ret = __bpf_prog_charge(user, prog->pages);
- if (ret) {
- free_uid(user);
- return ret;
- }
-
- prog->aux->user = user;
- return 0;
-}
-
-static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
-{
- struct user_struct *user = prog->aux->user;
-
- __bpf_prog_uncharge(user, prog->pages);
- free_uid(user);
-}
-
static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
int id;
@@ -1722,7 +1680,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
kvfree(aux->func_info);
kfree(aux->func_info_aux);
- bpf_prog_uncharge_memlock(aux->prog);
+ free_uid(aux->user);
security_bpf_prog_free(aux);
bpf_prog_free(aux->prog);
}
@@ -1732,6 +1690,8 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
bpf_prog_kallsyms_del_all(prog);
btf_put(prog->aux->btf);
bpf_prog_free_linfo(prog);
+ if (prog->aux->attach_btf)
+ btf_put(prog->aux->attach_btf);
if (deferred) {
if (prog->aux->sleepable)
@@ -1771,25 +1731,28 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
static void bpf_prog_get_stats(const struct bpf_prog *prog,
struct bpf_prog_stats *stats)
{
- u64 nsecs = 0, cnt = 0;
+ u64 nsecs = 0, cnt = 0, misses = 0;
int cpu;
for_each_possible_cpu(cpu) {
const struct bpf_prog_stats *st;
unsigned int start;
- u64 tnsecs, tcnt;
+ u64 tnsecs, tcnt, tmisses;
- st = per_cpu_ptr(prog->aux->stats, cpu);
+ st = per_cpu_ptr(prog->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&st->syncp);
tnsecs = st->nsecs;
tcnt = st->cnt;
+ tmisses = st->misses;
} while (u64_stats_fetch_retry_irq(&st->syncp, start));
nsecs += tnsecs;
cnt += tcnt;
+ misses += tmisses;
}
stats->nsecs = nsecs;
stats->cnt = cnt;
+ stats->misses = misses;
}
#ifdef CONFIG_PROC_FS
@@ -1808,14 +1771,16 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
"memlock:\t%llu\n"
"prog_id:\t%u\n"
"run_time_ns:\t%llu\n"
- "run_cnt:\t%llu\n",
+ "run_cnt:\t%llu\n"
+ "recursion_misses:\t%llu\n",
prog->type,
prog->jited,
prog_tag,
prog->pages * 1ULL << PAGE_SHIFT,
prog->aux->id,
stats.nsecs,
- stats.cnt);
+ stats.cnt,
+ stats.misses);
}
#endif
@@ -1965,12 +1930,16 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
enum bpf_attach_type expected_attach_type,
- u32 btf_id, u32 prog_fd)
+ struct btf *attach_btf, u32 btf_id,
+ struct bpf_prog *dst_prog)
{
if (btf_id) {
if (btf_id > BTF_MAX_TYPE)
return -EINVAL;
+ if (!attach_btf && !dst_prog)
+ return -EINVAL;
+
switch (prog_type) {
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_LSM:
@@ -1982,7 +1951,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
}
}
- if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
+ if (attach_btf && (!btf_id || dst_prog))
+ return -EINVAL;
+
+ if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
prog_type != BPF_PROG_TYPE_EXT)
return -EINVAL;
@@ -2099,7 +2071,8 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
enum bpf_prog_type type = attr->prog_type;
- struct bpf_prog *prog;
+ struct bpf_prog *prog, *dst_prog = NULL;
+ struct btf *attach_btf = NULL;
int err;
char license[128];
bool is_gpl;
@@ -2141,47 +2114,73 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (is_perfmon_prog_type(type) && !perfmon_capable())
return -EPERM;
+ /* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a bpf_prog
+ * or a btf; we need to check which one it is
+ */
+ if (attr->attach_prog_fd) {
+ dst_prog = bpf_prog_get(attr->attach_prog_fd);
+ if (IS_ERR(dst_prog)) {
+ dst_prog = NULL;
+ attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
+ if (IS_ERR(attach_btf))
+ return -EINVAL;
+ if (!btf_is_kernel(attach_btf)) {
+ /* attaching by specifying a bpf_prog's BTF
+ * object directly might be supported eventually
+ */
+ btf_put(attach_btf);
+ return -ENOTSUPP;
+ }
+ }
+ } else if (attr->attach_btf_id) {
+ /* fall back to vmlinux BTF, if BTF type ID is specified */
+ attach_btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(attach_btf))
+ return PTR_ERR(attach_btf);
+ if (!attach_btf)
+ return -EINVAL;
+ btf_get(attach_btf);
+ }
+
bpf_prog_load_fixup_attach_type(attr);
if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
- attr->attach_btf_id,
- attr->attach_prog_fd))
+ attach_btf, attr->attach_btf_id,
+ dst_prog)) {
+ if (dst_prog)
+ bpf_prog_put(dst_prog);
+ if (attach_btf)
+ btf_put(attach_btf);
return -EINVAL;
+ }
/* plain bpf_prog allocation */
prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
- if (!prog)
+ if (!prog) {
+ if (dst_prog)
+ bpf_prog_put(dst_prog);
+ if (attach_btf)
+ btf_put(attach_btf);
return -ENOMEM;
+ }
prog->expected_attach_type = attr->expected_attach_type;
+ prog->aux->attach_btf = attach_btf;
prog->aux->attach_btf_id = attr->attach_btf_id;
- if (attr->attach_prog_fd) {
- struct bpf_prog *dst_prog;
-
- dst_prog = bpf_prog_get(attr->attach_prog_fd);
- if (IS_ERR(dst_prog)) {
- err = PTR_ERR(dst_prog);
- goto free_prog_nouncharge;
- }
- prog->aux->dst_prog = dst_prog;
- }
-
+ prog->aux->dst_prog = dst_prog;
prog->aux->offload_requested = !!attr->prog_ifindex;
prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
err = security_bpf_prog_alloc(prog->aux);
if (err)
- goto free_prog_nouncharge;
-
- err = bpf_prog_charge_memlock(prog);
- if (err)
- goto free_prog_sec;
+ goto free_prog;
+ prog->aux->user = get_current_user();
prog->len = attr->insn_cnt;
err = -EFAULT;
if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
bpf_prog_insn_size(prog)) != 0)
- goto free_prog;
+ goto free_prog_sec;
prog->orig_prog = NULL;
prog->jited = 0;
@@ -2192,19 +2191,19 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (bpf_prog_is_dev_bound(prog->aux)) {
err = bpf_prog_offload_init(prog, attr);
if (err)
- goto free_prog;
+ goto free_prog_sec;
}
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
if (err < 0)
- goto free_prog;
+ goto free_prog_sec;
prog->aux->load_time = ktime_get_boottime_ns();
err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
sizeof(attr->prog_name));
if (err < 0)
- goto free_prog;
+ goto free_prog_sec;
/* run eBPF verifier */
err = bpf_check(&prog, attr, uattr);
@@ -2249,11 +2248,12 @@ free_used_maps:
*/
__bpf_prog_put_noref(prog, prog->aux->func_cnt);
return err;
-free_prog:
- bpf_prog_uncharge_memlock(prog);
free_prog_sec:
+ free_uid(prog->aux->user);
security_bpf_prog_free(prog->aux);
-free_prog_nouncharge:
+free_prog:
+ if (prog->aux->attach_btf)
+ btf_put(prog->aux->attach_btf);
bpf_prog_free(prog);
return err;
}
@@ -2611,7 +2611,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
goto out_put_prog;
}
- key = bpf_trampoline_compute_key(tgt_prog, btf_id);
+ key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
}
link = kzalloc(sizeof(*link), GFP_USER);
@@ -2717,7 +2717,6 @@ out_unlock:
out_put_prog:
if (tgt_prog_fd && tgt_prog)
bpf_prog_put(tgt_prog);
- bpf_prog_put(prog);
return err;
}
@@ -2830,7 +2829,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
tp_name = prog->aux->attach_func_name;
break;
}
- return bpf_tracing_prog_attach(prog, 0, 0);
+ err = bpf_tracing_prog_attach(prog, 0, 0);
+ if (err >= 0)
+ return err;
+ goto out_put_prog;
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
@@ -3441,6 +3443,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
bpf_prog_get_stats(prog, &stats);
info.run_time_ns = stats.nsecs;
info.run_cnt = stats.cnt;
+ info.recursion_misses = stats.misses;
if (!bpf_capable()) {
info.jited_prog_len = 0;
@@ -3588,7 +3591,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
}
if (prog->aux->btf)
- info.btf_id = btf_id(prog->aux->btf);
+ info.btf_id = btf_obj_id(prog->aux->btf);
ulen = info.nr_func_info;
info.nr_func_info = prog->aux->func_info_cnt;
@@ -3691,7 +3694,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
memcpy(info.name, map->name, sizeof(map->name));
if (map->btf) {
- info.btf_id = btf_id(map->btf);
+ info.btf_id = btf_obj_id(map->btf);
info.btf_key_type_id = map->btf_key_type_id;
info.btf_value_type_id = map->btf_value_type_id;
}
@@ -3878,7 +3881,6 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
pid_t pid = attr->task_fd_query.pid;
u32 fd = attr->task_fd_query.fd;
const struct perf_event *event;
- struct files_struct *files;
struct task_struct *task;
struct file *file;
int err;
@@ -3896,23 +3898,11 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
if (!task)
return -ENOENT;
- files = get_files_struct(task);
- put_task_struct(task);
- if (!files)
- return -ENOENT;
-
err = 0;
- spin_lock(&files->file_lock);
- file = fcheck_files(files, fd);
+ file = fget_task(task, fd);
+ put_task_struct(task);
if (!file)
- err = -EBADF;
- else
- get_file(file);
- spin_unlock(&files->file_lock);
- put_files_struct(files);
-
- if (err)
- goto out;
+ return -EBADF;
if (file->f_op == &bpf_link_fops) {
struct bpf_link *link = file->private_data;
@@ -3952,7 +3942,6 @@ out_not_supp:
err = -ENOTSUPP;
put_file:
fput(file);
-out:
return err;
}
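With the memlock charging gone, the "memlock" value in a map's fdinfo becomes the approximation computed by bpf_map_memory_footprint() above: max_entries times the 8-byte-rounded key+value size, rounded up to a page. A small worked example of that arithmetic; the map dimensions are made up:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long round_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) / a * a;
}

int main(void)
{
	unsigned long key_size = 4, value_size = 64, max_entries = 65536;
	unsigned long per_entry = round_up(key_size + value_size, 8);		/* 68 -> 72 */
	unsigned long footprint = round_up(max_entries * per_entry, PAGE_SIZE);

	/* compare against the "memlock:" line in /proc/<pid>/fdinfo/<map fd> */
	printf("approximate memlock: %lu bytes\n", footprint);			/* 4718592 */
	return 0;
}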
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index 11b3380887fa..ef6911aee3bb 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -26,7 +26,7 @@ static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
.read = btf_vmlinux_read,
};
-static struct kobject *btf_kobj;
+struct kobject *btf_kobj;
static int __init btf_vmlinux_init(void)
{
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 5b6af30bfbcd..b68cb5d6d6eb 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -37,7 +37,7 @@ retry:
if (!task) {
++*tid;
goto retry;
- } else if (skip_if_dup_files && task->tgid != task->pid &&
+ } else if (skip_if_dup_files && !thread_group_leader(task) &&
task->files == task->group_leader->files) {
put_task_struct(task);
task = NULL;
@@ -130,46 +130,36 @@ struct bpf_iter_seq_task_file_info {
*/
struct bpf_iter_seq_task_common common;
struct task_struct *task;
- struct files_struct *files;
u32 tid;
u32 fd;
};
static struct file *
-task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info,
- struct task_struct **task, struct files_struct **fstruct)
+task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
struct pid_namespace *ns = info->common.ns;
- u32 curr_tid = info->tid, max_fds;
- struct files_struct *curr_files;
+ u32 curr_tid = info->tid;
struct task_struct *curr_task;
- int curr_fd = info->fd;
+ unsigned int curr_fd = info->fd;
/* If this function returns a non-NULL file object,
- * it held a reference to the task/files_struct/file.
+ * it held a reference to the task/file.
* Otherwise, it does not hold any reference.
*/
again:
- if (*task) {
- curr_task = *task;
- curr_files = *fstruct;
+ if (info->task) {
+ curr_task = info->task;
curr_fd = info->fd;
} else {
- curr_task = task_seq_get_next(ns, &curr_tid, true);
- if (!curr_task)
- return NULL;
-
- curr_files = get_files_struct(curr_task);
- if (!curr_files) {
- put_task_struct(curr_task);
- curr_tid = ++(info->tid);
- info->fd = 0;
- goto again;
- }
-
- /* set *fstruct, *task and info->tid */
- *fstruct = curr_files;
- *task = curr_task;
+ curr_task = task_seq_get_next(ns, &curr_tid, true);
+ if (!curr_task) {
+ info->task = NULL;
+ info->tid = curr_tid;
+ return NULL;
+ }
+
+ /* set info->task and info->tid */
+ info->task = curr_task;
if (curr_tid == info->tid) {
curr_fd = info->fd;
} else {
@@ -179,13 +169,11 @@ again:
}
rcu_read_lock();
- max_fds = files_fdtable(curr_files)->max_fds;
- for (; curr_fd < max_fds; curr_fd++) {
+ for (;; curr_fd++) {
struct file *f;
-
- f = fcheck_files(curr_files, curr_fd);
+ f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
if (!f)
- continue;
+ break;
if (!get_file_rcu(f))
continue;
@@ -197,10 +185,8 @@ again:
/* the current task is done, go to the next task */
rcu_read_unlock();
- put_files_struct(curr_files);
put_task_struct(curr_task);
- *task = NULL;
- *fstruct = NULL;
+ info->task = NULL;
info->fd = 0;
curr_tid = ++(info->tid);
goto again;
@@ -209,21 +195,12 @@ again:
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
struct bpf_iter_seq_task_file_info *info = seq->private;
- struct files_struct *files = NULL;
- struct task_struct *task = NULL;
struct file *file;
- file = task_file_seq_get_next(info, &task, &files);
- if (!file) {
- info->files = NULL;
- info->task = NULL;
- return NULL;
- }
-
- if (*pos == 0)
+ info->task = NULL;
+ file = task_file_seq_get_next(info);
+ if (file && *pos == 0)
++*pos;
- info->task = task;
- info->files = files;
return file;
}
@@ -231,24 +208,11 @@ static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bpf_iter_seq_task_file_info *info = seq->private;
- struct files_struct *files = info->files;
- struct task_struct *task = info->task;
- struct file *file;
++*pos;
++info->fd;
fput((struct file *)v);
- file = task_file_seq_get_next(info, &task, &files);
- if (!file) {
- info->files = NULL;
- info->task = NULL;
- return NULL;
- }
-
- info->task = task;
- info->files = files;
-
- return file;
+ return task_file_seq_get_next(info);
}
struct bpf_iter__task_file {
@@ -295,9 +259,7 @@ static void task_file_seq_stop(struct seq_file *seq, void *v)
(void)__task_file_seq_show(seq, v, true);
} else {
fput((struct file *)v);
- put_files_struct(info->files);
put_task_struct(info->task);
- info->files = NULL;
info->task = NULL;
}
}
@@ -324,9 +286,248 @@ static const struct seq_operations task_file_seq_ops = {
.show = task_file_seq_show,
};
+struct bpf_iter_seq_task_vma_info {
+ /* The first field must be struct bpf_iter_seq_task_common.
+ * this is assumed by {init, fini}_seq_pidns() callback functions.
+ */
+ struct bpf_iter_seq_task_common common;
+ struct task_struct *task;
+ struct vm_area_struct *vma;
+ u32 tid;
+ unsigned long prev_vm_start;
+ unsigned long prev_vm_end;
+};
+
+enum bpf_task_vma_iter_find_op {
+ task_vma_iter_first_vma, /* use mm->mmap */
+ task_vma_iter_next_vma, /* use curr_vma->vm_next */
+ task_vma_iter_find_vma, /* use find_vma() to find next vma */
+};
+
+static struct vm_area_struct *
+task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
+{
+ struct pid_namespace *ns = info->common.ns;
+ enum bpf_task_vma_iter_find_op op;
+ struct vm_area_struct *curr_vma;
+ struct task_struct *curr_task;
+ u32 curr_tid = info->tid;
+
+ /* If this function returns a non-NULL vma, it holds a reference to
+ * the task_struct, and holds read lock on vma->mm->mmap_lock.
+ * If this function returns NULL, it does not hold any reference or
+ * lock.
+ */
+ if (info->task) {
+ curr_task = info->task;
+ curr_vma = info->vma;
+ /* In case of lock contention, drop mmap_lock to unblock
+ * the writer.
+ *
+ * After relock, call find_vma(mm, prev_vm_end - 1) to find
+ * new vma to process.
+ *
+ * +------+------+-----------+
+ * | VMA1 | VMA2 | VMA3 |
+ * +------+------+-----------+
+ * | | | |
+ * 4k 8k 16k 400k
+ *
+ * For example, curr_vma == VMA2. Before unlock, we set
+ *
+ * prev_vm_start = 8k
+ * prev_vm_end = 16k
+ *
+ * There are a few cases:
+ *
+ * 1) VMA2 is freed, but VMA3 exists.
+ *
+ * find_vma() will return VMA3, just process VMA3.
+ *
+ * 2) VMA2 still exists.
+ *
+ * find_vma() will return VMA2, process VMA2->next.
+ *
+ * 3) no more vma in this mm.
+ *
+ * Process the next task.
+ *
+ * 4) find_vma() returns a different vma, VMA2'.
+ *
+ * 4.1) If VMA2 covers the same range as VMA2', skip VMA2',
+ * because we already covered the range;
+ * 4.2) VMA2 and VMA2' cover different ranges, process
+ * VMA2'.
+ */
+ if (mmap_lock_is_contended(curr_task->mm)) {
+ info->prev_vm_start = curr_vma->vm_start;
+ info->prev_vm_end = curr_vma->vm_end;
+ op = task_vma_iter_find_vma;
+ mmap_read_unlock(curr_task->mm);
+ if (mmap_read_lock_killable(curr_task->mm))
+ goto finish;
+ } else {
+ op = task_vma_iter_next_vma;
+ }
+ } else {
+again:
+ curr_task = task_seq_get_next(ns, &curr_tid, true);
+ if (!curr_task) {
+ info->tid = curr_tid + 1;
+ goto finish;
+ }
+
+ if (curr_tid != info->tid) {
+ info->tid = curr_tid;
+ /* new task, process the first vma */
+ op = task_vma_iter_first_vma;
+ } else {
+ /* Found the same tid, which means the user space
+ * finished data in previous buffer and read more.
+ * We dropped mmap_lock before returning to user
+ * space, so it is necessary to use find_vma() to
+ * find the next vma to process.
+ */
+ op = task_vma_iter_find_vma;
+ }
+
+ if (!curr_task->mm)
+ goto next_task;
+
+ if (mmap_read_lock_killable(curr_task->mm))
+ goto finish;
+ }
+
+ switch (op) {
+ case task_vma_iter_first_vma:
+ curr_vma = curr_task->mm->mmap;
+ break;
+ case task_vma_iter_next_vma:
+ curr_vma = curr_vma->vm_next;
+ break;
+ case task_vma_iter_find_vma:
+ /* We dropped mmap_lock so it is necessary to use find_vma
+ * to find the next vma. This is similar to the mechanism
+ * in show_smaps_rollup().
+ */
+ curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
+ /* case 1) and 4.2) above just use curr_vma */
+
+ /* check for case 2) or case 4.1) above */
+ if (curr_vma &&
+ curr_vma->vm_start == info->prev_vm_start &&
+ curr_vma->vm_end == info->prev_vm_end)
+ curr_vma = curr_vma->vm_next;
+ break;
+ }
+ if (!curr_vma) {
+ /* case 3) above, or case 2) 4.1) with vma->next == NULL */
+ mmap_read_unlock(curr_task->mm);
+ goto next_task;
+ }
+ info->task = curr_task;
+ info->vma = curr_vma;
+ return curr_vma;
+
+next_task:
+ put_task_struct(curr_task);
+ info->task = NULL;
+ curr_tid++;
+ goto again;
+
+finish:
+ if (curr_task)
+ put_task_struct(curr_task);
+ info->task = NULL;
+ info->vma = NULL;
+ return NULL;
+}
+
+static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct bpf_iter_seq_task_vma_info *info = seq->private;
+ struct vm_area_struct *vma;
+
+ vma = task_vma_seq_get_next(info);
+ if (vma && *pos == 0)
+ ++*pos;
+
+ return vma;
+}
+
+static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bpf_iter_seq_task_vma_info *info = seq->private;
+
+ ++*pos;
+ return task_vma_seq_get_next(info);
+}
+
+struct bpf_iter__task_vma {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct task_struct *, task);
+ __bpf_md_ptr(struct vm_area_struct *, vma);
+};
+
+DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
+ struct task_struct *task, struct vm_area_struct *vma)
+
+static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
+{
+ struct bpf_iter_seq_task_vma_info *info = seq->private;
+ struct bpf_iter__task_vma ctx;
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, in_stop);
+ if (!prog)
+ return 0;
+
+ ctx.meta = &meta;
+ ctx.task = info->task;
+ ctx.vma = info->vma;
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int task_vma_seq_show(struct seq_file *seq, void *v)
+{
+ return __task_vma_seq_show(seq, false);
+}
+
+static void task_vma_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_seq_task_vma_info *info = seq->private;
+
+ if (!v) {
+ (void)__task_vma_seq_show(seq, true);
+ } else {
+ /* info->vma has not been seen by the BPF program. If the
+ * user space reads more, task_vma_seq_get_next should
+ * return this vma again. Set prev_vm_start to ~0UL,
+ * so that we don't skip the vma returned by the next
+ * find_vma() (case task_vma_iter_find_vma in
+ * task_vma_seq_get_next()).
+ */
+ info->prev_vm_start = ~0UL;
+ info->prev_vm_end = info->vma->vm_end;
+ mmap_read_unlock(info->task->mm);
+ put_task_struct(info->task);
+ info->task = NULL;
+ }
+}
+
+static const struct seq_operations task_vma_seq_ops = {
+ .start = task_vma_seq_start,
+ .next = task_vma_seq_next,
+ .stop = task_vma_seq_stop,
+ .show = task_vma_seq_show,
+};
+
BTF_ID_LIST(btf_task_file_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)
+BTF_ID(struct, vm_area_struct)
static const struct bpf_iter_seq_info task_seq_info = {
.seq_ops = &task_seq_ops,
@@ -337,6 +538,7 @@ static const struct bpf_iter_seq_info task_seq_info = {
static struct bpf_iter_reg task_reg_info = {
.target = "task",
+ .feature = BPF_ITER_RESCHED,
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__task, task),
@@ -354,6 +556,7 @@ static const struct bpf_iter_seq_info task_file_seq_info = {
static struct bpf_iter_reg task_file_reg_info = {
.target = "task_file",
+ .feature = BPF_ITER_RESCHED,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__task_file, task),
@@ -364,6 +567,26 @@ static struct bpf_iter_reg task_file_reg_info = {
.seq_info = &task_file_seq_info,
};
+static const struct bpf_iter_seq_info task_vma_seq_info = {
+ .seq_ops = &task_vma_seq_ops,
+ .init_seq_private = init_seq_pidns,
+ .fini_seq_private = fini_seq_pidns,
+ .seq_priv_size = sizeof(struct bpf_iter_seq_task_vma_info),
+};
+
+static struct bpf_iter_reg task_vma_reg_info = {
+ .target = "task_vma",
+ .feature = BPF_ITER_RESCHED,
+ .ctx_arg_info_size = 2,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__task_vma, task),
+ PTR_TO_BTF_ID_OR_NULL },
+ { offsetof(struct bpf_iter__task_vma, vma),
+ PTR_TO_BTF_ID_OR_NULL },
+ },
+ .seq_info = &task_vma_seq_info,
+};
+
static int __init task_iter_init(void)
{
int ret;
@@ -375,6 +598,12 @@ static int __init task_iter_init(void)
task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
- return bpf_iter_reg_target(&task_file_reg_info);
+ ret = bpf_iter_reg_target(&task_file_reg_info);
+ if (ret)
+ return ret;
+
+ task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
+ task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[2];
+ return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);
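For the new task_vma target registered above, a hedged BPF-side sketch in the style of the existing task/task_file iterator programs; it assumes vmlinux.h and the libbpf helper headers that provide BPF_SEQ_PRINTF, and the program name is only an example:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/task_vma")
int dump_task_vma(struct bpf_iter__task_vma *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	struct vm_area_struct *vma = ctx->vma;

	if (!task || !vma)
		return 0;

	/* one line per VMA: pid and address range */
	BPF_SEQ_PRINTF(seq, "%8d %08llx-%08llx\n",
		       task->pid, (__u64)vma->vm_start, (__u64)vma->vm_end);
	return 0;
}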
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 35c5887d82ff..7bc3b3209224 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -381,55 +381,100 @@ out:
mutex_unlock(&trampoline_mutex);
}
+#define NO_START_TIME 1
+static u64 notrace bpf_prog_start_time(void)
+{
+ u64 start = NO_START_TIME;
+
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ start = sched_clock();
+ if (unlikely(!start))
+ start = NO_START_TIME;
+ }
+ return start;
+}
+
+static void notrace inc_misses_counter(struct bpf_prog *prog)
+{
+ struct bpf_prog_stats *stats;
+
+ stats = this_cpu_ptr(prog->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->misses++;
+ u64_stats_update_end(&stats->syncp);
+}
+
/* The logic is similar to BPF_PROG_RUN, but with an explicit
* rcu_read_lock() and migrate_disable() which are required
* for the trampoline. The macro is split into
- * call _bpf_prog_enter
+ * call __bpf_prog_enter
* call prog->bpf_func
* call __bpf_prog_exit
+ *
+ * __bpf_prog_enter returns:
+ * 0 - skip execution of the bpf prog
+ * 1 - execute bpf prog
+ * [2..MAX_U64] - execute bpf prog and record execution time.
+ * This is start time.
*/
-u64 notrace __bpf_prog_enter(void)
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
__acquires(RCU)
{
- u64 start = 0;
-
rcu_read_lock();
migrate_disable();
- if (static_branch_unlikely(&bpf_stats_enabled_key))
- start = sched_clock();
- return start;
+ if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+ inc_misses_counter(prog);
+ return 0;
+ }
+ return bpf_prog_start_time();
}
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
- __releases(RCU)
+static void notrace update_prog_stats(struct bpf_prog *prog,
+ u64 start)
{
struct bpf_prog_stats *stats;
if (static_branch_unlikely(&bpf_stats_enabled_key) &&
- /* static_key could be enabled in __bpf_prog_enter
- * and disabled in __bpf_prog_exit.
+ /* static_key could be enabled in __bpf_prog_enter*
+ * and disabled in __bpf_prog_exit*.
* And vice versa.
- * Hence check that 'start' is not zero.
+ * Hence check that 'start' is valid.
*/
- start) {
- stats = this_cpu_ptr(prog->aux->stats);
+ start > NO_START_TIME) {
+ stats = this_cpu_ptr(prog->stats);
u64_stats_update_begin(&stats->syncp);
stats->cnt++;
stats->nsecs += sched_clock() - start;
u64_stats_update_end(&stats->syncp);
}
+}
+
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+ __releases(RCU)
+{
+ update_prog_stats(prog, start);
+ __this_cpu_dec(*(prog->active));
migrate_enable();
rcu_read_unlock();
}
-void notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
rcu_read_lock_trace();
+ migrate_disable();
might_fault();
+ if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+ inc_misses_counter(prog);
+ return 0;
+ }
+ return bpf_prog_start_time();
}
-void notrace __bpf_prog_exit_sleepable(void)
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
+ update_prog_stats(prog, start);
+ __this_cpu_dec(*(prog->active));
+ migrate_enable();
rcu_read_unlock_trace();
}
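The reworked enter/exit pair adds two things: a per-CPU prog->active counter that turns re-entrant invocations into counted misses, and the NO_START_TIME sentinel so one return value can mean "skip", "run but don't time" or "run and record stats". A simplified, single-threaded userspace analogue of that protocol, with a thread-local counter standing in for the per-CPU one; the names are invented and this is not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NO_START_TIME 1

static __thread int active;			/* stand-in for the per-CPU prog->active */
static atomic_ulong misses, runs, nsecs;
static bool stats_enabled = true;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t prog_enter(void)
{
	if (++active != 1) {			/* re-entrant call: count a miss, skip the prog */
		atomic_fetch_add(&misses, 1);
		return 0;
	}
	return stats_enabled ? now_ns() : NO_START_TIME;
}

static void prog_exit(uint64_t start)		/* always paired with prog_enter() */
{
	if (stats_enabled && start > NO_START_TIME) {
		atomic_fetch_add(&runs, 1);
		atomic_fetch_add(&nsecs, now_ns() - start);
	}
	active--;
}

int main(void)
{
	uint64_t start = prog_enter();

	if (start) {
		/* the attached program body would run here */
	}
	prog_exit(start);

	printf("runs=%lu misses=%lu nsecs=%lu\n",
	       atomic_load(&runs), atomic_load(&misses), atomic_load(&nsecs));
	return 0;
}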
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6200519582a6..1dda9d81f12c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -228,6 +228,12 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
+static bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_CALL;
+}
+
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
@@ -238,7 +244,9 @@ struct bpf_call_arg_meta {
u64 msize_max_value;
int ref_obj_id;
int func_id;
+ struct btf *btf;
u32 btf_id;
+ struct btf *ret_btf;
u32 ret_btf_id;
};
@@ -556,10 +564,9 @@ static struct bpf_func_state *func(struct bpf_verifier_env *env,
return cur->frame[reg->frameno];
}
-const char *kernel_type_name(u32 id)
+static const char *kernel_type_name(const struct btf* btf, u32 id)
{
- return btf_name_by_offset(btf_vmlinux,
- btf_type_by_id(btf_vmlinux, id)->name_off);
+ return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
static void print_verifier_state(struct bpf_verifier_env *env,
@@ -589,7 +596,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
if (t == PTR_TO_BTF_ID ||
t == PTR_TO_BTF_ID_OR_NULL ||
t == PTR_TO_PERCPU_BTF_ID)
- verbose(env, "%s", kernel_type_name(reg->btf_id));
+ verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
@@ -1072,6 +1079,51 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
__mark_reg_known_zero(regs + regno);
}
+static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
+{
+ switch (reg->type) {
+ case PTR_TO_MAP_VALUE_OR_NULL: {
+ const struct bpf_map *map = reg->map_ptr;
+
+ if (map->inner_map_meta) {
+ reg->type = CONST_PTR_TO_MAP;
+ reg->map_ptr = map->inner_map_meta;
+ } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
+ reg->type = PTR_TO_XDP_SOCK;
+ } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
+ map->map_type == BPF_MAP_TYPE_SOCKHASH) {
+ reg->type = PTR_TO_SOCKET;
+ } else {
+ reg->type = PTR_TO_MAP_VALUE;
+ }
+ break;
+ }
+ case PTR_TO_SOCKET_OR_NULL:
+ reg->type = PTR_TO_SOCKET;
+ break;
+ case PTR_TO_SOCK_COMMON_OR_NULL:
+ reg->type = PTR_TO_SOCK_COMMON;
+ break;
+ case PTR_TO_TCP_SOCK_OR_NULL:
+ reg->type = PTR_TO_TCP_SOCK;
+ break;
+ case PTR_TO_BTF_ID_OR_NULL:
+ reg->type = PTR_TO_BTF_ID;
+ break;
+ case PTR_TO_MEM_OR_NULL:
+ reg->type = PTR_TO_MEM;
+ break;
+ case PTR_TO_RDONLY_BUF_OR_NULL:
+ reg->type = PTR_TO_RDONLY_BUF;
+ break;
+ case PTR_TO_RDWR_BUF_OR_NULL:
+ reg->type = PTR_TO_RDWR_BUF;
+ break;
+ default:
+ WARN_ON("unknown nullable register type");
+ }
+}
+
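The classic source pattern that exercises this conversion, sketched as a small BPF C program (map, section and function names are illustrative; libbpf-style headers assumed): the lookup returns a PTR_TO_MAP_VALUE_OR_NULL register and the null check lets the verifier switch it to the non-null type in the surviving branch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} vals SEC(".maps");

SEC("raw_tp/sys_enter")
int null_check(void *ctx)
{
	__u32 key = 0;
	/* R0 after the call: PTR_TO_MAP_VALUE_OR_NULL */
	__u64 *v = bpf_map_lookup_elem(&vals, &key);

	if (!v)          /* null branch: register becomes SCALAR_VALUE */
		return 0;
	return *v != 0;  /* non-null branch: PTR_TO_MAP_VALUE, load allowed */
}

char LICENSE[] SEC("license") = "GPL";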
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
@@ -1298,9 +1350,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
static bool __reg64_bound_s32(s64 a)
{
- if (a > S32_MIN && a < S32_MAX)
- return true;
- return false;
+ return a > S32_MIN && a < S32_MAX;
}
static bool __reg64_bound_u32(u64 a)
@@ -1314,10 +1364,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
__mark_reg32_unbounded(reg);
- if (__reg64_bound_s32(reg->smin_value))
+ if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
reg->s32_min_value = (s32)reg->smin_value;
- if (__reg64_bound_s32(reg->smax_value))
reg->s32_max_value = (s32)reg->smax_value;
+ }
if (__reg64_bound_u32(reg->umin_value))
reg->u32_min_value = (u32)reg->umin_value;
if (__reg64_bound_u32(reg->umax_value))
@@ -1383,7 +1433,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
static void mark_btf_ld_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno,
- enum bpf_reg_type reg_type, u32 btf_id)
+ enum bpf_reg_type reg_type,
+ struct btf *btf, u32 btf_id)
{
if (reg_type == SCALAR_VALUE) {
mark_reg_unknown(env, regs, regno);
@@ -1391,6 +1442,7 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
}
mark_reg_known_zero(env, regs, regno);
regs[regno].type = PTR_TO_BTF_ID;
+ regs[regno].btf = btf;
regs[regno].btf_id = btf_id;
}
@@ -1485,9 +1537,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
- if (insn[i].code != (BPF_JMP | BPF_CALL))
- continue;
- if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn + i))
continue;
if (!env->bpf_capable) {
verbose(env,
@@ -2216,6 +2266,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_RDWR_BUF:
case PTR_TO_RDWR_BUF_OR_NULL:
case PTR_TO_PERCPU_BTF_ID:
+ case PTR_TO_MEM:
+ case PTR_TO_MEM_OR_NULL:
return true;
default:
return false;
@@ -2268,12 +2320,14 @@ static void save_register_state(struct bpf_func_state *state,
state->stack[spi].slot_type[i] = STACK_SPILL;
}
-/* check_stack_read/write functions track spill/fill of registers,
+/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
-static int check_stack_write(struct bpf_verifier_env *env,
- struct bpf_func_state *state, /* func where register points to */
- int off, int size, int value_regno, int insn_idx)
+static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ /* stack frame we're writing to */
+ struct bpf_func_state *state,
+ int off, int size, int value_regno,
+ int insn_idx)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@ -2399,9 +2453,175 @@ static int check_stack_write(struct bpf_verifier_env *env,
return 0;
}
-static int check_stack_read(struct bpf_verifier_env *env,
- struct bpf_func_state *reg_state /* func where register points to */,
- int off, int size, int value_regno)
+/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
+ * known to contain a variable offset.
+ * This function checks whether the write is permitted and conservatively
+ * tracks the effects of the write, considering that each stack slot in the
+ * dynamic range is potentially written to.
+ *
+ * 'off' includes 'regno->off'.
+ * 'value_regno' can be -1, meaning that an unknown value is being written to
+ * the stack.
+ *
+ * Spilled pointers in range are not marked as written because we don't know
+ * what's going to be actually written. This means that read propagation for
+ * future reads cannot be terminated by this write.
+ *
+ * For privileged programs, uninitialized stack slots are considered
+ * initialized by this write (even though we don't know exactly what offsets
+ * are going to be written to). The idea is that we don't want the verifier to
+ * reject future reads that access slots written to through variable offsets.
+ */
+static int check_stack_write_var_off(struct bpf_verifier_env *env,
+ /* func where register points to */
+ struct bpf_func_state *state,
+ int ptr_regno, int off, int size,
+ int value_regno, int insn_idx)
+{
+ struct bpf_func_state *cur; /* state of the current function */
+ int min_off, max_off;
+ int i, err;
+ struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
+ bool writing_zero = false;
+ /* set if the fact that we're writing a zero is used to let any
+ * stack slots remain STACK_ZERO
+ */
+ bool zero_used = false;
+
+ cur = env->cur_state->frame[env->cur_state->curframe];
+ ptr_reg = &cur->regs[ptr_regno];
+ min_off = ptr_reg->smin_value + off;
+ max_off = ptr_reg->smax_value + off + size;
+ if (value_regno >= 0)
+ value_reg = &cur->regs[value_regno];
+ if (value_reg && register_is_null(value_reg))
+ writing_zero = true;
+
+ err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
+ state->acquired_refs, true);
+ if (err)
+ return err;
+
+
+ /* Variable offset writes destroy any spilled pointers in range. */
+ for (i = min_off; i < max_off; i++) {
+ u8 new_type, *stype;
+ int slot, spi;
+
+ slot = -i - 1;
+ spi = slot / BPF_REG_SIZE;
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+
+ if (!env->allow_ptr_leaks
+ && *stype != NOT_INIT
+ && *stype != SCALAR_VALUE) {
+ /* Reject the write if there are spilled pointers in
+ * range. If we didn't reject here, the ptr status
+ * would be erased below (even though not all slots are
+ * actually overwritten), possibly opening the door to
+ * leaks.
+ */
+ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
+ insn_idx, i);
+ return -EINVAL;
+ }
+
+ /* Erase all spilled pointers. */
+ state->stack[spi].spilled_ptr.type = NOT_INIT;
+
+ /* Update the slot type. */
+ new_type = STACK_MISC;
+ if (writing_zero && *stype == STACK_ZERO) {
+ new_type = STACK_ZERO;
+ zero_used = true;
+ }
+ /* If the slot is STACK_INVALID, we check whether it's OK to
+ * pretend that it will be initialized by this write. The slot
+ * might not actually be written to, and so if we mark it as
+ * initialized future reads might leak uninitialized memory.
+ * For privileged programs, we will accept such reads to slots
+ * that may or may not be written because, if we were to reject
+ * them, the error would be too confusing.
+ */
+ if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
+ verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
+ insn_idx, i);
+ return -EINVAL;
+ }
+ *stype = new_type;
+ }
+ if (zero_used) {
+ /* backtracking doesn't work for STACK_ZERO yet. */
+ err = mark_chain_precision(env, value_regno);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/* When register 'dst_regno' is assigned some values from stack[min_off,
+ * max_off), we set the register's type according to the types of the
+ * respective stack slots. If all the stack values are known to be zeros, then
+ * so is the destination reg. Otherwise, the register is considered to be
+ * SCALAR. This function does not deal with register filling; the caller must
+ * ensure that all spilled registers in the stack range have been marked as
+ * read.
+ */
+static void mark_reg_stack_read(struct bpf_verifier_env *env,
+ /* func where src register points to */
+ struct bpf_func_state *ptr_state,
+ int min_off, int max_off, int dst_regno)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ int i, slot, spi;
+ u8 *stype;
+ int zeros = 0;
+
+ for (i = min_off; i < max_off; i++) {
+ slot = -i - 1;
+ spi = slot / BPF_REG_SIZE;
+ stype = ptr_state->stack[spi].slot_type;
+ if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
+ break;
+ zeros++;
+ }
+ if (zeros == max_off - min_off) {
+ /* any access_size read into register is zero extended,
+ * so the whole register == const_zero
+ */
+ __mark_reg_const_zero(&state->regs[dst_regno]);
+ /* backtracking doesn't support STACK_ZERO yet,
+ * so mark it precise here, so that later
+ * backtracking can stop here.
+ * Backtracking may not need this if this register
+ * doesn't participate in pointer adjustment.
+ * Forward propagation of precise flag is not
+ * necessary either. This mark is only to stop
+ * backtracking. Any register that contributed
+ * to const 0 was marked precise before spill.
+ */
+ state->regs[dst_regno].precise = true;
+ } else {
+ /* have read misc data from the stack */
+ mark_reg_unknown(env, state->regs, dst_regno);
+ }
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+}
+
+/* Read the stack at 'off' and put the results into the register indicated by
+ * 'dst_regno'. It handles reg filling if the addressed stack slot is a
+ * spilled reg.
+ *
+ * 'dst_regno' can be -1, meaning that the read value is not going to a
+ * register.
+ *
+ * The access is assumed to be within the current stack bounds.
+ */
+static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ /* func where src register points to */
+ struct bpf_func_state *reg_state,
+ int off, int size, int dst_regno)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
@@ -2409,11 +2629,6 @@ static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_reg_state *reg;
u8 *stype;
- if (reg_state->allocated_stack <= slot) {
- verbose(env, "invalid read from stack off %d+0 size %d\n",
- off, size);
- return -EACCES;
- }
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
@@ -2424,9 +2639,9 @@ static int check_stack_read(struct bpf_verifier_env *env,
verbose(env, "invalid size of register fill\n");
return -EACCES;
}
- if (value_regno >= 0) {
- mark_reg_unknown(env, state->regs, value_regno);
- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+ if (dst_regno >= 0) {
+ mark_reg_unknown(env, state->regs, dst_regno);
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
return 0;
@@ -2438,16 +2653,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
}
}
- if (value_regno >= 0) {
+ if (dst_regno >= 0) {
/* restore register state from stack */
- state->regs[value_regno] = *reg;
+ state->regs[dst_regno] = *reg;
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
- /* If value_regno==-1, the caller is asking us whether
+ /* If dst_regno==-1, the caller is asking us whether
* it is acceptable to use this value as a SCALAR_VALUE
* (e.g. for XADD).
* We must not allow unprivileged callers to do that
@@ -2459,70 +2674,167 @@ static int check_stack_read(struct bpf_verifier_env *env,
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
- int zeros = 0;
+ u8 type;
for (i = 0; i < size; i++) {
- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+ type = stype[(slot - i) % BPF_REG_SIZE];
+ if (type == STACK_MISC)
continue;
- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
- zeros++;
+ if (type == STACK_ZERO)
continue;
- }
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
- if (value_regno >= 0) {
- if (zeros == size) {
- /* any size read into register is zero extended,
- * so the whole register == const_zero
- */
- __mark_reg_const_zero(&state->regs[value_regno]);
- /* backtracking doesn't support STACK_ZERO yet,
- * so mark it precise here, so that later
- * backtracking can stop here.
- * Backtracking may not need this if this register
- * doesn't participate in pointer adjustment.
- * Forward propagation of precise flag is not
- * necessary either. This mark is only to stop
- * backtracking. Any register that contributed
- * to const 0 was marked precise before spill.
- */
- state->regs[value_regno].precise = true;
- } else {
- /* have read misc data from the stack */
- mark_reg_unknown(env, state->regs, value_regno);
- }
- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
- }
+ if (dst_regno >= 0)
+ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
}
return 0;
}
-static int check_stack_access(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg,
- int off, int size)
+enum stack_access_src {
+ ACCESS_DIRECT = 1, /* the access is performed by an instruction */
+ ACCESS_HELPER = 2, /* the access is performed by a helper */
+};
+
+static int check_stack_range_initialized(struct bpf_verifier_env *env,
+ int regno, int off, int access_size,
+ bool zero_size_allowed,
+ enum stack_access_src type,
+ struct bpf_call_arg_meta *meta);
+
+static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+{
+ return cur_regs(env) + regno;
+}
+
+/* Read the stack at 'ptr_regno + off' and put the result into the register
+ * 'dst_regno'.
+ * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
+ * but not its variable offset.
+ * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
+ *
+ * As opposed to check_stack_read_fixed_off, this function doesn't deal with
+ * filling registers (i.e. reads of spilled register cannot be detected when
+ * the offset is not fixed). We conservatively mark 'dst_regno' as containing
+ * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
+ * offset; for a fixed offset check_stack_read_fixed_off should be used
+ * instead.
+ */
+static int check_stack_read_var_off(struct bpf_verifier_env *env,
+ int ptr_regno, int off, int size, int dst_regno)
+{
+ /* The state of the source register. */
+ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
+ struct bpf_func_state *ptr_state = func(env, reg);
+ int err;
+ int min_off, max_off;
+
+ /* Note that we pass a NULL meta, so raw access will not be permitted.
+ */
+ err = check_stack_range_initialized(env, ptr_regno, off, size,
+ false, ACCESS_DIRECT, NULL);
+ if (err)
+ return err;
+
+ min_off = reg->smin_value + off;
+ max_off = reg->smax_value + off;
+ mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
+ return 0;
+}
+
+/* check_stack_read dispatches to check_stack_read_fixed_off or
+ * check_stack_read_var_off.
+ *
+ * The caller must ensure that the offset falls within the allocated stack
+ * bounds.
+ *
+ * 'dst_regno' is a register which will receive the value from the stack. It
+ * can be -1, meaning that the read value is not going to a register.
+ */
+static int check_stack_read(struct bpf_verifier_env *env,
+ int ptr_regno, int off, int size,
+ int dst_regno)
{
- /* Stack accesses must be at a fixed offset, so that we
- * can determine what type of data were returned. See
- * check_stack_read().
+ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
+ struct bpf_func_state *state = func(env, reg);
+ int err;
+ /* Some accesses are only permitted with a static offset. */
+ bool var_off = !tnum_is_const(reg->var_off);
+
+ /* The offset is required to be static when reads don't go to a
+ * register, in order to not leak pointers (see
+ * check_stack_read_fixed_off).
*/
- if (!tnum_is_const(reg->var_off)) {
+ if (dst_regno < 0 && var_off) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
+ verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
tn_buf, off, size);
return -EACCES;
}
+ /* Variable offset is prohibited for unprivileged mode for simplicity
+ * since it requires corresponding support in Spectre masking for stack
+ * ALU. See also retrieve_ptr_limit().
+ */
+ if (!env->bypass_spec_v1 && var_off) {
+ char tn_buf[48];
- if (off >= 0 || off < -MAX_BPF_STACK) {
- verbose(env, "invalid stack off=%d size=%d\n", off, size);
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
+ ptr_regno, tn_buf);
return -EACCES;
}
- return 0;
+ if (!var_off) {
+ off += reg->var_off.value;
+ err = check_stack_read_fixed_off(env, state, off, size,
+ dst_regno);
+ } else {
+ /* Variable offset stack reads need more conservative handling
+ * than fixed offset ones. Note that dst_regno >= 0 on this
+ * branch.
+ */
+ err = check_stack_read_var_off(env, ptr_regno, off, size,
+ dst_regno);
+ }
+ return err;
+}
+
+
+/* check_stack_write dispatches to check_stack_write_fixed_off or
+ * check_stack_write_var_off.
+ *
+ * 'ptr_regno' is the register used as a pointer into the stack.
+ * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
+ * 'value_regno' is the register whose value we're writing to the stack. It can
+ * be -1, meaning that we're not writing from a register.
+ *
+ * The caller must ensure that the offset falls within the maximum stack size.
+ */
+static int check_stack_write(struct bpf_verifier_env *env,
+ int ptr_regno, int off, int size,
+ int value_regno, int insn_idx)
+{
+ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
+ struct bpf_func_state *state = func(env, reg);
+ int err;
+
+ if (tnum_is_const(reg->var_off)) {
+ off += reg->var_off.value;
+ err = check_stack_write_fixed_off(env, state, off, size,
+ value_regno, insn_idx);
+ } else {
+ /* Variable offset stack writes need more conservative handling
+ * than fixed offset ones.
+ */
+ err = check_stack_write_var_off(env, state,
+ ptr_regno, off, size,
+ value_regno, insn_idx);
+ }
+ return err;
}
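A hedged sketch of the BPF C shape these new *_var_off paths accept for privileged loaders: the stack is indexed through an offset the verifier only knows as a bounded range, not a constant. The section name and the assumption that clang emits a register-relative stack access here are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("raw_tp/sys_enter")
int var_off_stack(void *ctx)
{
	char buf[16] = {};          /* initialized stack buffer    */
	__u32 idx;

	idx = bpf_get_prandom_u32();
	idx &= 7;                   /* verifier sees idx in [0, 7] */

	/* buf + idx is PTR_TO_STACK with a variable offset; the load goes
	 * through check_stack_read_var_off() rather than the fixed-off path.
	 */
	return buf[idx];
}

char LICENSE[] SEC("license") = "GPL";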
static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
@@ -2739,7 +3051,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
regno);
return -EACCES;
}
- err = __check_mem_access(env, regno, off, size, reg->range,
+
+ err = reg->range < 0 ? -EINVAL :
+ __check_mem_access(env, regno, off, size, reg->range,
zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
@@ -2762,7 +3076,7 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
- u32 *btf_id)
+ struct btf **btf, u32 *btf_id)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
@@ -2780,10 +3094,12 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*/
*reg_type = info.reg_type;
- if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
+ if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
+ *btf = info.btf;
*btf_id = info.btf_id;
- else
+ } else {
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+ }
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
@@ -2851,11 +3167,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
return -EACCES;
}
-static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
-{
- return cur_regs(env) + regno;
-}
-
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
@@ -2974,8 +3285,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
- /* The stack spill tracking logic in check_stack_write()
- * and check_stack_read() relies on stack accesses being
+ /* The stack spill tracking logic in check_stack_write_fixed_off()
+ * and check_stack_read_fixed_off() relies on stack accesses being
* aligned.
*/
strict = true;
@@ -3067,9 +3378,7 @@ process_func:
continue_func:
subprog_end = subprog[idx + 1].start;
for (; i < subprog_end; i++) {
- if (insn[i].code != (BPF_JMP | BPF_CALL))
- continue;
- if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn + i))
continue;
/* remember insn and function to return to */
ret_insn[frame] = i + 1;
@@ -3295,8 +3604,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
int value_regno)
{
struct bpf_reg_state *reg = regs + regno;
- const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
- const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
+ const char *tname = btf_name_by_offset(reg->btf, t->name_off);
u32 btf_id;
int ret;
@@ -3317,23 +3626,23 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
}
if (env->ops->btf_struct_access) {
- ret = env->ops->btf_struct_access(&env->log, t, off, size,
- atype, &btf_id);
+ ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
+ off, size, atype, &btf_id);
} else {
if (atype != BPF_READ) {
verbose(env, "only read is supported\n");
return -EACCES;
}
- ret = btf_struct_access(&env->log, t, off, size, atype,
- &btf_id);
+ ret = btf_struct_access(&env->log, reg->btf, t, off, size,
+ atype, &btf_id);
}
if (ret < 0)
return ret;
if (atype == BPF_READ && value_regno >= 0)
- mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+ mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id);
return 0;
}
@@ -3383,16 +3692,101 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
return -EACCES;
}
- ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+ ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
if (ret < 0)
return ret;
if (value_regno >= 0)
- mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+ mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
+
+ return 0;
+}
+
+/* Check that the stack access at the given offset is within bounds. The
+ * maximum valid offset is -1.
+ *
+ * The minimum valid offset is -MAX_BPF_STACK for writes, and
+ * -state->allocated_stack for reads.
+ */
+static int check_stack_slot_within_bounds(int off,
+ struct bpf_func_state *state,
+ enum bpf_access_type t)
+{
+ int min_valid_off;
+
+ if (t == BPF_WRITE)
+ min_valid_off = -MAX_BPF_STACK;
+ else
+ min_valid_off = -state->allocated_stack;
+ if (off < min_valid_off || off > -1)
+ return -EACCES;
return 0;
}
+/* Check that the stack access at 'regno + off' falls within the maximum stack
+ * bounds.
+ *
+ * 'off' includes 'regno->off', but not its dynamic part (if any).
+ */
+static int check_stack_access_within_bounds(
+ struct bpf_verifier_env *env,
+ int regno, int off, int access_size,
+ enum stack_access_src src, enum bpf_access_type type)
+{
+ struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *reg = regs + regno;
+ struct bpf_func_state *state = func(env, reg);
+ int min_off, max_off;
+ int err;
+ char *err_extra;
+
+ if (src == ACCESS_HELPER)
+ /* We don't know if helpers are reading or writing (or both). */
+ err_extra = " indirect access to";
+ else if (type == BPF_READ)
+ err_extra = " read from";
+ else
+ err_extra = " write to";
+
+ if (tnum_is_const(reg->var_off)) {
+ min_off = reg->var_off.value + off;
+ if (access_size > 0)
+ max_off = min_off + access_size - 1;
+ else
+ max_off = min_off;
+ } else {
+ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+ reg->smin_value <= -BPF_MAX_VAR_OFF) {
+ verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
+ err_extra, regno);
+ return -EACCES;
+ }
+ min_off = reg->smin_value + off;
+ if (access_size > 0)
+ max_off = reg->smax_value + off + access_size - 1;
+ else
+ max_off = min_off;
+ }
+
+ err = check_stack_slot_within_bounds(min_off, state, type);
+ if (!err)
+ err = check_stack_slot_within_bounds(max_off, state, type);
+
+ if (err) {
+ if (tnum_is_const(reg->var_off)) {
+ verbose(env, "invalid%s stack R%d off=%d size=%d\n",
+ err_extra, regno, off, access_size);
+ } else {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
+ err_extra, regno, tn_buf, access_size);
+ }
+ }
+ return err;
+}
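A worked numeric example of the bounds computed above, with illustrative values: a stack pointer whose var_off lies in [smin_value = -16, smax_value = -8], fixed off = -4 and access_size = 8 gives

	min_off = smin_value + off                   = -16 + (-4)         = -20
	max_off = smax_value + off + access_size - 1 =  -8 + (-4) + 8 - 1 =  -5

and both -20 and -5 must pass check_stack_slot_within_bounds(), i.e. fall within [-MAX_BPF_STACK, -1] for writes or [-allocated_stack, -1] for reads.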
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
@@ -3464,6 +3858,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
+ struct btf *btf = NULL;
u32 btf_id = 0;
if (t == BPF_WRITE && value_regno >= 0 &&
@@ -3476,7 +3871,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err < 0)
return err;
- err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
+ err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
if (err)
verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
@@ -3498,15 +3893,17 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
*/
regs[value_regno].subreg_def = DEF_NOT_SUBREG;
if (reg_type == PTR_TO_BTF_ID ||
- reg_type == PTR_TO_BTF_ID_OR_NULL)
+ reg_type == PTR_TO_BTF_ID_OR_NULL) {
+ regs[value_regno].btf = btf;
regs[value_regno].btf_id = btf_id;
+ }
}
regs[value_regno].type = reg_type;
}
} else if (reg->type == PTR_TO_STACK) {
- off += reg->var_off.value;
- err = check_stack_access(env, reg, off, size);
+ /* Basic bounds checks. */
+ err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
if (err)
return err;
@@ -3515,12 +3912,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err)
return err;
- if (t == BPF_WRITE)
- err = check_stack_write(env, state, off, size,
- value_regno, insn_idx);
- else
- err = check_stack_read(env, state, off, size,
+ if (t == BPF_READ)
+ err = check_stack_read(env, regno, off, size,
value_regno);
+ else
+ err = check_stack_write(env, regno, off, size,
+ value_regno, insn_idx);
} else if (reg_is_pkt_pointer(reg)) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
verbose(env, "cannot write into packet\n");
@@ -3596,13 +3993,30 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return err;
}
-static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
+static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
+ int load_reg;
int err;
- if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
- insn->imm != 0) {
- verbose(env, "BPF_XADD uses reserved fields\n");
+ switch (insn->imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ case BPF_XCHG:
+ case BPF_CMPXCHG:
+ break;
+ default:
+ verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
+ if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
+ verbose(env, "invalid atomic operand size\n");
return -EINVAL;
}
@@ -3616,6 +4030,13 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
if (err)
return err;
+ if (insn->imm == BPF_CMPXCHG) {
+ /* Check comparison of R0 with memory location */
+ err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+ if (err)
+ return err;
+ }
+
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
@@ -3625,66 +4046,91 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
is_pkt_reg(env, insn->dst_reg) ||
is_flow_key_reg(env, insn->dst_reg) ||
is_sk_reg(env, insn->dst_reg)) {
- verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+ verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
- /* check whether atomic_add can read the memory */
+ if (insn->imm & BPF_FETCH) {
+ if (insn->imm == BPF_CMPXCHG)
+ load_reg = BPF_REG_0;
+ else
+ load_reg = insn->src_reg;
+
+ /* check and record load of old value */
+ err = check_reg_arg(env, load_reg, DST_OP);
+ if (err)
+ return err;
+ } else {
+ /* This instruction accesses a memory location but doesn't
+ * actually load it into a register.
+ */
+ load_reg = -1;
+ }
+
+ /* check whether we can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_READ, -1, true);
+ BPF_SIZE(insn->code), BPF_READ, load_reg, true);
if (err)
return err;
- /* check whether atomic_add can write into the same memory */
- return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_WRITE, -1, true);
-}
-
-static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
- int off, int access_size,
- bool zero_size_allowed)
-{
- struct bpf_reg_state *reg = reg_state(env, regno);
-
- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
- access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
- if (tnum_is_const(reg->var_off)) {
- verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
- regno, off, access_size);
- } else {
- char tn_buf[48];
+ /* check whether we can write into the same memory */
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+ if (err)
+ return err;
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
- regno, tn_buf, access_size);
- }
- return -EACCES;
- }
return 0;
}
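A hedged sketch of BPF C that recent clang (v12+, typically built with -mcpu=v3) lowers to the BPF_ATOMIC encodings accepted above; the map layout and section name are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("raw_tp/sys_enter")
int atomics(void *ctx)
{
	__u32 key = 0;
	__u64 *val, old;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val)
		return 0;

	__sync_fetch_and_add(val, 1);                  /* BPF_ADD (| BPF_FETCH if the result is used) */
	old = __sync_val_compare_and_swap(val, 0, 42); /* BPF_CMPXCHG; old value lands in R0          */
	return old == 0;
}

char LICENSE[] SEC("license") = "GPL";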
-/* when register 'regno' is passed into function that will read 'access_size'
- * bytes from that pointer, make sure that it's within stack boundary
- * and all elements of stack are initialized.
- * Unlike most pointer bounds-checking functions, this one doesn't take an
- * 'off' argument, so it has to add in reg->off itself.
+/* When register 'regno' is used to read the stack (either directly or through
+ * a helper function) make sure that it's within stack boundary and, depending
+ * on the access type, that all elements of the stack are initialized.
+ *
+ * 'off' includes 'regno->off', but not its dynamic part (if any).
+ *
+ * All registers that have been spilled on the stack in the slots within the
+ * read offsets are marked as read.
*/
-static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
- int access_size, bool zero_size_allowed,
- struct bpf_call_arg_meta *meta)
+static int check_stack_range_initialized(
+ struct bpf_verifier_env *env, int regno, int off,
+ int access_size, bool zero_size_allowed,
+ enum stack_access_src type, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi;
+ char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
+ enum bpf_access_type bounds_check_type;
+ /* Some accesses can write anything into the stack, others are
+ * read-only.
+ */
+ bool clobber = false;
+
+ if (access_size == 0 && !zero_size_allowed) {
+ verbose(env, "invalid zero-sized read\n");
+ return -EACCES;
+ }
+
+ if (type == ACCESS_HELPER) {
+ /* The bounds checks for writes are more permissive than for
+ * reads. However, if raw_mode is not set, we'll do extra
+ * checks below.
+ */
+ bounds_check_type = BPF_WRITE;
+ clobber = true;
+ } else {
+ bounds_check_type = BPF_READ;
+ }
+ err = check_stack_access_within_bounds(env, regno, off, access_size,
+ type, bounds_check_type);
+ if (err)
+ return err;
+
if (tnum_is_const(reg->var_off)) {
- min_off = max_off = reg->var_off.value + reg->off;
- err = __check_stack_boundary(env, regno, min_off, access_size,
- zero_size_allowed);
- if (err)
- return err;
+ min_off = max_off = reg->var_off.value + off;
} else {
/* Variable offset is prohibited for unprivileged mode for
* simplicity since it requires corresponding support in
@@ -3695,8 +4141,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
- regno, tn_buf);
+ verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
+ regno, err_extra, tn_buf);
return -EACCES;
}
/* Only initialized buffer on stack is allowed to be accessed
@@ -3708,28 +4154,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
if (meta && meta->raw_mode)
meta = NULL;
- if (reg->smax_value >= BPF_MAX_VAR_OFF ||
- reg->smax_value <= -BPF_MAX_VAR_OFF) {
- verbose(env, "R%d unbounded indirect variable offset stack access\n",
- regno);
- return -EACCES;
- }
- min_off = reg->smin_value + reg->off;
- max_off = reg->smax_value + reg->off;
- err = __check_stack_boundary(env, regno, min_off, access_size,
- zero_size_allowed);
- if (err) {
- verbose(env, "R%d min value is outside of stack bound\n",
- regno);
- return err;
- }
- err = __check_stack_boundary(env, regno, max_off, access_size,
- zero_size_allowed);
- if (err) {
- verbose(env, "R%d max value is outside of stack bound\n",
- regno);
- return err;
- }
+ min_off = reg->smin_value + off;
+ max_off = reg->smax_value + off;
}
if (meta && meta->raw_mode) {
@@ -3749,8 +4175,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
- /* helper can write anything into the stack */
- *stype = STACK_MISC;
+ if (clobber) {
+ /* helper can write anything into the stack */
+ *stype = STACK_MISC;
+ }
goto mark;
}
@@ -3759,23 +4187,26 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
goto mark;
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
- state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
- __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
- for (j = 0; j < BPF_REG_SIZE; j++)
- state->stack[spi].slot_type[j] = STACK_MISC;
+ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ env->allow_ptr_leaks)) {
+ if (clobber) {
+ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
+ for (j = 0; j < BPF_REG_SIZE; j++)
+ state->stack[spi].slot_type[j] = STACK_MISC;
+ }
goto mark;
}
err:
if (tnum_is_const(reg->var_off)) {
- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
- min_off, i - min_off, access_size);
+ verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
+ err_extra, regno, min_off, i - min_off, access_size);
} else {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
- tn_buf, i - min_off, access_size);
+ verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
+ err_extra, regno, tn_buf, i - min_off, access_size);
}
return -EACCES;
mark:
@@ -3824,8 +4255,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
"rdwr",
&env->prog->aux->max_rdwr_access);
case PTR_TO_STACK:
- return check_stack_boundary(env, regno, access_size,
- zero_size_allowed, meta);
+ return check_stack_range_initialized(
+ env,
+ regno, reg->off, access_size,
+ zero_size_allowed, ACCESS_HELPER, meta);
default: /* scalar_value or invalid ptr */
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
@@ -3839,6 +4272,29 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
+int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size)
+{
+ if (register_is_null(reg))
+ return 0;
+
+ if (reg_type_may_be_null(reg->type)) {
+ /* Assuming that the register contains a value check if the memory
+ * access is safe. Temporarily save and restore the register's state as
+ * the conversion shouldn't be visible to a caller.
+ */
+ const struct bpf_reg_state saved_reg = *reg;
+ int rv;
+
+ mark_ptr_not_null_reg(reg);
+ rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
+ *reg = saved_reg;
+ return rv;
+ }
+
+ return check_helper_mem_access(env, regno, mem_size, true, NULL);
+}
+
/* Implementation details:
* bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
* Two bpf_map_lookups (even with the same key) will have different reg->id.
@@ -4116,11 +4572,11 @@ found:
arg_btf_id = compatible->btf_id;
}
- if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
- *arg_btf_id)) {
+ if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
+ btf_vmlinux, *arg_btf_id)) {
verbose(env, "R%d is of type %s but %s is expected\n",
- regno, kernel_type_name(reg->btf_id),
- kernel_type_name(*arg_btf_id));
+ regno, kernel_type_name(reg->btf, reg->btf_id),
+ kernel_type_name(btf_vmlinux, *arg_btf_id));
return -EACCES;
}
@@ -4242,6 +4698,7 @@ skip_type_check:
verbose(env, "Helper has invalid btf_id in R%d\n", regno);
return -EACCES;
}
+ meta->ret_btf = reg->btf;
meta->ret_btf_id = reg->btf_id;
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
@@ -4309,7 +4766,7 @@ skip_type_check:
err = mark_chain_precision(env, regno);
} else if (arg_type_is_alloc_size(arg_type)) {
if (!tnum_is_const(reg->var_off)) {
- verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n",
+ verbose(env, "R%d is not a known constant'\n",
regno);
return -EACCES;
}
@@ -4469,6 +4926,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_inode_storage_delete)
goto error;
break;
+ case BPF_MAP_TYPE_TASK_STORAGE:
+ if (func_id != BPF_FUNC_task_storage_get &&
+ func_id != BPF_FUNC_task_storage_delete)
+ goto error;
+ break;
default:
break;
}
@@ -4547,6 +5009,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
goto error;
break;
+ case BPF_FUNC_task_storage_get:
+ case BPF_FUNC_task_storage_delete:
+ if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
+ goto error;
+ break;
default:
break;
}
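A hedged sketch of the map/helper pairing enforced above: a BPF_MAP_TYPE_TASK_STORAGE map used with bpf_task_storage_get() from an LSM hook. It assumes CONFIG_BPF_LSM and a vmlinux.h generated with bpftool; all names are illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_stats {
	__u64 opens;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct task_stats);
} stats_map SEC(".maps");

SEC("lsm/file_open")
int BPF_PROG(count_opens, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct task_stats *stats;

	stats = bpf_task_storage_get(&stats_map, task, 0,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (stats)
		__sync_fetch_and_add(&stats->opens, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";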
@@ -4687,6 +5154,32 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
__clear_all_pkt_pointers(env, vstate->frame[i]);
}
+enum {
+ AT_PKT_END = -1,
+ BEYOND_PKT_END = -2,
+};
+
+static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
+{
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *reg = &state->regs[regn];
+
+ if (reg->type != PTR_TO_PACKET)
+ /* PTR_TO_PACKET_META is not supported yet */
+ return;
+
+ /* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
+ * How far beyond pkt_end it goes is unknown.
+ * if (!range_open) it's the case of pkt >= pkt_end
+ * if (range_open) it's the case of pkt > pkt_end
+ * hence this pointer is at least 1 byte bigger than pkt_end
+ */
+ if (range_open)
+ reg->range = BEYOND_PKT_END;
+ else
+ reg->range = AT_PKT_END;
+}
+
static void release_reg_references(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int ref_obj_id)
@@ -4786,8 +5279,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
subprog);
clear_caller_saved_regs(env, caller->regs);
- /* All global functions return SCALAR_VALUE */
+ /* All global functions return a 64-bit SCALAR_VALUE */
mark_reg_unknown(env, caller->regs, BPF_REG_0);
+ caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
/* continue with next insn after call */
return 0;
@@ -4895,6 +5389,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
ret_reg->smax_value = meta->msize_max_value;
ret_reg->s32_max_value = meta->msize_max_value;
+ ret_reg->smin_value = -MAX_ERRNO;
+ ret_reg->s32_min_value = -MAX_ERRNO;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
__update_reg_bounds(ret_reg);
@@ -5152,16 +5648,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
const struct btf_type *t;
mark_reg_known_zero(env, regs, BPF_REG_0);
- t = btf_type_skip_modifiers(btf_vmlinux, meta.ret_btf_id, NULL);
+ t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
if (!btf_type_is_struct(t)) {
u32 tsize;
const struct btf_type *ret;
const char *tname;
/* resolve the type size of ksym. */
- ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+ ret = btf_resolve_size(meta.ret_btf, t, &tsize);
if (IS_ERR(ret)) {
- tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ tname = btf_name_by_offset(meta.ret_btf, t->name_off);
verbose(env, "unable to resolve the size of type '%s': %ld\n",
tname, PTR_ERR(ret));
return -EINVAL;
@@ -5174,19 +5670,27 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type =
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
+ regs[BPF_REG_0].btf = meta.ret_btf;
regs[BPF_REG_0].btf_id = meta.ret_btf_id;
}
- } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
+ } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
+ fn->ret_type == RET_PTR_TO_BTF_ID) {
int ret_btf_id;
mark_reg_known_zero(env, regs, BPF_REG_0);
- regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
+ regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
+ PTR_TO_BTF_ID :
+ PTR_TO_BTF_ID_OR_NULL;
ret_btf_id = *fn->ret_btf_id;
if (ret_btf_id == 0) {
verbose(env, "invalid return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
+ /* current BPF helper definitions are only coming from
+ * built-in code with type IDs from vmlinux BTF
+ */
+ regs[BPF_REG_0].btf = btf_vmlinux;
regs[BPF_REG_0].btf_id = ret_btf_id;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
@@ -5255,7 +5759,7 @@ static bool signed_add_overflows(s64 a, s64 b)
return res < a;
}
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
{
/* Do the add in u32, where overflow is well-defined */
s32 res = (s32)((u32)a + (u32)b);
@@ -5265,7 +5769,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
return res < a;
}
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
@@ -5277,7 +5781,7 @@ static bool signed_sub_overflows(s32 a, s32 b)
static bool signed_sub32_overflows(s32 a, s32 b)
{
- /* Do the sub in u64, where overflow is well-defined */
+ /* Do the sub in u32, where overflow is well-defined */
s32 res = (s32)((u32)a - (u32)b);
if (b < 0)
@@ -5442,6 +5946,41 @@ do_sim:
return !ret ? -EFAULT : 0;
}
+/* check that stack access falls within stack limits and that 'reg' doesn't
+ * have a variable offset.
+ *
+ * Variable offset is prohibited for unprivileged mode for simplicity since it
+ * requires corresponding support in Spectre masking for stack ALU. See also
+ * retrieve_ptr_limit().
+ *
+ *
+ * 'off' includes 'reg->off'.
+ */
+static int check_stack_access_for_ptr_arithmetic(
+ struct bpf_verifier_env *env,
+ int regno,
+ const struct bpf_reg_state *reg,
+ int off)
+{
+ if (!tnum_is_const(reg->var_off)) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
+ regno, tn_buf, off);
+ return -EACCES;
+ }
+
+ if (off >= 0 || off < -MAX_BPF_STACK) {
+ verbose(env, "R%d stack pointer arithmetic goes out of range, "
+ "prohibited for !root; off=%d\n", regno, off);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
@@ -5586,7 +6125,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
- dst_reg->raw = 0;
+ memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
}
break;
case BPF_SUB:
@@ -5651,7 +6190,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
if (smin_val < 0)
- dst_reg->raw = 0;
+ memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
}
break;
case BPF_AND:
@@ -5685,10 +6224,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
"prohibited for !root\n", dst);
return -EACCES;
} else if (dst_reg->type == PTR_TO_STACK &&
- check_stack_access(env, dst_reg, dst_reg->off +
- dst_reg->var_off.value, 1)) {
- verbose(env, "R%d stack pointer arithmetic goes out of range, "
- "prohibited for !root\n", dst);
+ check_stack_access_for_ptr_arithmetic(
+ env, dst, dst_reg, dst_reg->off +
+ dst_reg->var_off.value)) {
return -EACCES;
}
}
@@ -6167,7 +6705,7 @@ static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
- * unsigned bounts capture the signed bounds.
+ * unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
@@ -6198,7 +6736,7 @@ static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
- * unsigned bounts capture the signed bounds.
+ * unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
@@ -6695,7 +7233,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
static void __find_good_pkt_pointers(struct bpf_func_state *state,
struct bpf_reg_state *dst_reg,
- enum bpf_reg_type type, u16 new_range)
+ enum bpf_reg_type type, int new_range)
{
struct bpf_reg_state *reg;
int i;
@@ -6720,8 +7258,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
enum bpf_reg_type type,
bool range_right_open)
{
- u16 new_range;
- int i;
+ int new_range, i;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
@@ -6820,7 +7357,7 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
case BPF_JSGT:
if (reg->s32_min_value > sval)
return 1;
- else if (reg->s32_max_value < sval)
+ else if (reg->s32_max_value <= sval)
return 0;
break;
case BPF_JLT:
@@ -6893,7 +7430,7 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
case BPF_JSGT:
if (reg->smin_value > sval)
return 1;
- else if (reg->smax_value < sval)
+ else if (reg->smax_value <= sval)
return 0;
break;
case BPF_JLT:
@@ -6972,6 +7509,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
return is_branch64_taken(reg, val, opcode);
}
+static int flip_opcode(u32 opcode)
+{
+ /* How can we transform "a <op> b" into "b <op> a"? */
+ static const u8 opcode_flip[16] = {
+ /* these stay the same */
+ [BPF_JEQ >> 4] = BPF_JEQ,
+ [BPF_JNE >> 4] = BPF_JNE,
+ [BPF_JSET >> 4] = BPF_JSET,
+ /* these swap "lesser" and "greater" (L and G in the opcodes) */
+ [BPF_JGE >> 4] = BPF_JLE,
+ [BPF_JGT >> 4] = BPF_JLT,
+ [BPF_JLE >> 4] = BPF_JGE,
+ [BPF_JLT >> 4] = BPF_JGT,
+ [BPF_JSGE >> 4] = BPF_JSLE,
+ [BPF_JSGT >> 4] = BPF_JSLT,
+ [BPF_JSLE >> 4] = BPF_JSGE,
+ [BPF_JSLT >> 4] = BPF_JSGT
+ };
+ return opcode_flip[opcode >> 4];
+}
+
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg,
+ u8 opcode)
+{
+ struct bpf_reg_state *pkt;
+
+ if (src_reg->type == PTR_TO_PACKET_END) {
+ pkt = dst_reg;
+ } else if (dst_reg->type == PTR_TO_PACKET_END) {
+ pkt = src_reg;
+ opcode = flip_opcode(opcode);
+ } else {
+ return -1;
+ }
+
+ if (pkt->range >= 0)
+ return -1;
+
+ switch (opcode) {
+ case BPF_JLE:
+ /* pkt <= pkt_end */
+ fallthrough;
+ case BPF_JGT:
+ /* pkt > pkt_end */
+ if (pkt->range == BEYOND_PKT_END)
+ /* pkt has at least one extra byte beyond pkt_end */
+ return opcode == BPF_JGT;
+ break;
+ case BPF_JLT:
+ /* pkt < pkt_end */
+ fallthrough;
+ case BPF_JGE:
+ /* pkt >= pkt_end */
+ if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
+ return opcode == BPF_JGE;
+ break;
+ }
+ return -1;
+}
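A hedged XDP sketch of the comparison shape these helpers reason about: a packet pointer compared against pkt_end, with the verifier now tracking on which side of pkt_end the pointer lies in each branch. The section name and field arithmetic follow the usual libbpf conventions and are shown only as an illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int parse(struct xdp_md *ctx)
{
	unsigned char *data     = (void *)(long)ctx->data;
	unsigned char *data_end = (void *)(long)ctx->data_end;

	/* data + 14 may lie beyond data_end; this is the pkt > pkt_end
	 * comparison handled by mark_pkt_end()/is_pkt_ptr_branch_taken().
	 */
	if (data + 14 > data_end)
		return XDP_DROP;

	/* here data..data+13 is known to be within the packet */
	return data[12] == 0x08 ? XDP_PASS : XDP_DROP;
}

char LICENSE[] SEC("license") = "GPL";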
+
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -7135,23 +7733,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
u64 val, u32 val32,
u8 opcode, bool is_jmp32)
{
- /* How can we transform "a <op> b" into "b <op> a"? */
- static const u8 opcode_flip[16] = {
- /* these stay the same */
- [BPF_JEQ >> 4] = BPF_JEQ,
- [BPF_JNE >> 4] = BPF_JNE,
- [BPF_JSET >> 4] = BPF_JSET,
- /* these swap "lesser" and "greater" (L and G in the opcodes) */
- [BPF_JGE >> 4] = BPF_JLE,
- [BPF_JGT >> 4] = BPF_JLT,
- [BPF_JLE >> 4] = BPF_JGE,
- [BPF_JLT >> 4] = BPF_JGT,
- [BPF_JSGE >> 4] = BPF_JSLE,
- [BPF_JSGT >> 4] = BPF_JSLT,
- [BPF_JSLE >> 4] = BPF_JSGE,
- [BPF_JSLT >> 4] = BPF_JSGT
- };
- opcode = opcode_flip[opcode >> 4];
+ opcode = flip_opcode(opcode);
/* This uses zero as "not present in table"; luckily the zero opcode,
* BPF_JA, can't get here.
*/
@@ -7224,43 +7806,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
}
if (is_null) {
reg->type = SCALAR_VALUE;
- } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
- const struct bpf_map *map = reg->map_ptr;
-
- if (map->inner_map_meta) {
- reg->type = CONST_PTR_TO_MAP;
- reg->map_ptr = map->inner_map_meta;
- } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
- reg->type = PTR_TO_XDP_SOCK;
- } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
- map->map_type == BPF_MAP_TYPE_SOCKHASH) {
- reg->type = PTR_TO_SOCKET;
- } else {
- reg->type = PTR_TO_MAP_VALUE;
- }
- } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
- reg->type = PTR_TO_SOCKET;
- } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
- reg->type = PTR_TO_SOCK_COMMON;
- } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
- reg->type = PTR_TO_TCP_SOCK;
- } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
- reg->type = PTR_TO_BTF_ID;
- } else if (reg->type == PTR_TO_MEM_OR_NULL) {
- reg->type = PTR_TO_MEM;
- } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) {
- reg->type = PTR_TO_RDONLY_BUF;
- } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) {
- reg->type = PTR_TO_RDWR_BUF;
- }
- if (is_null) {
/* We don't need id and ref_obj_id from this point
* onwards anymore, thus we should better reset it,
* so that state pruning has chances to take effect.
*/
reg->id = 0;
reg->ref_obj_id = 0;
- } else if (!reg_may_point_to_spin_lock(reg)) {
+
+ return;
+ }
+
+ mark_ptr_not_null_reg(reg);
+
+ if (!reg_may_point_to_spin_lock(reg)) {
/* For not-NULL ptr, reg->ref_obj_id will be reset
* in release_reg_references().
*
@@ -7333,6 +7891,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, false);
+ mark_pkt_end(other_branch, insn->dst_reg, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7340,6 +7899,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_end > pkt_data', pkt_data > pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, true);
+ mark_pkt_end(this_branch, insn->src_reg, false);
} else {
return false;
}
@@ -7352,6 +7912,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, true);
+ mark_pkt_end(this_branch, insn->dst_reg, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7359,6 +7920,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_end < pkt_data', pkt_data > pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, false);
+ mark_pkt_end(other_branch, insn->src_reg, true);
} else {
return false;
}
@@ -7371,6 +7933,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, true);
+ mark_pkt_end(other_branch, insn->dst_reg, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7378,6 +7941,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, false);
+ mark_pkt_end(this_branch, insn->src_reg, true);
} else {
return false;
}
@@ -7390,6 +7954,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, false);
+ mark_pkt_end(this_branch, insn->dst_reg, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7397,6 +7962,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, true);
+ mark_pkt_end(other_branch, insn->src_reg, false);
} else {
return false;
}
@@ -7496,6 +8062,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
src_reg->var_off.value,
opcode,
is_jmp32);
+ } else if (reg_is_pkt_pointer_any(dst_reg) &&
+ reg_is_pkt_pointer_any(src_reg) &&
+ !is_jmp32) {
+ pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
}
if (pred >= 0) {
@@ -7504,7 +8074,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
*/
if (!__is_pointer_value(false, dst_reg))
err = mark_chain_precision(env, insn->dst_reg);
- if (BPF_SRC(insn->code) == BPF_X && !err)
+ if (BPF_SRC(insn->code) == BPF_X && !err &&
+ !__is_pointer_value(false, src_reg))
err = mark_chain_precision(env, insn->src_reg);
if (err)
return err;
@@ -7646,6 +8217,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
break;
case PTR_TO_BTF_ID:
case PTR_TO_PERCPU_BTF_ID:
+ dst_reg->btf = aux->btf_var.btf;
dst_reg->btf_id = aux->btf_var.btf_id;
break;
default:
@@ -7786,9 +8358,11 @@ static int check_return_code(struct bpf_verifier_env *env)
struct tnum range = tnum_range(0, 1);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
+ const bool is_subprog = env->cur_state->frame[0]->subprogno;
/* LSM and struct_ops func-ptr's return type could be "void" */
- if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+ if (!is_subprog &&
+ (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
prog_type == BPF_PROG_TYPE_LSM) &&
!prog->aux->attach_func_proto->type)
return 0;
@@ -7808,6 +8382,16 @@ static int check_return_code(struct bpf_verifier_env *env)
return -EACCES;
}
+ reg = cur_regs(env) + BPF_REG_0;
+ if (is_subprog) {
+ if (reg->type != SCALAR_VALUE) {
+ verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
+ reg_type_str[reg->type]);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
@@ -7817,6 +8401,9 @@ static int check_return_code(struct bpf_verifier_env *env)
env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
range = tnum_range(1, 1);
+ if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
+ env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
+ range = tnum_range(0, 3);
break;
case BPF_PROG_TYPE_CGROUP_SKB:
if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
@@ -7861,7 +8448,6 @@ static int check_return_code(struct bpf_verifier_env *env)
return 0;
}
- reg = cur_regs(env) + BPF_REG_0;
if (reg->type != SCALAR_VALUE) {
verbose(env, "At program exit the register R0 is not a known value (%s)\n",
reg_type_str[reg->type]);
@@ -7949,6 +8535,11 @@ static void init_explored_state(struct bpf_verifier_env *env, int idx)
env->insn_aux_data[idx].prune_point = true;
}
+enum {
+ DONE_EXPLORING = 0,
+ KEEP_EXPLORING = 1,
+};
+
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
@@ -7961,10 +8552,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
int *insn_state = env->cfg.insn_state;
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
- return 0;
+ return DONE_EXPLORING;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
- return 0;
+ return DONE_EXPLORING;
if (w < 0 || w >= env->prog->len) {
verbose_linfo(env, t, "%d: ", t);
@@ -7983,10 +8574,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
if (env->cfg.cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[env->cfg.cur_stack++] = w;
- return 1;
+ return KEEP_EXPLORING;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
if (loop_ok && env->bpf_capable)
- return 0;
+ return DONE_EXPLORING;
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
@@ -7998,7 +8589,74 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
- return 0;
+ return DONE_EXPLORING;
+}
+
+/* Visits the instruction at index t and returns one of the following:
+ * < 0 - an error occurred
+ * DONE_EXPLORING - the instruction was fully explored
+ * KEEP_EXPLORING - there is still work to be done before it is fully explored
+ */
+static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
+{
+ struct bpf_insn *insns = env->prog->insnsi;
+ int ret;
+
+ /* All non-branch instructions have a single fall-through edge. */
+ if (BPF_CLASS(insns[t].code) != BPF_JMP &&
+ BPF_CLASS(insns[t].code) != BPF_JMP32)
+ return push_insn(t, t + 1, FALLTHROUGH, env, false);
+
+ switch (BPF_OP(insns[t].code)) {
+ case BPF_EXIT:
+ return DONE_EXPLORING;
+
+ case BPF_CALL:
+ ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+ if (ret)
+ return ret;
+
+ if (t + 1 < insn_cnt)
+ init_explored_state(env, t + 1);
+ if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+ init_explored_state(env, t);
+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
+ env, false);
+ }
+ return ret;
+
+ case BPF_JA:
+ if (BPF_SRC(insns[t].code) != BPF_K)
+ return -EINVAL;
+
+ /* unconditional jump with single edge */
+ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
+ true);
+ if (ret)
+ return ret;
+
+ /* unconditional jmp is not a good pruning point,
+ * but it's marked, since backtracking needs
+ * to record jmp history in is_state_visited().
+ */
+ init_explored_state(env, t + insns[t].off + 1);
+ /* tell verifier to check for equivalent states
+ * after every call and jump
+ */
+ if (t + 1 < insn_cnt)
+ init_explored_state(env, t + 1);
+
+ return ret;
+
+ default:
+ /* conditional jump with two edges */
+ init_explored_state(env, t);
+ ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+ if (ret)
+ return ret;
+
+ return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
+ }
}
/* non-recursive depth-first-search to detect loops in BPF program
@@ -8006,11 +8664,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
*/
static int check_cfg(struct bpf_verifier_env *env)
{
- struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int *insn_stack, *insn_state;
int ret = 0;
- int i, t;
+ int i;
insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
@@ -8026,92 +8683,32 @@ static int check_cfg(struct bpf_verifier_env *env)
insn_stack[0] = 0; /* 0 is the first instruction */
env->cfg.cur_stack = 1;
-peek_stack:
- if (env->cfg.cur_stack == 0)
- goto check_state;
- t = insn_stack[env->cfg.cur_stack - 1];
-
- if (BPF_CLASS(insns[t].code) == BPF_JMP ||
- BPF_CLASS(insns[t].code) == BPF_JMP32) {
- u8 opcode = BPF_OP(insns[t].code);
-
- if (opcode == BPF_EXIT) {
- goto mark_explored;
- } else if (opcode == BPF_CALL) {
- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- if (t + 1 < insn_cnt)
- init_explored_state(env, t + 1);
- if (insns[t].src_reg == BPF_PSEUDO_CALL) {
- init_explored_state(env, t);
- ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
- env, false);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- }
- } else if (opcode == BPF_JA) {
- if (BPF_SRC(insns[t].code) != BPF_K) {
- ret = -EINVAL;
- goto err_free;
- }
- /* unconditional jump with single edge */
- ret = push_insn(t, t + insns[t].off + 1,
- FALLTHROUGH, env, true);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- /* unconditional jmp is not a good pruning point,
- * but it's marked, since backtracking needs
- * to record jmp history in is_state_visited().
- */
- init_explored_state(env, t + insns[t].off + 1);
- /* tell verifier to check for equivalent states
- * after every call and jump
- */
- if (t + 1 < insn_cnt)
- init_explored_state(env, t + 1);
- } else {
- /* conditional jump with two edges */
- init_explored_state(env, t);
- ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
+ while (env->cfg.cur_stack > 0) {
+ int t = insn_stack[env->cfg.cur_stack - 1];
- ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
- goto err_free;
- }
- } else {
- /* all other non-branch instructions with single
- * fall-through edge
- */
- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
- if (ret == 1)
- goto peek_stack;
- else if (ret < 0)
+ ret = visit_insn(t, insn_cnt, env);
+ switch (ret) {
+ case DONE_EXPLORING:
+ insn_state[t] = EXPLORED;
+ env->cfg.cur_stack--;
+ break;
+ case KEEP_EXPLORING:
+ break;
+ default:
+ if (ret > 0) {
+ verbose(env, "visit_insn internal bug\n");
+ ret = -EFAULT;
+ }
goto err_free;
+ }
}
-mark_explored:
- insn_state[t] = EXPLORED;
- if (env->cfg.cur_stack-- <= 0) {
+ if (env->cfg.cur_stack < 0) {
verbose(env, "pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
- goto peek_stack;
-check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
@@ -8452,7 +9049,11 @@ static bool range_within(struct bpf_reg_state *old,
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
- old->smax_value >= cur->smax_value;
+ old->smax_value >= cur->smax_value &&
+ old->u32_min_value <= cur->u32_min_value &&
+ old->u32_max_value >= cur->u32_max_value &&
+ old->s32_min_value <= cur->s32_min_value &&
+ old->s32_max_value >= cur->s32_max_value;
}
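A small worked sketch (not from the patch, using the kernel's u32/u64 types) of why the 32-bit bounds need their own containment check: if only the 64-bit bounds were compared, an old verified state whose u32 range is [0, 15] could wrongly be treated as covering a new state whose u32 range is [0, 255] whenever their 64-bit bounds happen to coincide.

struct bounds { u64 umin, umax; u32 u32_min, u32_max; };

static bool bounds_within(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->u32_min <= cur->u32_min && old->u32_max >= cur->u32_max;
}

/*
 * bounds_within(&(struct bounds){ .umin = 0, .umax = 255, .u32_min = 0, .u32_max = 15 },
 *               &(struct bounds){ .umin = 0, .umax = 255, .u32_min = 0, .u32_max = 255 })
 * is false, mirroring what range_within() now rejects after this hunk.
 */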
/* Maximum number of register states that can exist at once */
@@ -9388,14 +9989,19 @@ static int do_check(struct bpf_verifier_env *env)
} else if (class == BPF_STX) {
enum bpf_reg_type *prev_dst_type, dst_reg_type;
- if (BPF_MODE(insn->code) == BPF_XADD) {
- err = check_xadd(env, env->insn_idx, insn);
+ if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+ err = check_atomic(env, env->insn_idx, insn);
if (err)
return err;
env->insn_idx++;
continue;
}
+ if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
+ verbose(env, "BPF_STX uses reserved fields\n");
+ return -EINVAL;
+ }
+
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
@@ -9567,54 +10173,94 @@ process_bpf_exit:
return 0;
}
+static int find_btf_percpu_datasec(struct btf *btf)
+{
+ const struct btf_type *t;
+ const char *tname;
+ int i, n;
+
+ /*
+	 * vmlinux and each kernel module have their own ".data..percpu"
+	 * DATASEC in BTF. So in the module case we need to skip the vmlinux BTF
+	 * types and look only at the module's own BTF types.
+ */
+ n = btf_nr_types(btf);
+ if (btf_is_module(btf))
+ i = btf_nr_types(btf_vmlinux);
+ else
+ i = 1;
+
+	for (; i < n; i++) {
+ t = btf_type_by_id(btf, i);
+ if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
+ continue;
+
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (!strcmp(tname, ".data..percpu"))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
/* replace pseudo btf_id with kernel symbol address */
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_insn_aux_data *aux)
{
- u32 datasec_id, type, id = insn->imm;
const struct btf_var_secinfo *vsi;
const struct btf_type *datasec;
+ struct btf_mod_pair *btf_mod;
const struct btf_type *t;
const char *sym_name;
bool percpu = false;
+ u32 type, id = insn->imm;
+ struct btf *btf;
+ s32 datasec_id;
u64 addr;
- int i;
+ int i, btf_fd, err;
- if (!btf_vmlinux) {
- verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
- return -EINVAL;
- }
-
- if (insn[1].imm != 0) {
- verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n");
- return -EINVAL;
+ btf_fd = insn[1].imm;
+ if (btf_fd) {
+ btf = btf_get_by_fd(btf_fd);
+ if (IS_ERR(btf)) {
+ verbose(env, "invalid module BTF object FD specified.\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!btf_vmlinux) {
+ verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
+ return -EINVAL;
+ }
+ btf = btf_vmlinux;
+ btf_get(btf);
}
- t = btf_type_by_id(btf_vmlinux, id);
+ t = btf_type_by_id(btf, id);
if (!t) {
verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
- return -ENOENT;
+ err = -ENOENT;
+ goto err_put;
}
if (!btf_type_is_var(t)) {
- verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n",
- id);
- return -EINVAL;
+ verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
+ err = -EINVAL;
+ goto err_put;
}
- sym_name = btf_name_by_offset(btf_vmlinux, t->name_off);
+ sym_name = btf_name_by_offset(btf, t->name_off);
addr = kallsyms_lookup_name(sym_name);
if (!addr) {
verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
sym_name);
- return -ENOENT;
+ err = -ENOENT;
+ goto err_put;
}
- datasec_id = btf_find_by_name_kind(btf_vmlinux, ".data..percpu",
- BTF_KIND_DATASEC);
+ datasec_id = find_btf_percpu_datasec(btf);
if (datasec_id > 0) {
- datasec = btf_type_by_id(btf_vmlinux, datasec_id);
+ datasec = btf_type_by_id(btf, datasec_id);
for_each_vsi(i, datasec, vsi) {
if (vsi->type == id) {
percpu = true;
@@ -9627,9 +10273,10 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
insn[1].imm = addr >> 32;
type = t->type;
- t = btf_type_skip_modifiers(btf_vmlinux, type, NULL);
+ t = btf_type_skip_modifiers(btf, type, NULL);
if (percpu) {
aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
+ aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
} else if (!btf_type_is_struct(t)) {
const struct btf_type *ret;
@@ -9637,20 +10284,54 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
u32 tsize;
/* resolve the type size of ksym. */
- ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+ ret = btf_resolve_size(btf, t, &tsize);
if (IS_ERR(ret)) {
- tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ tname = btf_name_by_offset(btf, t->name_off);
verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
tname, PTR_ERR(ret));
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put;
}
aux->btf_var.reg_type = PTR_TO_MEM;
aux->btf_var.mem_size = tsize;
} else {
aux->btf_var.reg_type = PTR_TO_BTF_ID;
+ aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
}
+
+ /* check whether we recorded this BTF (and maybe module) already */
+ for (i = 0; i < env->used_btf_cnt; i++) {
+ if (env->used_btfs[i].btf == btf) {
+ btf_put(btf);
+ return 0;
+ }
+ }
+
+ if (env->used_btf_cnt >= MAX_USED_BTFS) {
+ err = -E2BIG;
+ goto err_put;
+ }
+
+ btf_mod = &env->used_btfs[env->used_btf_cnt];
+ btf_mod->btf = btf;
+ btf_mod->module = NULL;
+
+ /* if we reference variables from kernel module, bump its refcount */
+ if (btf_is_module(btf)) {
+ btf_mod->module = btf_try_get_module(btf);
+ if (!btf_mod->module) {
+ err = -ENXIO;
+ goto err_put;
+ }
+ }
+
+ env->used_btf_cnt++;
+
return 0;
+err_put:
+ btf_put(btf);
+ return err;
}
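A minimal sketch (not part of this patch) of the BPF-side usage this serves: a kernel per-CPU variable is declared with __ksym and dereferenced through bpf_this_cpu_ptr(); with module BTF support, the same pattern is meant to work for per-CPU variables living in a module's own ".data..percpu" DATASEC. The symbol shown (bpf_prog_active) is just an illustrative vmlinux per-CPU variable, and vmlinux.h is assumed to be generated by bpftool.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern const int bpf_prog_active __ksym;	/* a per-CPU variable in vmlinux */

SEC("raw_tp/sys_enter")
int dump_prog_active(void *ctx)
{
	const int *active = bpf_this_cpu_ptr(&bpf_prog_active);

	bpf_printk("bpf_prog_active on this CPU: %d", *active);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";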
static int check_map_prealloc(struct bpf_map *map)
@@ -9719,11 +10400,21 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
- if ((is_tracing_prog_type(prog_type) ||
- prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
- map_value_has_spin_lock(map)) {
- verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
- return -EINVAL;
+ if (map_value_has_spin_lock(map)) {
+ if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
+ verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
+ return -EINVAL;
+ }
+
+ if (is_tracing_prog_type(prog_type)) {
+ verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
+ return -EINVAL;
+ }
+
+ if (prog->aux->sleepable) {
+ verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
+ return -EINVAL;
+ }
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
@@ -9742,15 +10433,22 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_LRU_HASH:
case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
if (!is_preallocated_map(map)) {
verbose(env,
- "Sleepable programs can only use preallocated hash maps\n");
+ "Sleepable programs can only use preallocated maps\n");
return -EINVAL;
}
break;
+ case BPF_MAP_TYPE_RINGBUF:
+ break;
default:
verbose(env,
- "Sleepable programs can only use array and hash maps\n");
+ "Sleepable programs can only use array, hash, and ringbuf maps\n");
return -EINVAL;
}
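A minimal sketch (not part of this patch) of what the relaxed rule allows: a sleepable LSM program using a preallocated hash map (hash maps are preallocated unless BPF_F_NO_PREALLOC is set) together with a ring buffer map. The hook and map names are illustrative, and whether a particular LSM hook may be attached as sleepable still depends on the sleepable-hook set.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);	/* preallocated by default */
	__uint(max_entries, 128);
	__type(key, u32);
	__type(value, u64);
} counts SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} events SEC(".maps");

SEC("lsm.s/file_open")
int BPF_PROG(file_open_hook, struct file *file)
{
	u32 key = 0;
	u64 *val = bpf_map_lookup_elem(&counts, &key);
	u64 *slot = bpf_ringbuf_reserve(&events, sizeof(*slot), 0);

	if (slot) {
		*slot = val ? *val : 0;
		bpf_ringbuf_submit(slot, 0);
	}
	return 0;
}

char LICENSE[] SEC("license") = "GPL";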
@@ -9787,13 +10485,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (BPF_CLASS(insn->code) == BPF_STX &&
- ((BPF_MODE(insn->code) != BPF_MEM &&
- BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
- verbose(env, "BPF_STX uses reserved fields\n");
- return -EINVAL;
- }
-
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_insn_aux_data *aux;
struct bpf_map *map;
@@ -9937,6 +10628,13 @@ static void release_maps(struct bpf_verifier_env *env)
env->used_map_cnt);
}
+/* drop refcnt of BTFs used by the rejected program */
+static void release_btfs(struct bpf_verifier_env *env)
+{
+ __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
+ env->used_btf_cnt);
+}
+
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
@@ -10308,6 +11006,7 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
for (i = 0; i < len; i++) {
int adj_idx = i + delta;
struct bpf_insn insn;
+ u8 load_reg;
insn = insns[adj_idx];
if (!aux[adj_idx].zext_dst) {
@@ -10350,9 +11049,27 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
if (!bpf_jit_needs_zext())
continue;
+ /* zext_dst means that we want to zero-extend whatever register
+ * the insn defines, which is dst_reg most of the time, with
+ * the notable exception of BPF_STX + BPF_ATOMIC + BPF_FETCH.
+ */
+ if (BPF_CLASS(insn.code) == BPF_STX &&
+ BPF_MODE(insn.code) == BPF_ATOMIC) {
+ /* BPF_STX + BPF_ATOMIC insns without BPF_FETCH do not
+ * define any registers, therefore zext_dst cannot be
+ * set.
+ */
+ if (WARN_ON(!(insn.imm & BPF_FETCH)))
+ return -EINVAL;
+ load_reg = insn.imm == BPF_CMPXCHG ? BPF_REG_0
+ : insn.src_reg;
+ } else {
+ load_reg = insn.dst_reg;
+ }
+
zext_patch[0] = insn;
- zext_patch[1].dst_reg = insn.dst_reg;
- zext_patch[1].src_reg = insn.dst_reg;
+ zext_patch[1].dst_reg = load_reg;
+ zext_patch[1].src_reg = load_reg;
patch = zext_patch;
patch_len = 2;
apply_patch_buffer:
@@ -10568,8 +11285,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
return 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
- if (insn->code != (BPF_JMP | BPF_CALL) ||
- insn->src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn))
continue;
/* Upon error here we cannot fall back to interpreter but
* need a hard reject of the program. Thus -EFAULT is
@@ -10610,7 +11326,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
/* BPF_PROG_RUN doesn't call subprogs directly,
* hence main prog stats include the runtime of subprogs.
* subprogs don't have IDs and not reachable via prog_get_next_id
- * func[i]->aux->stats will never be accessed and stays NULL
+ * func[i]->stats will never be accessed and stays NULL
*/
func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
if (!func[i])
@@ -10698,8 +11414,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < env->subprog_cnt; i++) {
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
- if (insn->code != (BPF_JMP | BPF_CALL) ||
- insn->src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn))
continue;
subprog = insn->off;
insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
@@ -10744,8 +11459,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
* later look the same as if they were interpreted only.
*/
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
- if (insn->code != (BPF_JMP | BPF_CALL) ||
- insn->src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn))
continue;
insn->off = env->insn_aux_data[i].call_imm;
subprog = find_subprog(env, i + insn->off + 1);
@@ -10774,8 +11488,7 @@ out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
- if (insn->code != (BPF_JMP | BPF_CALL) ||
- insn->src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn))
continue;
insn->off = 0;
insn->imm = env->insn_aux_data[i].call_imm;
@@ -10810,8 +11523,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
return -EINVAL;
}
for (i = 0; i < prog->len; i++, insn++) {
- if (insn->code != (BPF_JMP | BPF_CALL) ||
- insn->src_reg != BPF_PSEUDO_CALL)
+ if (!bpf_pseudo_call(insn))
continue;
depth = get_callee_stack_depth(env, insn, i);
if (depth < 0)
@@ -10848,30 +11560,30 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
- struct bpf_insn mask_and_div[] = {
- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
- /* Rx div 0 -> 0 */
- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+ bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ struct bpf_insn *patchlet;
+ struct bpf_insn chk_and_div[] = {
+ /* [R,W]x div 0 -> 0 */
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JNE | BPF_K, insn->src_reg,
+ 0, 2, 0),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
- struct bpf_insn mask_and_mod[] = {
- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
- /* Rx mod 0 -> Rx */
- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+ struct bpf_insn chk_and_mod[] = {
+ /* [R,W]x mod 0 -> [R,W]x */
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, insn->src_reg,
+ 0, 1 + (is64 ? 0 : 1), 0),
*insn,
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
};
- struct bpf_insn *patchlet;
- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
- patchlet = mask_and_div + (is64 ? 1 : 0);
- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
- } else {
- patchlet = mask_and_mod + (is64 ? 1 : 0);
- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
- }
+ patchlet = isdiv ? chk_and_div : chk_and_mod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+ ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
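/*
 * Aside (not part of this patch): the runtime semantics these patchlets
 * implement, shown as a small worked example. eBPF defines division by zero
 * to yield 0 and modulo by zero to leave the destination unchanged; for
 * 32-bit ALU ops the destination is then zero-extended, which is why the
 * 32-bit chk_and_mod sequence ends with the extra BPF_MOV32_REG.
 *
 *   r0 = 7; r1 = 0;
 *   r0 /= r1;            // r0 == 0
 *
 *   r0 = 7; r1 = 0;
 *   r0 %= r1;            // r0 == 7 (unchanged)
 *
 *   w0 = 0xffffffff; w1 = 0;
 *   w0 %= w1;            // w0 == 0xffffffff, upper 32 bits of r0 cleared
 */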
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
@@ -11276,6 +11988,13 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_known_zero(env, regs, i);
else if (regs[i].type == SCALAR_VALUE)
mark_reg_unknown(env, regs, i);
+ else if (regs[i].type == PTR_TO_MEM_OR_NULL) {
+ const u32 mem_size = regs[i].mem_size;
+
+ mark_reg_known_zero(env, regs, i);
+ regs[i].mem_size = mem_size;
+ regs[i].id = ++env->id_gen;
+ }
}
} else {
/* 1st arg to a function */
@@ -11454,20 +12173,6 @@ static int check_attach_modify_return(unsigned long addr, const char *func_name)
return -EINVAL;
}
-/* non exhaustive list of sleepable bpf_lsm_*() functions */
-BTF_SET_START(btf_sleepable_lsm_hooks)
-#ifdef CONFIG_BPF_LSM
-BTF_ID(func, bpf_lsm_bprm_committed_creds)
-#else
-BTF_ID_UNUSED
-#endif
-BTF_SET_END(btf_sleepable_lsm_hooks)
-
-static int check_sleepable_lsm_hook(u32 btf_id)
-{
- return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id);
-}
-
/* list of non-sleepable functions that are otherwise on
* ALLOW_ERROR_INJECTION list
*/
@@ -11504,7 +12209,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
bpf_log(log, "Tracing programs must provide btf_id\n");
return -EINVAL;
}
- btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
+ btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
if (!btf) {
bpf_log(log,
"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
@@ -11689,7 +12394,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
/* LSM progs check that they are attached to bpf_lsm_*() funcs.
* Only some of them are sleepable.
*/
- if (check_sleepable_lsm_hook(btf_id))
+ if (bpf_lsm_is_sleepable_hook(btf_id))
ret = 0;
break;
default:
@@ -11780,7 +12485,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
return ret;
}
- key = bpf_trampoline_compute_key(tgt_prog, btf_id);
+ key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
tr = bpf_trampoline_get(key, &tgt_info);
if (!tr)
return -ENOMEM;
@@ -11868,6 +12573,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->strict_alignment = false;
env->allow_ptr_leaks = bpf_allow_ptr_leaks();
+ env->allow_uninit_stack = bpf_allow_uninit_stack();
env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
env->bypass_spec_v1 = bpf_bypass_spec_v1();
env->bypass_spec_v4 = bpf_bypass_spec_v4();
@@ -11963,7 +12669,10 @@ skip_full_check:
goto err_release_maps;
}
- if (ret == 0 && env->used_map_cnt) {
+ if (ret)
+ goto err_release_maps;
+
+ if (env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
@@ -11977,15 +12686,29 @@ skip_full_check:
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
+ }
+ if (env->used_btf_cnt) {
+ /* if program passed verifier, update used_btfs in bpf_prog_aux */
+ env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
+ sizeof(env->used_btfs[0]),
+ GFP_KERNEL);
+ if (!env->prog->aux->used_btfs) {
+ ret = -ENOMEM;
+ goto err_release_maps;
+ }
+ memcpy(env->prog->aux->used_btfs, env->used_btfs,
+ sizeof(env->used_btfs[0]) * env->used_btf_cnt);
+ env->prog->aux->used_btf_cnt = env->used_btf_cnt;
+ }
+ if (env->used_map_cnt || env->used_btf_cnt) {
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
- if (ret == 0)
- adjust_btf_func(env);
+ adjust_btf_func(env);
err_release_maps:
if (!env->prog->aux->used_maps)
@@ -11993,6 +12716,8 @@ err_release_maps:
* them now. Otherwise free_used_maps() will release them.
*/
release_maps(env);
+ if (!env->prog->aux->used_btfs)
+ release_btfs(env);
/* extension progs temporarily inherit the attach_type of their targets
for verification purposes, so set it back to zero before returning
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 191c329e482a..a5751784ad74 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
if (opt == -ENOPARAM) {
if (strcmp(param->key, "source") == 0) {
+ if (fc->source)
+ return invalf(fc, "Multiple sources not supported");
fc->source = param->string;
param->string = NULL;
return 0;
@@ -915,6 +917,9 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
+ if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
+ return invalfc(fc, "Disabled controller '%s'",
+ param->key);
ctx->subsys_mask |= (1 << i);
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e41c21819ba0..1ea995f801ec 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -199,7 +199,7 @@ static u16 have_canfork_callback __read_mostly;
/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
- .count = REFCOUNT_INIT(2),
+ .ns.count = REFCOUNT_INIT(2),
.user_ns = &init_user_ns,
.ns.ops = &cgroupns_operations,
.ns.inum = PROC_CGROUP_INIT_INO,
@@ -244,7 +244,7 @@ bool cgroup_ssid_enabled(int ssid)
*
* The default hierarchy is the v2 interface of cgroup and this function
* can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
+ * cases where a subsystem should behave differently depending on the
* interface version.
*
* List of changed behaviors:
@@ -262,7 +262,7 @@ bool cgroup_ssid_enabled(int ssid)
* "cgroup.procs" instead.
*
* - "cgroup.procs" is not sorted. pids will be unique unless they got
- * recycled inbetween reads.
+ * recycled in-between reads.
*
* - "release_agent" and "notify_on_release" are removed. Replacement
* notification mechanism will be implemented.
@@ -281,9 +281,6 @@ bool cgroup_ssid_enabled(int ssid)
* - cpuset: a task can be moved into an empty cpuset, and again it takes
* masks of ancestors.
*
- * - memcg: use_hierarchy is on by default and the cgroup file for the flag
- * is not created.
- *
* - blkcg: blk-throttle becomes properly hierarchical.
*
* - debug: disallowed on the default hierarchy.
@@ -345,7 +342,7 @@ static bool cgroup_is_mixable(struct cgroup *cgrp)
return !cgroup_parent(cgrp);
}
-/* can @cgrp become a thread root? should always be true for a thread root */
+/* can @cgrp become a thread root? Should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
/* mixables don't care */
@@ -530,7 +527,7 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
* the root css is returned, so this function always returns a valid css.
*
* The returned css is not guaranteed to be online, and therefore it is the
- * callers responsiblity to tryget a reference for it.
+ * callers responsibility to try get a reference for it.
*/
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
@@ -702,7 +699,7 @@ EXPORT_SYMBOL_GPL(of_css);
; \
else
-/* walk live descendants in preorder */
+/* walk live descendants in pre order */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
@@ -936,7 +933,7 @@ void put_css_set_locked(struct css_set *cset)
WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
- /* This css_set is dead. unlink it and release cgroup and css refs */
+ /* This css_set is dead. Unlink it and release cgroup and css refs */
for_each_subsys(ss, ssid) {
list_del(&cset->e_cset_node[ssid]);
css_put(cset->subsys[ssid]);
@@ -1061,7 +1058,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
/*
* Build the set of subsystem state objects that we want to see in the
- * new css_set. while subsystems can change globally, the entries here
+ * new css_set. While subsystems can change globally, the entries here
* won't change, so no need for locking.
*/
for_each_subsys(ss, i) {
@@ -1151,7 +1148,7 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
/*
* Always add links to the tail of the lists so that the lists are
- * in choronological order.
+ * in chronological order.
*/
list_move_tail(&link->cset_link, &cgrp->cset_links);
list_add_tail(&link->cgrp_link, &cset->cgrp_links);
@@ -3567,6 +3564,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
{
struct psi_trigger *new;
struct cgroup *cgrp;
+ struct psi_group *psi;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
@@ -3575,7 +3573,8 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
cgroup_get(cgrp);
cgroup_kn_unlock(of->kn);
- new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+ psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
+ new = psi_trigger_create(psi, buf, nbytes, res);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);
@@ -3657,7 +3656,7 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
static int cgroup_file_open(struct kernfs_open_file *of)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->open)
return cft->open(of);
@@ -3666,7 +3665,7 @@ static int cgroup_file_open(struct kernfs_open_file *of)
static void cgroup_file_release(struct kernfs_open_file *of)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->release)
cft->release(of);
@@ -3677,7 +3676,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
{
struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
struct cgroup *cgrp = of->kn->parent->priv;
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
struct cgroup_subsys_state *css;
int ret;
@@ -3727,7 +3726,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
{
- struct cftype *cft = of->kn->priv;
+ struct cftype *cft = of_cft(of);
if (cft->poll)
return cft->poll(of, pt);
@@ -4137,7 +4136,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
* implies that if we observe !CSS_RELEASED on @pos in this RCU
* critical section, the one pointed to by its next pointer is
* guaranteed to not have finished its RCU grace period even if we
- * have dropped rcu_read_lock() inbetween iterations.
+ * have dropped rcu_read_lock() in-between iterations.
*
* If @pos has CSS_RELEASED set, its next pointer can't be
* dereferenced; however, as each css is given a monotonically
@@ -4385,7 +4384,7 @@ static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
}
/**
- * css_task_iter_advance_css_set - advance a task itererator to the next css_set
+ * css_task_iter_advance_css_set - advance a task iterator to the next css_set
* @it: the iterator to advance
*
* Advance @it to the next css_set to walk.
@@ -5152,15 +5151,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
if (err)
goto err_list_del;
- if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
- cgroup_parent(parent)) {
- pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
- current->comm, current->pid, ss->name);
- if (!strcmp(ss->name, "memory"))
- pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
- ss->warned_broken_hierarchy = true;
- }
-
return css;
err_list_del:
@@ -6320,7 +6310,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
*
* Find the cgroup at @path on the default hierarchy, increment its
* reference count and return it. Returns pointer to the found cgroup on
- * success, ERR_PTR(-ENOENT) if @path doens't exist and ERR_PTR(-ENOTDIR)
+ * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
* if @path points to a non-directory.
*/
struct cgroup *cgroup_get_from_path(const char *path)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 57b5b5d0a5fd..53c70c470a38 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -983,25 +983,48 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
*/
static void rebuild_sched_domains_locked(void)
{
+ struct cgroup_subsys_state *pos_css;
struct sched_domain_attr *attr;
cpumask_var_t *doms;
+ struct cpuset *cs;
int ndoms;
lockdep_assert_cpus_held();
percpu_rwsem_assert_held(&cpuset_rwsem);
/*
- * We have raced with CPU hotplug. Don't do anything to avoid
+ * If we have raced with CPU hotplug, return early to avoid
* passing doms with offlined cpu to partition_sched_domains().
- * Anyways, hotplug work item will rebuild sched domains.
+ * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
+ *
+ * With no CPUs in any subpartitions, top_cpuset's effective CPUs
+ * should be the same as the active CPUs, so checking only top_cpuset
+ * is enough to detect racing CPU offlines.
*/
if (!top_cpuset.nr_subparts_cpus &&
!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
return;
- if (top_cpuset.nr_subparts_cpus &&
- !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
- return;
+ /*
+ * With subpartition CPUs, however, the effective CPUs of a partition
+ * root should be only a subset of the active CPUs. Since a CPU in any
+ * partition root could be offlined, all must be checked.
+ */
+ if (top_cpuset.nr_subparts_cpus) {
+ rcu_read_lock();
+ cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
+ if (!is_partition_root(cs)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
+ if (!cpumask_subset(cs->effective_cpus,
+ cpu_active_mask)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ }
/* Generate domain masks and attrs */
ndoms = generate_sched_domains(&doms, &attr);
diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c
index 812a61afd538..f5e8828c109c 100644
--- a/kernel/cgroup/namespace.c
+++ b/kernel/cgroup/namespace.c
@@ -32,7 +32,7 @@ static struct cgroup_namespace *alloc_cgroup_ns(void)
kfree(new_ns);
return ERR_PTR(ret);
}
- refcount_set(&new_ns->count, 1);
+ refcount_set(&new_ns->ns.count, 1);
new_ns->ns.ops = &cgroupns_operations;
return new_ns;
}
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 81e9af7dcec2..eb0029c9a6a6 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -81,7 +81,6 @@ CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
@@ -111,7 +110,6 @@ CONFIG_STRICT_KERNEL_RWX=y
CONFIG_SUSPEND_TIME=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_TASKSTATS=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ff2578ecf17..1b6302ecbabe 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -330,6 +330,13 @@ void lockdep_assert_cpus_held(void)
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
+#ifdef CONFIG_LOCKDEP
+int lockdep_is_cpus_held(void)
+{
+ return percpu_rwsem_is_held(&cpu_hotplug_lock);
+}
+#endif
+
static void lockdep_acquire_cpus_lock(void)
{
rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
@@ -815,6 +822,10 @@ void __init cpuhp_threads_init(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+#ifndef arch_clear_mm_cpumask_cpu
+#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+#endif
+
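A hedged sketch (not from this patch) of what the new override point is for: an architecture that maintains an additional per-mm CPU mask could define arch_clear_mm_cpumask_cpu() so its extra mask is cleared alongside mm_cpumask() when a CPU goes down. The secondary-mask helper below is purely hypothetical; only the hook itself comes from the patch.

#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		cpumask_clear_cpu(cpu, mm_cpumask(mm));			\
		cpumask_clear_cpu(cpu, hypothetical_secondary_mask(mm));\
	} while (0)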
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
@@ -850,7 +861,7 @@ void clear_tasks_mm_cpumask(int cpu)
t = find_lock_task_mm(p);
if (!t)
continue;
- cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+ arch_clear_mm_cpumask_cpu(cpu, t->mm);
task_unlock(t);
}
rcu_read_unlock();
@@ -1602,7 +1613,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.name = "ap:online",
},
/*
- * Handled on controll processor until the plugged processor manages
+ * Handled on control processor until the plugged processor manages
* this itself.
*/
[CPUHP_TEARDOWN_CPU] = {
@@ -1611,6 +1622,13 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.teardown.single = takedown_cpu,
.cant_stop = true,
},
+
+ [CPUHP_AP_SCHED_WAIT_EMPTY] = {
+ .name = "sched:waitempty",
+ .startup.single = NULL,
+ .teardown.single = sched_cpu_wait_empty,
+ },
+
/* Handle smpboot threads park/unpark */
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot/threads:online",
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 106e4500fd53..825284baaf46 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -11,7 +11,7 @@
#include <asm/page.h>
#include <asm/sections.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
/* vmcoreinfo stuff */
unsigned char *vmcoreinfo_data;
@@ -447,6 +447,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_PAGESIZE(PAGE_SIZE);
VMCOREINFO_SYMBOL(init_uts_ns);
+ VMCOREINFO_OFFSET(uts_namespace, name);
VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
VMCOREINFO_SYMBOL_ARRAY(swapper_pg_dir);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1e75a8923a8d..af6e8b4fb359 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -225,8 +225,6 @@ NOKPROBE_SYMBOL(kgdb_skipexception);
* Default (weak) implementation for kgdb_roundup_cpus
*/
-static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
-
void __weak kgdb_call_nmi_hook(void *ignored)
{
/*
@@ -241,6 +239,9 @@ void __weak kgdb_call_nmi_hook(void *ignored)
}
NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
+ CSD_INIT(kgdb_call_nmi_hook, NULL);
+
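The pattern this hunk switches to, as a minimal sketch with hypothetical names (not from the patch): the call_single_data_t is fully initialized at build time with CSD_INIT(), so the runtime assignment of csd->func removed further down is no longer needed before smp_call_function_single_async().

static void example_ipi_handler(void *info)
{
	/* runs on the target CPU */
}

static DEFINE_PER_CPU(call_single_data_t, example_csd) =
	CSD_INIT(example_ipi_handler, NULL);

static void example_kick(int cpu)
{
	smp_call_function_single_async(cpu, &per_cpu(example_csd, cpu));
}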
void __weak kgdb_roundup_cpus(void)
{
call_single_data_t *csd;
@@ -267,7 +268,6 @@ void __weak kgdb_roundup_cpus(void)
continue;
kgdb_info[cpu].rounding_up = true;
- csd->func = kgdb_call_nmi_hook;
ret = smp_call_function_single_async(cpu, csd);
if (ret)
kgdb_info[cpu].rounding_up = false;
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index c99de4a21458..479fc145acfc 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -20,6 +20,10 @@ config DMA_OPS
config DMA_OPS_BYPASS
bool
+# Lets platform IOMMU driver choose between bypass and IOMMU
+config ARCH_HAS_DMA_MAP_DIRECT
+ bool
+
config NEED_SG_DMA_LENGTH
bool
@@ -75,11 +79,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool
-config DMA_VIRT_OPS
- bool
- depends on HAS_DMA
- select DMA_OPS
-
config SWIOTLB
bool
select NEED_DMA_MAP_STATE
@@ -225,3 +224,12 @@ config DMA_API_DEBUG_SG
is technically out-of-spec.
If unsure, say N.
+
+config DMA_MAP_BENCHMARK
+ bool "Enable benchmarking of streaming DMA mapping"
+ depends on DEBUG_FS
+ help
+ Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
+ performance of dma_(un)map_page.
+
+ See tools/testing/selftests/dma/dma_map_benchmark.c
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index dc755ab68aab..0dd65ec1d234 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -5,8 +5,8 @@ obj-$(CONFIG_DMA_OPS) += ops_helpers.o
obj-$(CONFIG_DMA_OPS) += dummy.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
-obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
obj-$(CONFIG_DMA_REMAP) += remap.o
+obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 16b95ff12e4d..3d63d91cba5c 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -20,7 +20,7 @@
* coders, etc.
*
* Such devices often require big memory buffers (a full HD frame
- * is, for instance, more then 2 mega pixels large, i.e. more than 6
+ * is, for instance, more than 2 mega pixels large, i.e. more than 6
* MB of memory), which makes mechanisms such as kmalloc() or
* alloc_page() ineffective.
*
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 06c111544f61..002268262c9a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -547,4 +547,3 @@ int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
dev->dma_range_map = map;
return 0;
}
-EXPORT_SYMBOL_GPL(dma_direct_set_offset);
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
new file mode 100644
index 000000000000..da95df381483
--- /dev/null
+++ b/kernel/dma/map_benchmark.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Hisilicon Limited.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS 1024
+#define DMA_MAP_MAX_SECONDS 300
+
+#define DMA_MAP_BIDIRECTIONAL 0
+#define DMA_MAP_TO_DEVICE 1
+#define DMA_MAP_FROM_DEVICE 2
+
+struct map_benchmark {
+ __u64 avg_map_100ns; /* average map latency in 100ns */
+ __u64 map_stddev; /* standard deviation of map latency */
+ __u64 avg_unmap_100ns; /* as above */
+ __u64 unmap_stddev;
+ __u32 threads; /* how many threads will do map/unmap in parallel */
+ __u32 seconds; /* how long the test will last */
+ __s32 node; /* which numa node this benchmark will run on */
+ __u32 dma_bits; /* DMA addressing capability */
+ __u32 dma_dir; /* DMA data direction */
+ __u8 expansion[84]; /* For future use */
+};
+
+struct map_benchmark_data {
+ struct map_benchmark bparam;
+ struct device *dev;
+ struct dentry *debugfs;
+ enum dma_data_direction dir;
+ atomic64_t sum_map_100ns;
+ atomic64_t sum_unmap_100ns;
+ atomic64_t sum_sq_map;
+ atomic64_t sum_sq_unmap;
+ atomic64_t loops;
+};
+
+static int map_benchmark_thread(void *data)
+{
+ void *buf;
+ dma_addr_t dma_addr;
+ struct map_benchmark_data *map = data;
+ int ret = 0;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (!kthread_should_stop()) {
+ u64 map_100ns, unmap_100ns, map_sq, unmap_sq;
+ ktime_t map_stime, map_etime, unmap_stime, unmap_etime;
+ ktime_t map_delta, unmap_delta;
+
+ /*
+		 * for a non-coherent device, if we don't dirty the buffer in the
+		 * cache, this will give an underestimate of the real-world
+		 * overhead of BIDIRECTIONAL or TO_DEVICE mappings;
+		 * 0x66 means everything goes well! 66 is lucky.
+ */
+ if (map->dir != DMA_FROM_DEVICE)
+ memset(buf, 0x66, PAGE_SIZE);
+
+ map_stime = ktime_get();
+ dma_addr = dma_map_single(map->dev, buf, PAGE_SIZE, map->dir);
+ if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
+ pr_err("dma_map_single failed on %s\n",
+ dev_name(map->dev));
+ ret = -ENOMEM;
+ goto out;
+ }
+ map_etime = ktime_get();
+ map_delta = ktime_sub(map_etime, map_stime);
+
+ unmap_stime = ktime_get();
+ dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir);
+ unmap_etime = ktime_get();
+ unmap_delta = ktime_sub(unmap_etime, unmap_stime);
+
+ /* calculate sum and sum of squares */
+
+ map_100ns = div64_ul(map_delta, 100);
+ unmap_100ns = div64_ul(unmap_delta, 100);
+ map_sq = map_100ns * map_100ns;
+ unmap_sq = unmap_100ns * unmap_100ns;
+
+ atomic64_add(map_100ns, &map->sum_map_100ns);
+ atomic64_add(unmap_100ns, &map->sum_unmap_100ns);
+ atomic64_add(map_sq, &map->sum_sq_map);
+ atomic64_add(unmap_sq, &map->sum_sq_unmap);
+ atomic64_inc(&map->loops);
+ }
+
+out:
+ free_page((unsigned long)buf);
+ return ret;
+}
+
+static int do_map_benchmark(struct map_benchmark_data *map)
+{
+ struct task_struct **tsk;
+ int threads = map->bparam.threads;
+ int node = map->bparam.node;
+ const cpumask_t *cpu_mask = cpumask_of_node(node);
+ u64 loops;
+ int ret = 0;
+ int i;
+
+ tsk = kmalloc_array(threads, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk)
+ return -ENOMEM;
+
+ get_device(map->dev);
+
+ for (i = 0; i < threads; i++) {
+ tsk[i] = kthread_create_on_node(map_benchmark_thread, map,
+ map->bparam.node, "dma-map-benchmark/%d", i);
+ if (IS_ERR(tsk[i])) {
+ pr_err("create dma_map thread failed\n");
+ ret = PTR_ERR(tsk[i]);
+ goto out;
+ }
+
+ if (node != NUMA_NO_NODE)
+ kthread_bind_mask(tsk[i], cpu_mask);
+ }
+
+	/* clear old values left over from the previous benchmark run */
+ atomic64_set(&map->sum_map_100ns, 0);
+ atomic64_set(&map->sum_unmap_100ns, 0);
+ atomic64_set(&map->sum_sq_map, 0);
+ atomic64_set(&map->sum_sq_unmap, 0);
+ atomic64_set(&map->loops, 0);
+
+ for (i = 0; i < threads; i++) {
+ get_task_struct(tsk[i]);
+ wake_up_process(tsk[i]);
+ }
+
+ msleep_interruptible(map->bparam.seconds * 1000);
+
+ /* wait for the completion of benchmark threads */
+ for (i = 0; i < threads; i++) {
+ ret = kthread_stop(tsk[i]);
+ if (ret)
+ goto out;
+ }
+
+ loops = atomic64_read(&map->loops);
+ if (likely(loops > 0)) {
+ u64 map_variance, unmap_variance;
+ u64 sum_map = atomic64_read(&map->sum_map_100ns);
+ u64 sum_unmap = atomic64_read(&map->sum_unmap_100ns);
+ u64 sum_sq_map = atomic64_read(&map->sum_sq_map);
+ u64 sum_sq_unmap = atomic64_read(&map->sum_sq_unmap);
+
+ /* average latency */
+ map->bparam.avg_map_100ns = div64_u64(sum_map, loops);
+ map->bparam.avg_unmap_100ns = div64_u64(sum_unmap, loops);
+
+ /* standard deviation of latency */
+ map_variance = div64_u64(sum_sq_map, loops) -
+ map->bparam.avg_map_100ns *
+ map->bparam.avg_map_100ns;
+ unmap_variance = div64_u64(sum_sq_unmap, loops) -
+ map->bparam.avg_unmap_100ns *
+ map->bparam.avg_unmap_100ns;
+ map->bparam.map_stddev = int_sqrt64(map_variance);
+ map->bparam.unmap_stddev = int_sqrt64(unmap_variance);
+ }
+
+out:
+ for (i = 0; i < threads; i++)
+ put_task_struct(tsk[i]);
+ put_device(map->dev);
+ kfree(tsk);
+ return ret;
+}
+
+static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct map_benchmark_data *map = file->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 old_dma_mask;
+
+ int ret;
+
+ if (copy_from_user(&map->bparam, argp, sizeof(map->bparam)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case DMA_MAP_BENCHMARK:
+ if (map->bparam.threads == 0 ||
+ map->bparam.threads > DMA_MAP_MAX_THREADS) {
+ pr_err("invalid thread number\n");
+ return -EINVAL;
+ }
+
+ if (map->bparam.seconds == 0 ||
+ map->bparam.seconds > DMA_MAP_MAX_SECONDS) {
+ pr_err("invalid duration seconds\n");
+ return -EINVAL;
+ }
+
+ if (map->bparam.node != NUMA_NO_NODE &&
+ !node_possible(map->bparam.node)) {
+ pr_err("invalid numa node\n");
+ return -EINVAL;
+ }
+
+ switch (map->bparam.dma_dir) {
+ case DMA_MAP_BIDIRECTIONAL:
+ map->dir = DMA_BIDIRECTIONAL;
+ break;
+ case DMA_MAP_FROM_DEVICE:
+ map->dir = DMA_FROM_DEVICE;
+ break;
+ case DMA_MAP_TO_DEVICE:
+ map->dir = DMA_TO_DEVICE;
+ break;
+ default:
+ pr_err("invalid DMA direction\n");
+ return -EINVAL;
+ }
+
+ old_dma_mask = dma_get_mask(map->dev);
+
+ ret = dma_set_mask(map->dev,
+ DMA_BIT_MASK(map->bparam.dma_bits));
+ if (ret) {
+ pr_err("failed to set dma_mask on device %s\n",
+ dev_name(map->dev));
+ return -EINVAL;
+ }
+
+ ret = do_map_benchmark(map);
+
+ /*
+		 * restore the original dma_mask, as many devices' dma_masks are
+		 * set by architectures, ACPI or buses. When we bind them back
+		 * to their original drivers, those drivers shouldn't see a
+		 * dma_mask changed by the benchmark
+ */
+ dma_set_mask(map->dev, old_dma_mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(argp, &map->bparam, sizeof(map->bparam)))
+ return -EFAULT;
+
+ return ret;
+}
+
+static const struct file_operations map_benchmark_fops = {
+ .open = simple_open,
+ .unlocked_ioctl = map_benchmark_ioctl,
+};
+
+static void map_benchmark_remove_debugfs(void *data)
+{
+ struct map_benchmark_data *map = (struct map_benchmark_data *)data;
+
+ debugfs_remove(map->debugfs);
+}
+
+static int __map_benchmark_probe(struct device *dev)
+{
+ struct dentry *entry;
+ struct map_benchmark_data *map;
+ int ret;
+
+ map = devm_kzalloc(dev, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+ map->dev = dev;
+
+ ret = devm_add_action(dev, map_benchmark_remove_debugfs, map);
+ if (ret) {
+ pr_err("Can't add debugfs remove action\n");
+ return ret;
+ }
+
+ /*
+	 * only one device may be bound to this driver; a second probe
+	 * will fail
+ */
+ entry = debugfs_create_file("dma_map_benchmark", 0600, NULL, map,
+ &map_benchmark_fops);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ map->debugfs = entry;
+
+ return 0;
+}
+
+static int map_benchmark_platform_probe(struct platform_device *pdev)
+{
+ return __map_benchmark_probe(&pdev->dev);
+}
+
+static struct platform_driver map_benchmark_platform_driver = {
+ .driver = {
+ .name = "dma_map_benchmark",
+ },
+ .probe = map_benchmark_platform_probe,
+};
+
+static int
+map_benchmark_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __map_benchmark_probe(&pdev->dev);
+}
+
+static struct pci_driver map_benchmark_pci_driver = {
+ .name = "dma_map_benchmark",
+ .probe = map_benchmark_pci_probe,
+};
+
+static int __init map_benchmark_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&map_benchmark_pci_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&map_benchmark_platform_driver);
+ if (ret) {
+ pci_unregister_driver(&map_benchmark_pci_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit map_benchmark_cleanup(void)
+{
+ platform_driver_unregister(&map_benchmark_platform_driver);
+ pci_unregister_driver(&map_benchmark_pci_driver);
+}
+
+module_init(map_benchmark_init);
+module_exit(map_benchmark_cleanup);
+
+MODULE_AUTHOR("Barry Song <song.bao.hua@hisilicon.com>");
+MODULE_DESCRIPTION("dma_map benchmark driver");
+MODULE_LICENSE("GPL");
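A minimal user-space sketch (not part of this patch) of driving the new interface; the reference implementation is the selftest referenced by the Kconfig help text, tools/testing/selftests/dma/dma_map_benchmark.c. It assumes a device has already been bound to the dma_map_benchmark driver, mirrors the struct layout defined above, and uses arbitrary example parameters.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define DMA_MAP_BENCHMARK	_IOWR('d', 1, struct map_benchmark)

struct map_benchmark {
	uint64_t avg_map_100ns;
	uint64_t map_stddev;
	uint64_t avg_unmap_100ns;
	uint64_t unmap_stddev;
	uint32_t threads;
	uint32_t seconds;
	int32_t  node;
	uint32_t dma_bits;
	uint32_t dma_dir;
	uint8_t  expansion[84];
};

int main(void)
{
	struct map_benchmark bp;
	int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&bp, 0, sizeof(bp));
	bp.threads  = 4;	/* 1..DMA_MAP_MAX_THREADS */
	bp.seconds  = 10;	/* 1..DMA_MAP_MAX_SECONDS */
	bp.node     = -1;	/* NUMA_NO_NODE */
	bp.dma_bits = 32;
	bp.dma_dir  = 0;	/* DMA_MAP_BIDIRECTIONAL */

	if (ioctl(fd, DMA_MAP_BENCHMARK, &bp) == 0)
		printf("map %llu.%llu us, unmap %llu.%llu us (avg)\n",
		       (unsigned long long)bp.avg_map_100ns / 10,
		       (unsigned long long)bp.avg_map_100ns % 10,
		       (unsigned long long)bp.avg_unmap_100ns / 10,
		       (unsigned long long)bp.avg_unmap_100ns % 10);

	close(fd);
	return 0;
}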
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 51bb8fa8eb89..f87a89d08654 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -149,7 +149,8 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
if (WARN_ON_ONCE(!dev->dma_mask))
return DMA_MAPPING_ERROR;
- if (dma_map_direct(dev, ops))
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
@@ -165,7 +166,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (dma_map_direct(dev, ops))
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_unmap_page_direct(dev, addr + size))
dma_direct_unmap_page(dev, addr, size, dir, attrs);
else if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
@@ -188,7 +190,8 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
if (WARN_ON_ONCE(!dev->dma_mask))
return 0;
- if (dma_map_direct(dev, ops))
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_map_sg_direct(dev, sg, nents))
ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
else
ents = ops->map_sg(dev, sg, nents, dir, attrs);
@@ -207,7 +210,8 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
- if (dma_map_direct(dev, ops))
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_unmap_sg_direct(dev, sg, nents))
dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index d4637f72239b..5f84e6cdb78e 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -38,9 +38,6 @@ static void __init dma_atomic_pool_debugfs_init(void)
struct dentry *root;
root = debugfs_create_dir("dma_pools", NULL);
- if (IS_ERR_OR_NULL(root))
- return;
-
debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b4eea0abc3f0..7c42df6e6100 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -152,8 +152,6 @@ void swiotlb_set_max_segment(unsigned int val)
max_segment = rounddown(val, PAGE_SIZE);
}
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
unsigned long size;
@@ -163,6 +161,24 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
+void __init swiotlb_adjust_size(unsigned long new_size)
+{
+ unsigned long size;
+
+ /*
+	 * If the swiotlb parameter has not been specified, give architectures
+	 * such as those supporting memory encryption a chance to adjust or
+	 * expand the SWIOTLB size for their use.
+ */
+ if (!io_tlb_nslabs) {
+ size = ALIGN(new_size, 1 << IO_TLB_SHIFT);
+ io_tlb_nslabs = size >> IO_TLB_SHIFT;
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+
+ pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
+ }
+}
+
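A hedged sketch (not from this patch) of the intended caller: early architecture setup code, for example on platforms where memory encryption forces bounce buffering, can scale the SWIOTLB before swiotlb_init() runs. The function name, the encryption check and the sizing policy below are all hypothetical.

static void __init hypothetical_arch_swiotlb_setup(unsigned long total_mem)
{
	/* e.g. scale the bounce buffer to ~6% of RAM, capped at 1 GiB */
	if (hypothetical_mem_encrypt_active())
		swiotlb_adjust_size(min_t(unsigned long, total_mem / 16, SZ_1G));
}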
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
@@ -229,6 +245,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
+ no_iotlb_memory = false;
if (verbose)
swiotlb_print_info();
@@ -260,9 +277,11 @@ swiotlb_init(int verbose)
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
- if (io_tlb_start)
+ if (io_tlb_start) {
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+ io_tlb_start = 0;
+ }
pr_warn("Cannot allocate buffer");
no_iotlb_memory = true;
}
@@ -360,6 +379,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
+ no_iotlb_memory = false;
swiotlb_print_info();
@@ -441,14 +461,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
}
}
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr,
- size_t mapping_size,
- size_t alloc_size,
- enum dma_data_direction dir,
- unsigned long attrs)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+ size_t mapping_size, size_t alloc_size,
+ enum dma_data_direction dir, unsigned long attrs)
{
+ dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
@@ -667,9 +684,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
swiotlb_force);
- swiotlb_addr = swiotlb_tbl_map_single(dev,
- phys_to_dma_unencrypted(dev, io_tlb_start),
- paddr, size, size, dir, attrs);
+ swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+ attrs);
if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
diff --git a/kernel/dma/virt.c b/kernel/dma/virt.c
deleted file mode 100644
index 59d32317dd57..000000000000
--- a/kernel/dma/virt.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
- if (ret)
- *dma_handle = (uintptr_t)ret;
- return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
- sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
- .alloc = dma_virt_alloc,
- .free = dma_virt_free,
- .map_page = dma_virt_map_page,
- .map_sg = dma_virt_map_sg,
- .alloc_pages = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
-};
-EXPORT_SYMBOL(dma_virt_ops);
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
deleted file mode 100644
index 57fb4dcff434..000000000000
--- a/kernel/elfcore.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/binfmts.h>
-#include <linux/elfcore.h>
-
-Elf_Half __weak elf_core_extra_phdrs(void)
-{
- return 0;
-}
-
-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
- return 1;
-}
-
-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
-{
- return 1;
-}
-
-size_t __weak elf_core_extra_data_size(void)
-{
- return 0;
-}
diff --git a/kernel/entry/Makefile b/kernel/entry/Makefile
index 34c8a3f1c735..095c775e001e 100644
--- a/kernel/entry/Makefile
+++ b/kernel/entry/Makefile
@@ -9,5 +9,5 @@ KCOV_INSTRUMENT := n
CFLAGS_REMOVE_common.o = -fstack-protector -fstack-protector-strong
CFLAGS_common.o += -fno-stack-protector
-obj-$(CONFIG_GENERIC_ENTRY) += common.o
+obj-$(CONFIG_GENERIC_ENTRY) += common.o syscall_user_dispatch.o
obj-$(CONFIG_KVM_XFER_TO_GUEST_WORK) += kvm.o
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 2b8366693d5c..8442e5c9cfa2 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -2,23 +2,17 @@
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
+#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
+#include "common.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
-/**
- * enter_from_user_mode - Establish state when coming from user mode
- *
- * Syscall/interrupt entry disables interrupts, but user mode is traced as
- * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- */
-static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+/* See comment for enter_from_user_mode() in entry-common.h */
+static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
arch_check_user_regs(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -31,6 +25,11 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
instrumentation_end();
}
+void noinstr enter_from_user_mode(struct pt_regs *regs)
+{
+ __enter_from_user_mode(regs);
+}
+
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
if (unlikely(audit_context())) {
@@ -42,19 +41,29 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
}
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
- unsigned long ti_work)
+ unsigned long work)
{
long ret = 0;
+ /*
+ * Handle Syscall User Dispatch. This must come first, since
+ * the ABI here can be something that doesn't make sense for
+ * other syscall_work features.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (syscall_user_dispatch(regs))
+ return -1L;
+ }
+
/* Handle ptrace */
- if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
+ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
ret = arch_syscall_enter_tracehook(regs);
- if (ret || (ti_work & _TIF_SYSCALL_EMU))
+ if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
return -1L;
}
/* Do seccomp after ptrace, to catch any tracer changes. */
- if (ti_work & _TIF_SECCOMP) {
+ if (work & SYSCALL_WORK_SECCOMP) {
ret = __secure_computing(NULL);
if (ret == -1L)
return ret;
@@ -63,7 +72,7 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
/* Either of the above might have changed the syscall number */
syscall = syscall_get_nr(current, regs);
- if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall);
syscall_enter_audit(regs, syscall);
@@ -74,11 +83,10 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
- unsigned long ti_work;
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
- ti_work = READ_ONCE(current_thread_info()->flags);
- if (ti_work & SYSCALL_ENTER_WORK)
- syscall = syscall_trace_enter(regs, syscall, ti_work);
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
return syscall;
}
@@ -92,7 +100,7 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
long ret;
- enter_from_user_mode(regs);
+ __enter_from_user_mode(regs);
instrumentation_begin();
local_irq_enable();
@@ -104,25 +112,14 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
- enter_from_user_mode(regs);
+ __enter_from_user_mode(regs);
instrumentation_begin();
local_irq_enable();
instrumentation_end();
}
-/**
- * exit_to_user_mode - Fixup state when exiting to user mode
- *
- * Syscall/interupt exit enables interrupts, but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about it.
- *
- * 1) Trace interrupts on state
- * 2) Invoke context tracking if enabled to adjust RCU state
- * 3) Invoke architecture specific last minute exit code, e.g. speculation
- * mitigations, etc.
- * 4) Tell lockdep that interrupts are enabled
- */
-static __always_inline void exit_to_user_mode(void)
+/* See comment for exit_to_user_mode() in entry-common.h */
+static __always_inline void __exit_to_user_mode(void)
{
instrumentation_begin();
trace_hardirqs_on_prepare();
@@ -134,8 +131,21 @@ static __always_inline void exit_to_user_mode(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
+void noinstr exit_to_user_mode(void)
+{
+ __exit_to_user_mode();
+}
+
/* Workaround to allow gradual conversion of architecture code */
-void __weak arch_do_signal(struct pt_regs *regs) { }
+void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }
+
+static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
+{
+ if (ti_work & _TIF_NOTIFY_SIGNAL)
+ tracehook_notify_signal();
+
+ arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
+}
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
unsigned long ti_work)
@@ -157,8 +167,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
if (ti_work & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
- if (ti_work & _TIF_SIGPENDING)
- arch_do_signal(regs);
+ if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ handle_signal_work(regs, ti_work);
if (ti_work & _TIF_NOTIFY_RESUME) {
tracehook_notify_resume(regs);
@@ -174,6 +184,10 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
* enabled above.
*/
local_irq_disable_exit_to_user();
+
+ /* Check if any of the above work has queued a deferred wakeup */
+ rcu_nocb_flush_deferred_wakeup();
+
ti_work = READ_ONCE(current_thread_info()->flags);
}
@@ -187,6 +201,9 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
lockdep_assert_irqs_disabled();
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ rcu_nocb_flush_deferred_wakeup();
+
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
@@ -194,40 +211,48 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
/* Ensure that the address limit is intact and no locks are held */
addr_limit_user_check();
+ kmap_assert_nomap();
lockdep_assert_irqs_disabled();
lockdep_sys_exit();
}
-#ifndef _TIF_SINGLESTEP
-static inline bool report_single_step(unsigned long ti_work)
-{
- return false;
-}
-#else
/*
- * If TIF_SYSCALL_EMU is set, then the only reason to report is when
- * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
+ * If SYSCALL_EMU is set, then the only reason to report is when
+ * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
* instruction has already been reported in syscall_enter_from_user_mode().
*/
-#define SYSEMU_STEP (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)
-
-static inline bool report_single_step(unsigned long ti_work)
+static inline bool report_single_step(unsigned long work)
{
- return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
+ return false;
+
+ return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
-#endif
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
bool step;
+ /*
+ * If the syscall was rolled back due to syscall user dispatching,
+ * then the tracers below are not invoked, for the same reason that
+ * the entry side was not invoked in syscall_trace_enter(): The ABI
+ * of these syscalls is unknown.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (unlikely(current->syscall_dispatch.on_dispatch)) {
+ current->syscall_dispatch.on_dispatch = false;
+ return;
+ }
+ }
+
audit_syscall_exit(regs);
- if (ti_work & _TIF_SYSCALL_TRACEPOINT)
+ if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
trace_sys_exit(regs, syscall_get_return_value(current, regs));
- step = report_single_step(ti_work);
- if (step || ti_work & _TIF_SYSCALL_TRACE)
+ step = report_single_step(work);
+ if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
arch_syscall_exit_tracehook(regs, step);
}
@@ -237,7 +262,7 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
*/
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
- u32 cached_flags = READ_ONCE(current_thread_info()->flags);
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
unsigned long nr = syscall_get_nr(current, regs);
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
@@ -254,23 +279,33 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
* enabled, we want to run them exactly once per syscall exit with
* interrupts enabled.
*/
- if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
- syscall_exit_work(regs, cached_flags);
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
}
-__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
+static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
- instrumentation_begin();
syscall_exit_to_user_mode_prepare(regs);
local_irq_disable_exit_to_user();
exit_to_user_mode_prepare(regs);
+}
+
+void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ __syscall_exit_to_user_mode_work(regs);
+}
+
+__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ __syscall_exit_to_user_mode_work(regs);
instrumentation_end();
- exit_to_user_mode();
+ __exit_to_user_mode();
}
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
- enter_from_user_mode(regs);
+ __enter_from_user_mode(regs);
}
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
@@ -278,7 +313,7 @@ noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
instrumentation_begin();
exit_to_user_mode_prepare(regs);
instrumentation_end();
- exit_to_user_mode();
+ __exit_to_user_mode();
}
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
@@ -296,7 +331,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
* If this entry hit the idle task invoke rcu_irq_enter() whether
* RCU is watching or not.
*
- * Interupts can nest when the first interrupt invokes softirq
+ * Interrupts can nest when the first interrupt invokes softirq
* processing on return which enables interrupts.
*
* Scheduler ticks in the idle task can mark quiescent state and
@@ -307,7 +342,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
* interrupt to invoke rcu_irq_enter(). If that nested interrupt is
* the tick then rcu_flavor_sched_clock_irq() would wrongfully
* assume that it is the first interrupt and eventually claim
- * quiescient state and end grace periods prematurely.
+ * quiescent state and end grace periods prematurely.
*
* Unconditionally invoke rcu_irq_enter() so RCU state stays
* consistent.
@@ -319,7 +354,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
/*
* If RCU is not watching then the same careful
* sequence vs. lockdep and tracing is required
- * as in irq_enter_from_user_mode().
+ * as in irqentry_enter_from_user_mode().
*/
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter();
@@ -337,10 +372,10 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
* already contains a warning when RCU is not watching, so no point
* in having another one here.
*/
+ lockdep_hardirqs_off(CALLER_ADDR0);
instrumentation_begin();
rcu_irq_enter_check_tick();
- /* Use the combo lockdep/tracing function */
- trace_hardirqs_off();
+ trace_hardirqs_off_finish();
instrumentation_end();
return ret;
@@ -357,6 +392,9 @@ void irqentry_exit_cond_resched(void)
preempt_schedule_irq();
}
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+#endif
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
@@ -383,8 +421,13 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
}
instrumentation_begin();
- if (IS_ENABLED(CONFIG_PREEMPTION))
+ if (IS_ENABLED(CONFIG_PREEMPTION)) {
+#ifdef CONFIG_PREEMPT_DYNAMIC
+ static_call(irqentry_exit_cond_resched)();
+#else
irqentry_exit_cond_resched();
+#endif
+ }
/* Covers both tracing and lockdep */
trace_hardirqs_on();
instrumentation_end();
@@ -397,3 +440,39 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
rcu_irq_exit();
}
}
+
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
+{
+ irqentry_state_t irq_state;
+
+ irq_state.lockdep = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+ instrumentation_end();
+
+ return irq_state;
+}
+
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
+{
+ instrumentation_begin();
+ ftrace_nmi_exit();
+ if (irq_state.lockdep) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+ instrumentation_end();
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (irq_state.lockdep)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
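
The new irqentry_nmi_enter()/irqentry_nmi_exit() pair lets architectures replace hand-rolled NMI bookkeeping (lockdep, RCU, ftrace) with the generic helpers above. A rough sketch of an arch-side user, assuming a hypothetical arch_handle_nmi() entry point that is not defined anywhere in this patch:

#include <linux/entry-common.h>

noinstr void arch_handle_nmi(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_nmi_enter(regs);

	instrumentation_begin();
	/* architecture-specific NMI processing goes here */
	instrumentation_end();

	irqentry_nmi_exit(regs, state);
}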
diff --git a/kernel/entry/common.h b/kernel/entry/common.h
new file mode 100644
index 000000000000..f6e6d02f07fe
--- /dev/null
+++ b/kernel/entry/common.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _COMMON_H
+#define _COMMON_H
+
+bool syscall_user_dispatch(struct pt_regs *regs);
+
+#endif
diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
index b6678a5e3cf6..49972ee99aff 100644
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
@@ -8,6 +8,9 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
do {
int ret;
+ if (ti_work & _TIF_NOTIFY_SIGNAL)
+ tracehook_notify_signal();
+
if (ti_work & _TIF_SIGPENDING) {
kvm_handle_signal_exit(vcpu);
return -EINTR;
diff --git a/kernel/entry/syscall_user_dispatch.c b/kernel/entry/syscall_user_dispatch.c
new file mode 100644
index 000000000000..c240302f56e2
--- /dev/null
+++ b/kernel/entry/syscall_user_dispatch.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Collabora Ltd.
+ */
+#include <linux/sched.h>
+#include <linux/prctl.h>
+#include <linux/syscall_user_dispatch.h>
+#include <linux/uaccess.h>
+#include <linux/signal.h>
+#include <linux/elf.h>
+
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/syscall.h>
+
+#include "common.h"
+
+static void trigger_sigsys(struct pt_regs *regs)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSYS;
+ info.si_code = SYS_USER_DISPATCH;
+ info.si_call_addr = (void __user *)KSTK_EIP(current);
+ info.si_errno = 0;
+ info.si_arch = syscall_get_arch(current);
+ info.si_syscall = syscall_get_nr(current, regs);
+
+ force_sig_info(&info);
+}
+
+bool syscall_user_dispatch(struct pt_regs *regs)
+{
+ struct syscall_user_dispatch *sd = &current->syscall_dispatch;
+ char state;
+
+ if (likely(instruction_pointer(regs) - sd->offset < sd->len))
+ return false;
+
+ if (unlikely(arch_syscall_is_vdso_sigreturn(regs)))
+ return false;
+
+ if (likely(sd->selector)) {
+ /*
+ * access_ok() is performed once, at prctl time, when
+ * the selector is loaded by userspace.
+ */
+ if (unlikely(__get_user(state, sd->selector)))
+ do_exit(SIGSEGV);
+
+ if (likely(state == SYSCALL_DISPATCH_FILTER_ALLOW))
+ return false;
+
+ if (state != SYSCALL_DISPATCH_FILTER_BLOCK)
+ do_exit(SIGSYS);
+ }
+
+ sd->on_dispatch = true;
+ syscall_rollback(current, regs);
+ trigger_sigsys(regs);
+
+ return true;
+}
+
+int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector)
+{
+ switch (mode) {
+ case PR_SYS_DISPATCH_OFF:
+ if (offset || len || selector)
+ return -EINVAL;
+ break;
+ case PR_SYS_DISPATCH_ON:
+ /*
+ * Validate the direct dispatcher region just for basic
+ * sanity against overflow and a 0-sized dispatcher
+ * region. If the user is able to submit a syscall from
+ * an address, that address is obviously valid.
+ */
+ if (offset && offset + len <= offset)
+ return -EINVAL;
+
+ if (selector && !access_ok(selector, sizeof(*selector)))
+ return -EFAULT;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ current->syscall_dispatch.selector = selector;
+ current->syscall_dispatch.offset = offset;
+ current->syscall_dispatch.len = len;
+ current->syscall_dispatch.on_dispatch = false;
+
+ if (mode == PR_SYS_DISPATCH_ON)
+ set_syscall_work(SYSCALL_USER_DISPATCH);
+ else
+ clear_syscall_work(SYSCALL_USER_DISPATCH);
+
+ return 0;
+}
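
For context, the matching userspace side (not part of this file) arms the feature through prctl(). A minimal sketch, assuming uapi headers that carry the PR_SET_SYSCALL_USER_DISPATCH constants from this series and a dispatcher region [start, start + len) whose syscalls stay allowed:

#include <sys/prctl.h>
#include <linux/prctl.h>

static volatile char sud_selector = SYSCALL_DISPATCH_FILTER_ALLOW;

/*
 * Syscalls issued outside the dispatcher region raise SIGSYS while the
 * selector byte reads SYSCALL_DISPATCH_FILTER_BLOCK.
 */
static int enable_sud(void *start, unsigned long len)
{
	return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		     (unsigned long)start, len, &sud_selector);
}

static void sud_block(void) { sud_selector = SYSCALL_DISPATCH_FILTER_BLOCK; }
static void sud_allow(void) { sud_selector = SYSCALL_DISPATCH_FILTER_ALLOW; }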
diff --git a/kernel/events/core.c b/kernel/events/core.c
index da467e1dd49a..129dee540a8b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -51,6 +51,9 @@
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
+#include <linux/highmem.h>
+#include <linux/pgtable.h>
+#include <linux/buildid.h>
#include "internal.h"
@@ -395,6 +398,7 @@ static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
+static atomic_t nr_build_id_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
@@ -1325,7 +1329,7 @@ static void put_ctx(struct perf_event_context *ctx)
* function.
*
* Lock order:
- * exec_update_mutex
+ * exec_update_lock
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
@@ -1593,50 +1597,91 @@ static void perf_event_groups_init(struct perf_event_groups *groups)
groups->index = 0;
}
+static inline struct cgroup *event_cgroup(const struct perf_event *event)
+{
+ struct cgroup *cgroup = NULL;
+
+#ifdef CONFIG_CGROUP_PERF
+ if (event->cgrp)
+ cgroup = event->cgrp->css.cgroup;
+#endif
+
+ return cgroup;
+}
+
/*
* Compare function for event groups;
*
* Implements complex key that first sorts by CPU and then by virtual index
* which provides ordering when rotating groups for the same CPU.
*/
-static bool
-perf_event_groups_less(struct perf_event *left, struct perf_event *right)
+static __always_inline int
+perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
+ const u64 left_group_index, const struct perf_event *right)
{
- if (left->cpu < right->cpu)
- return true;
- if (left->cpu > right->cpu)
- return false;
+ if (left_cpu < right->cpu)
+ return -1;
+ if (left_cpu > right->cpu)
+ return 1;
#ifdef CONFIG_CGROUP_PERF
- if (left->cgrp != right->cgrp) {
- if (!left->cgrp || !left->cgrp->css.cgroup) {
- /*
- * Left has no cgroup but right does, no cgroups come
- * first.
- */
- return true;
- }
- if (!right->cgrp || !right->cgrp->css.cgroup) {
- /*
- * Right has no cgroup but left does, no cgroups come
- * first.
- */
- return false;
- }
- /* Two dissimilar cgroups, order by id. */
- if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
- return true;
+ {
+ const struct cgroup *right_cgroup = event_cgroup(right);
- return false;
+ if (left_cgroup != right_cgroup) {
+ if (!left_cgroup) {
+ /*
+ * Left has no cgroup but right does, no
+ * cgroups come first.
+ */
+ return -1;
+ }
+ if (!right_cgroup) {
+ /*
+ * Right has no cgroup but left does, no
+ * cgroups come first.
+ */
+ return 1;
+ }
+ /* Two dissimilar cgroups, order by id. */
+ if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
+ return -1;
+
+ return 1;
+ }
}
#endif
- if (left->group_index < right->group_index)
- return true;
- if (left->group_index > right->group_index)
- return false;
+ if (left_group_index < right->group_index)
+ return -1;
+ if (left_group_index > right->group_index)
+ return 1;
- return false;
+ return 0;
+}
+
+#define __node_2_pe(node) \
+ rb_entry((node), struct perf_event, group_node)
+
+static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
+{
+ struct perf_event *e = __node_2_pe(a);
+ return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
+ __node_2_pe(b)) < 0;
+}
+
+struct __group_key {
+ int cpu;
+ struct cgroup *cgroup;
+};
+
+static inline int __group_cmp(const void *key, const struct rb_node *node)
+{
+ const struct __group_key *a = key;
+ const struct perf_event *b = __node_2_pe(node);
+
+ /* partial/subtree match: @cpu, @cgroup; ignore: @group_index */
+ return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
}
/*
@@ -1648,27 +1693,9 @@ static void
perf_event_groups_insert(struct perf_event_groups *groups,
struct perf_event *event)
{
- struct perf_event *node_event;
- struct rb_node *parent;
- struct rb_node **node;
-
event->group_index = ++groups->index;
- node = &groups->tree.rb_node;
- parent = *node;
-
- while (*node) {
- parent = *node;
- node_event = container_of(*node, struct perf_event, group_node);
-
- if (perf_event_groups_less(event, node_event))
- node = &parent->rb_left;
- else
- node = &parent->rb_right;
- }
-
- rb_link_node(&event->group_node, parent, node);
- rb_insert_color(&event->group_node, &groups->tree);
+ rb_add(&event->group_node, &groups->tree, __group_less);
}
/*
@@ -1716,45 +1743,17 @@ static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct cgroup *cgrp)
{
- struct perf_event *node_event = NULL, *match = NULL;
- struct rb_node *node = groups->tree.rb_node;
-#ifdef CONFIG_CGROUP_PERF
- u64 node_cgrp_id, cgrp_id = 0;
-
- if (cgrp)
- cgrp_id = cgrp->kn->id;
-#endif
-
- while (node) {
- node_event = container_of(node, struct perf_event, group_node);
-
- if (cpu < node_event->cpu) {
- node = node->rb_left;
- continue;
- }
- if (cpu > node_event->cpu) {
- node = node->rb_right;
- continue;
- }
-#ifdef CONFIG_CGROUP_PERF
- node_cgrp_id = 0;
- if (node_event->cgrp && node_event->cgrp->css.cgroup)
- node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
+ struct __group_key key = {
+ .cpu = cpu,
+ .cgroup = cgrp,
+ };
+ struct rb_node *node;
- if (cgrp_id < node_cgrp_id) {
- node = node->rb_left;
- continue;
- }
- if (cgrp_id > node_cgrp_id) {
- node = node->rb_right;
- continue;
- }
-#endif
- match = node_event;
- node = node->rb_left;
- }
+ node = rb_find_first(&key, &groups->tree, __group_cmp);
+ if (node)
+ return __node_2_pe(node);
- return match;
+ return NULL;
}
/*
@@ -1763,27 +1762,17 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
static struct perf_event *
perf_event_groups_next(struct perf_event *event)
{
- struct perf_event *next;
-#ifdef CONFIG_CGROUP_PERF
- u64 curr_cgrp_id = 0;
- u64 next_cgrp_id = 0;
-#endif
-
- next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
- if (next == NULL || next->cpu != event->cpu)
- return NULL;
-
-#ifdef CONFIG_CGROUP_PERF
- if (event->cgrp && event->cgrp->css.cgroup)
- curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
+ struct __group_key key = {
+ .cpu = event->cpu,
+ .cgroup = event_cgroup(event),
+ };
+ struct rb_node *next;
- if (next->cgrp && next->cgrp->css.cgroup)
- next_cgrp_id = next->cgrp->css.cgroup->kn->id;
+ next = rb_next_match(&key, &event->group_node, __group_cmp);
+ if (next)
+ return __node_2_pe(next);
- if (curr_cgrp_id != next_cgrp_id)
- return NULL;
-#endif
- return next;
+ return NULL;
}
/*
@@ -1877,8 +1866,8 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
- if (sample_type & PERF_SAMPLE_WEIGHT)
- size += sizeof(data->weight);
+ if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
+ size += sizeof(data->weight.full);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
@@ -1895,6 +1884,12 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
if (sample_type & PERF_SAMPLE_CGROUP)
size += sizeof(data->cgroup);
+ if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
+ size += sizeof(data->data_page_size);
+
+ if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ size += sizeof(data->code_page_size);
+
event->header_size = size;
}
@@ -2312,9 +2307,6 @@ group_sched_out(struct perf_event *group_event,
event_sched_out(event, cpuctx, ctx);
perf_pmu_enable(ctx->pmu);
-
- if (group_event->attr.exclusive)
- cpuctx->exclusive = 0;
}
#define DETACH_GROUP 0x01UL
@@ -2583,11 +2575,8 @@ group_sched_in(struct perf_event *group_event,
pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
- if (event_sched_in(group_event, cpuctx, ctx)) {
- pmu->cancel_txn(pmu);
- perf_mux_hrtimer_restart(cpuctx);
- return -EAGAIN;
- }
+ if (event_sched_in(group_event, cpuctx, ctx))
+ goto error;
/*
* Schedule in siblings as one group (if any):
@@ -2616,10 +2605,8 @@ group_error:
}
event_sched_out(group_event, cpuctx, ctx);
+error:
pmu->cancel_txn(pmu);
-
- perf_mux_hrtimer_restart(cpuctx);
-
return -EAGAIN;
}
@@ -2645,7 +2632,7 @@ static int group_can_go_on(struct perf_event *event,
* If this group is exclusive and there are already
* events on the CPU, it can't go on.
*/
- if (event->attr.exclusive && cpuctx->active_oncpu)
+ if (event->attr.exclusive && !list_empty(get_event_list(event)))
return 0;
/*
* Otherwise, try to add it if all previous groups were able
@@ -3679,6 +3666,7 @@ static int merge_sched_in(struct perf_event *event, void *data)
*can_add_hw = 0;
ctx->rotate_necessary = 1;
+ perf_mux_hrtimer_restart(cpuctx);
}
return 0;
@@ -4672,6 +4660,8 @@ static void unaccount_event(struct perf_event *event)
dec = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
+ if (event->attr.build_id)
+ atomic_dec(&nr_build_id_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.namespaces)
@@ -6374,14 +6364,13 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
} else if (!(current->flags & PF_KTHREAD)) {
- perf_get_regs_user(regs_user, regs, regs_user_copy);
+ perf_get_regs_user(regs_user, regs);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
@@ -6907,8 +6896,8 @@ void perf_output_sample(struct perf_output_handle *handle,
data->regs_user.regs);
}
- if (sample_type & PERF_SAMPLE_WEIGHT)
- perf_output_put(handle, data->weight);
+ if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
+ perf_output_put(handle, data->weight.full);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
@@ -6939,6 +6928,12 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_CGROUP)
perf_output_put(handle, data->cgroup);
+ if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
+ perf_output_put(handle, data->data_page_size);
+
+ if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ perf_output_put(handle, data->code_page_size);
+
if (sample_type & PERF_SAMPLE_AUX) {
perf_output_put(handle, data->aux_size);
@@ -6996,6 +6991,93 @@ static u64 perf_virt_to_phys(u64 virt)
return phys_addr;
}
+/*
+ * Return the pagetable size of a given virtual address.
+ */
+static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
+{
+ u64 size = 0;
+
+#ifdef CONFIG_HAVE_FAST_GUP
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+
+ pgdp = pgd_offset(mm, addr);
+ pgd = READ_ONCE(*pgdp);
+ if (pgd_none(pgd))
+ return 0;
+
+ if (pgd_leaf(pgd))
+ return pgd_leaf_size(pgd);
+
+ p4dp = p4d_offset_lockless(pgdp, pgd, addr);
+ p4d = READ_ONCE(*p4dp);
+ if (!p4d_present(p4d))
+ return 0;
+
+ if (p4d_leaf(p4d))
+ return p4d_leaf_size(p4d);
+
+ pudp = pud_offset_lockless(p4dp, p4d, addr);
+ pud = READ_ONCE(*pudp);
+ if (!pud_present(pud))
+ return 0;
+
+ if (pud_leaf(pud))
+ return pud_leaf_size(pud);
+
+ pmdp = pmd_offset_lockless(pudp, pud, addr);
+ pmd = READ_ONCE(*pmdp);
+ if (!pmd_present(pmd))
+ return 0;
+
+ if (pmd_leaf(pmd))
+ return pmd_leaf_size(pmd);
+
+ ptep = pte_offset_map(&pmd, addr);
+ pte = ptep_get_lockless(ptep);
+ if (pte_present(pte))
+ size = pte_leaf_size(pte);
+ pte_unmap(ptep);
+#endif /* CONFIG_HAVE_FAST_GUP */
+
+ return size;
+}
+
+static u64 perf_get_page_size(unsigned long addr)
+{
+ struct mm_struct *mm;
+ unsigned long flags;
+ u64 size;
+
+ if (!addr)
+ return 0;
+
+ /*
+ * Software page-table walkers must disable IRQs,
+ * which prevents any teardown of the page tables.
+ */
+ local_irq_save(flags);
+
+ mm = current->mm;
+ if (!mm) {
+ /*
+ * For kernel threads and the like, use init_mm so that
+ * we can find kernel memory.
+ */
+ mm = &init_mm;
+ }
+
+ size = perf_get_pgtable_size(mm, addr);
+
+ local_irq_restore(flags);
+
+ return size;
+}
+
static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
struct perf_callchain_entry *
@@ -7031,7 +7113,7 @@ void perf_prepare_sample(struct perf_event_header *header,
__perf_event_header__init_id(header, data, event);
- if (sample_type & PERF_SAMPLE_IP)
+ if (sample_type & (PERF_SAMPLE_IP | PERF_SAMPLE_CODE_PAGE_SIZE))
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
@@ -7083,8 +7165,7 @@ void perf_prepare_sample(struct perf_event_header *header,
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
- perf_sample_regs_user(&data->regs_user, regs,
- &data->regs_user_copy);
+ perf_sample_regs_user(&data->regs_user, regs);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
@@ -7151,6 +7232,17 @@ void perf_prepare_sample(struct perf_event_header *header,
}
#endif
+ /*
+ * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user
+ * doesn't require PERF_SAMPLE_ADDR, the kernel implicitly retrieves
+ * data->addr, but the value is not dumped to userspace.
+ */
+ if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
+ data->data_page_size = perf_get_page_size(data->addr);
+
+ if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ data->code_page_size = perf_get_page_size(data->ip);
+
if (sample_type & PERF_SAMPLE_AUX) {
u64 size;
@@ -7186,6 +7278,7 @@ __perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
int (*output_begin)(struct perf_output_handle *,
+ struct perf_sample_data *,
struct perf_event *,
unsigned int))
{
@@ -7198,7 +7291,7 @@ __perf_event_output(struct perf_event *event,
perf_prepare_sample(&header, data, event, regs);
- err = output_begin(&handle, event, header.size);
+ err = output_begin(&handle, data, event, header.size);
if (err)
goto exit;
@@ -7264,7 +7357,7 @@ perf_event_read_event(struct perf_event *event,
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
- ret = perf_output_begin(&handle, event, read_event.header.size);
+ ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
if (ret)
return;
@@ -7533,7 +7626,7 @@ static void perf_event_task_output(struct perf_event *event,
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
task_event->event_id.header.size);
if (ret)
goto out;
@@ -7636,7 +7729,7 @@ static void perf_event_comm_output(struct perf_event *event,
return;
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
comm_event->event_id.header.size);
if (ret)
@@ -7736,7 +7829,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
perf_event_header__init_id(&namespaces_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
namespaces_event->event_id.header.size);
if (ret)
goto out;
@@ -7863,7 +7956,7 @@ static void perf_event_cgroup_output(struct perf_event *event, void *data)
perf_event_header__init_id(&cgroup_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
cgroup_event->event_id.header.size);
if (ret)
goto out;
@@ -7942,6 +8035,8 @@ struct perf_mmap_event {
u64 ino;
u64 ino_generation;
u32 prot, flags;
+ u8 build_id[BUILD_ID_SIZE_MAX];
+ u32 build_id_size;
struct {
struct perf_event_header header;
@@ -7973,6 +8068,7 @@ static void perf_event_mmap_output(struct perf_event *event,
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
u32 type = mmap_event->event_id.header.type;
+ bool use_build_id;
int ret;
if (!perf_event_mmap_match(event, data))
@@ -7989,7 +8085,7 @@ static void perf_event_mmap_output(struct perf_event *event,
}
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
@@ -7997,13 +8093,25 @@ static void perf_event_mmap_output(struct perf_event *event,
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
+ use_build_id = event->attr.build_id && mmap_event->build_id_size;
+
+ if (event->attr.mmap2 && use_build_id)
+ mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
+
perf_output_put(&handle, mmap_event->event_id);
if (event->attr.mmap2) {
- perf_output_put(&handle, mmap_event->maj);
- perf_output_put(&handle, mmap_event->min);
- perf_output_put(&handle, mmap_event->ino);
- perf_output_put(&handle, mmap_event->ino_generation);
+ if (use_build_id) {
+ u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 };
+
+ __output_copy(&handle, size, 4);
+ __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX);
+ } else {
+ perf_output_put(&handle, mmap_event->maj);
+ perf_output_put(&handle, mmap_event->min);
+ perf_output_put(&handle, mmap_event->ino);
+ perf_output_put(&handle, mmap_event->ino_generation);
+ }
perf_output_put(&handle, mmap_event->prot);
perf_output_put(&handle, mmap_event->flags);
}
@@ -8132,6 +8240,9 @@ got_name:
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
+ if (atomic_read(&nr_build_id_events))
+ build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
+
perf_iterate_sb(perf_event_mmap_output,
mmap_event,
NULL);
@@ -8299,7 +8410,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
int ret;
perf_event_header__init_id(&rec.header, &sample, event);
- ret = perf_output_begin(&handle, event, rec.header.size);
+ ret = perf_output_begin(&handle, &sample, event, rec.header.size);
if (ret)
return;
@@ -8333,7 +8444,7 @@ void perf_log_lost_samples(struct perf_event *event, u64 lost)
perf_event_header__init_id(&lost_samples_event.header, &sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
lost_samples_event.header.size);
if (ret)
return;
@@ -8388,7 +8499,7 @@ static void perf_event_switch_output(struct perf_event *event, void *data)
perf_event_header__init_id(&se->event_id.header, &sample, event);
- ret = perf_output_begin(&handle, event, se->event_id.header.size);
+ ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
if (ret)
return;
@@ -8463,7 +8574,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
perf_event_header__init_id(&throttle_event.header, &sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
throttle_event.header.size);
if (ret)
return;
@@ -8506,7 +8617,7 @@ static void perf_event_ksymbol_output(struct perf_event *event, void *data)
perf_event_header__init_id(&ksymbol_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, &sample, event,
ksymbol_event->event_id.header.size);
if (ret)
return;
@@ -8596,7 +8707,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
perf_event_header__init_id(&bpf_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, event,
+ ret = perf_output_begin(&handle, data, event,
bpf_event->event_id.header.size);
if (ret)
return;
@@ -8705,7 +8816,8 @@ static void perf_event_text_poke_output(struct perf_event *event, void *data)
perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
- ret = perf_output_begin(&handle, event, text_poke_event->event_id.header.size);
+ ret = perf_output_begin(&handle, &sample, event,
+ text_poke_event->event_id.header.size);
if (ret)
return;
@@ -8786,7 +8898,7 @@ static void perf_log_itrace_start(struct perf_event *event)
rec.tid = perf_event_tid(event, current);
perf_event_header__init_id(&rec.header, &sample, event);
- ret = perf_output_begin(&handle, event, rec.header.size);
+ ret = perf_output_begin(&handle, &sample, event, rec.header.size);
if (ret)
return;
@@ -10085,6 +10197,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
int fpos = token == IF_SRC_FILE ? 2 : 1;
+ kfree(filename);
filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
@@ -10131,16 +10244,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
*/
ret = -EOPNOTSUPP;
if (!event->ctx->task)
- goto fail_free_name;
+ goto fail;
/* look up the path and grab its inode */
ret = kern_path(filename, LOOKUP_FOLLOW,
&filter->path);
if (ret)
- goto fail_free_name;
-
- kfree(filename);
- filename = NULL;
+ goto fail;
ret = -EINVAL;
if (!filter->path.dentry ||
@@ -10160,13 +10270,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (state != IF_STATE_ACTION)
goto fail;
+ kfree(filename);
kfree(orig);
return 0;
-fail_free_name:
- kfree(filename);
fail:
+ kfree(filename);
free_filters_list(filters);
kfree(orig);
@@ -11069,6 +11179,8 @@ static void account_event(struct perf_event *event)
inc = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
+ if (event->attr.build_id)
+ atomic_inc(&nr_build_id_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.namespaces)
@@ -11461,6 +11573,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->sample_type & PERF_SAMPLE_CGROUP)
return -EINVAL;
#endif
+ if ((attr->sample_type & PERF_SAMPLE_WEIGHT) &&
+ (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
+ return -EINVAL;
out:
return ret;
@@ -11729,24 +11844,6 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_task;
}
- if (task) {
- err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
- if (err)
- goto err_task;
-
- /*
- * Preserve ptrace permission check for backwards compatibility.
- *
- * We must hold exec_update_mutex across this and any potential
- * perf_install_in_context() call for this new event to
- * serialize against exec() altering our credentials (and the
- * perf_event_exit_task() that could imply).
- */
- err = -EACCES;
- if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
- goto err_cred;
- }
-
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
@@ -11754,7 +11851,7 @@ SYSCALL_DEFINE5(perf_event_open,
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
- goto err_cred;
+ goto err_task;
}
if (is_sampling_event(event)) {
@@ -11873,6 +11970,24 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
}
+ if (task) {
+ err = down_read_interruptible(&task->signal->exec_update_lock);
+ if (err)
+ goto err_file;
+
+ /*
+ * Preserve ptrace permission check for backwards compatibility.
+ *
+ * We must hold exec_update_lock across this and any potential
+ * perf_install_in_context() call for this new event to
+ * serialize against exec() altering our credentials (and the
+ * perf_event_exit_task() that could imply).
+ */
+ err = -EACCES;
+ if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ goto err_cred;
+ }
+
if (move_group) {
gctx = __perf_event_ctx_lock_double(group_leader, ctx);
@@ -12026,7 +12141,7 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&ctx->mutex);
if (task) {
- mutex_unlock(&task->signal->exec_update_mutex);
+ up_read(&task->signal->exec_update_lock);
put_task_struct(task);
}
@@ -12048,7 +12163,10 @@ err_locked:
if (move_group)
perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
-/* err_file: */
+err_cred:
+ if (task)
+ up_read(&task->signal->exec_update_lock);
+err_file:
fput(event_file);
err_context:
perf_unpin_context(ctx);
@@ -12060,9 +12178,6 @@ err_alloc:
*/
if (!event_file)
free_event(event);
-err_cred:
- if (task)
- mutex_unlock(&task->signal->exec_update_mutex);
err_task:
if (task)
put_task_struct(task);
@@ -12367,7 +12482,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
/*
* When a child task exits, feed back event values to parent events.
*
- * Can be called with exec_update_mutex held when called from
+ * Can be called with exec_update_lock held when called from
* setup_new_exec().
*/
void perf_event_exit_task(struct task_struct *child)
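
The group-tree conversion above leans on the generic rb_add()/rb_find()/rb_find_first()/rb_next_match() helpers from <linux/rbtree.h> instead of open-coded tree walks. A self-contained sketch of the basic insert/lookup pattern, using an invented 'struct item' keyed by a u64 (illustrative only, not the perf data structures):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node node;
	u64 key;
};

#define node_to_item(n)	rb_entry((n), struct item, node)

static bool item_less(struct rb_node *a, const struct rb_node *b)
{
	return node_to_item(a)->key < node_to_item(b)->key;
}

static int item_cmp(const void *key, const struct rb_node *n)
{
	u64 k = *(const u64 *)key;
	u64 nk = node_to_item(n)->key;

	return k < nk ? -1 : k > nk ? 1 : 0;
}

static void item_insert(struct rb_root *root, struct item *it)
{
	/* replaces the open-coded rb_link_node()/rb_insert_color() walk */
	rb_add(&it->node, root, item_less);
}

static struct item *item_lookup(struct rb_root *root, u64 key)
{
	struct rb_node *n = rb_find(&key, root, item_cmp);

	return n ? node_to_item(n) : NULL;
}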
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index fcbf5616a441..228801e20788 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -205,16 +205,12 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
static inline int get_recursion_context(int *recursion)
{
- int rctx;
-
- if (unlikely(in_nmi()))
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
+ unsigned int pc = preempt_count();
+ unsigned char rctx = 0;
+
+ rctx += !!(pc & (NMI_MASK));
+ rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
if (recursion[rctx])
return -1;
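
The rewritten get_recursion_context() replaces the if/else ladder with three boolean tests that simply sum up; each masked test is true for its own level and every level above it. With the usual preempt_count() layout the index works out as follows (worked example, values inferred from the masks above):

	task context		0 + 0 + 0 = 0
	serving a softirq	0 + 0 + 1 = 1
	hardirq context		0 + 1 + 1 = 2
	NMI context		1 + 1 + 1 = 3

which matches the old branching code without any branches.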
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 192b8abc6330..ef91ae75ca56 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -147,6 +147,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size,
bool backward)
{
@@ -237,18 +238,16 @@ __perf_output_begin(struct perf_output_handle *handle,
handle->size = (1UL << page_shift) - offset;
if (unlikely(have_lost)) {
- struct perf_sample_data sample_data;
-
lost_event.header.size = sizeof(lost_event);
lost_event.header.type = PERF_RECORD_LOST;
lost_event.header.misc = 0;
lost_event.id = event->id;
lost_event.lost = local_xchg(&rb->lost, 0);
- perf_event_header__init_id(&lost_event.header,
- &sample_data, event);
+ /* XXX mostly redundant; @data is already fully initialized */
+ perf_event_header__init_id(&lost_event.header, data, event);
perf_output_put(handle, lost_event);
- perf_event__output_id_sample(event, handle, &sample_data);
+ perf_event__output_id_sample(event, handle, data);
}
return 0;
@@ -263,22 +262,25 @@ out:
}
int perf_output_begin_forward(struct perf_output_handle *handle,
- struct perf_event *event, unsigned int size)
+ struct perf_sample_data *data,
+ struct perf_event *event, unsigned int size)
{
- return __perf_output_begin(handle, event, size, false);
+ return __perf_output_begin(handle, data, event, size, false);
}
int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size)
{
- return __perf_output_begin(handle, event, size, true);
+ return __perf_output_begin(handle, data, event, size, true);
}
int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size)
{
- return __perf_output_begin(handle, event, size,
+ return __perf_output_begin(handle, data, event, size,
unlikely(is_write_backward(event)));
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 00b0358739ab..3ea7f8f92f1d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -613,41 +613,56 @@ static void put_uprobe(struct uprobe *uprobe)
}
}
-static int match_uprobe(struct uprobe *l, struct uprobe *r)
+static __always_inline
+int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
+ const struct uprobe *r)
{
- if (l->inode < r->inode)
+ if (l_inode < r->inode)
return -1;
- if (l->inode > r->inode)
+ if (l_inode > r->inode)
return 1;
- if (l->offset < r->offset)
+ if (l_offset < r->offset)
return -1;
- if (l->offset > r->offset)
+ if (l_offset > r->offset)
return 1;
return 0;
}
+#define __node_2_uprobe(node) \
+ rb_entry((node), struct uprobe, rb_node)
+
+struct __uprobe_key {
+ struct inode *inode;
+ loff_t offset;
+};
+
+static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
+{
+ const struct __uprobe_key *a = key;
+ return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
+}
+
+static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
+{
+ struct uprobe *u = __node_2_uprobe(a);
+ return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
+}
+
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
- struct uprobe u = { .inode = inode, .offset = offset };
- struct rb_node *n = uprobes_tree.rb_node;
- struct uprobe *uprobe;
- int match;
+ struct __uprobe_key key = {
+ .inode = inode,
+ .offset = offset,
+ };
+ struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);
- while (n) {
- uprobe = rb_entry(n, struct uprobe, rb_node);
- match = match_uprobe(&u, uprobe);
- if (!match)
- return get_uprobe(uprobe);
+ if (node)
+ return get_uprobe(__node_2_uprobe(node));
- if (match < 0)
- n = n->rb_left;
- else
- n = n->rb_right;
- }
return NULL;
}
@@ -668,32 +683,15 @@ static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
- struct rb_node **p = &uprobes_tree.rb_node;
- struct rb_node *parent = NULL;
- struct uprobe *u;
- int match;
+ struct rb_node *node;
- while (*p) {
- parent = *p;
- u = rb_entry(parent, struct uprobe, rb_node);
- match = match_uprobe(uprobe, u);
- if (!match)
- return get_uprobe(u);
+ node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
+ if (node)
+ return get_uprobe(__node_2_uprobe(node));
- if (match < 0)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
-
- }
-
- u = NULL;
- rb_link_node(&uprobe->rb_node, parent, p);
- rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
refcount_set(&uprobe->ref, 2);
-
- return u;
+ return NULL;
}
/*
@@ -1973,7 +1971,7 @@ bool uprobe_deny_signal(void)
WARN_ON_ONCE(utask->state != UTASK_SSTEP);
- if (signal_pending(t)) {
+ if (task_sigpending(t)) {
spin_lock_irq(&t->sighand->siglock);
clear_tsk_thread_flag(t, TIF_SIGPENDING);
spin_unlock_irq(&t->sighand->siglock);
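
Similarly, __insert_uprobe() now funnels through rb_find_add(), which either links the new node or hands back the already-present one. Continuing the illustrative 'struct item' sketch above (again, not the uprobe data structures themselves), the pattern looks roughly like:

static int item_full_cmp(struct rb_node *a, const struct rb_node *b)
{
	u64 ka = node_to_item(a)->key, kb = node_to_item(b)->key;

	return ka < kb ? -1 : ka > kb ? 1 : 0;
}

/* Returns the pre-existing item on collision, NULL if @it was inserted. */
static struct item *item_insert_or_find(struct rb_root *root, struct item *it)
{
	struct rb_node *n = rb_find_add(&it->node, root, item_full_cmp);

	return n ? node_to_item(n) : NULL;
}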
diff --git a/kernel/exit.c b/kernel/exit.c
index 87a2d515de0d..04029e35e69a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -63,6 +63,7 @@
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
+#include <linux/io_uring.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -454,7 +455,10 @@ static void exit_mm(void)
mmap_read_unlock(mm);
self.task = current;
- self.next = xchg(&core_state->dumper.next, &self);
+ if (self.task->flags & PF_SIGNALED)
+ self.next = xchg(&core_state->dumper.next, &self);
+ else
+ self.task = NULL;
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
@@ -475,10 +479,24 @@ static void exit_mm(void)
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
task_lock(current);
+ /*
+ * When a thread stops operating on an address space, the loop
+ * in membarrier_private_expedited() may not observe that
+ * tsk->mm, and the loop in membarrier_global_expedited() may
+ * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
+ * rq->membarrier_state, so those would not issue an IPI.
+ * Membarrier requires a memory barrier after accessing
+ * user-space memory, before clearing tsk->mm or the
+ * rq->membarrier_state.
+ */
+ smp_mb__after_spinlock();
+ local_irq_disable();
current->mm = NULL;
- mmap_read_unlock(mm);
+ membarrier_update_current_mm(NULL);
enter_lazy_tlb(mm, current);
+ local_irq_enable();
task_unlock(current);
+ mmap_read_unlock(mm);
mm_update_next_owner(mm);
mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
@@ -759,6 +777,7 @@ void __noreturn do_exit(long code)
schedule();
}
+ io_uring_files_cancel(tsk->files);
exit_signals(tsk); /* sets PF_EXITING */
/* sync mm's RSS info before statistics gathering */
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 63b349168da7..60dc825ecc2b 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -37,9 +37,7 @@ static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
{
switch (get_injectable_error_type(addr)) {
case EI_ETYPE_NULL:
- if (retv != 0)
- return 0;
- break;
+ return 0;
case EI_ETYPE_ERRNO:
if (retv < (unsigned long)-MAX_ERRNO)
return (unsigned long)-EINVAL;
@@ -48,6 +46,8 @@ static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
if (retv != 0 && retv < (unsigned long)-MAX_ERRNO)
return (unsigned long)-EINVAL;
break;
+ case EI_ETYPE_TRUE:
+ return 1;
}
return retv;
@@ -253,7 +253,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
buf[count] = '\0';
sym = strstrip(buf);
@@ -307,8 +307,9 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
ret = count;
}
out:
- kfree(buf);
mutex_unlock(&fei_lock);
+out_free:
+ kfree(buf);
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 32083db7a2a2..d66cd1014211 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
- /* Clear the KASAN shadow of the stack. */
- kasan_unpoison_shadow(s->addr, THREAD_SIZE);
+ /* Mark stack accessible for KASAN. */
+ kasan_unpoison_range(s->addr, THREAD_SIZE);
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
@@ -385,7 +385,7 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
mod_lruvec_page_state(vm->pages[0], NR_KERNEL_STACK_KB,
account * (THREAD_SIZE / 1024));
else
- mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB,
+ mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
account * (THREAD_SIZE / 1024));
}
@@ -404,9 +404,10 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
/*
- * If memcg_kmem_charge_page() fails, page->mem_cgroup
- * pointer is NULL, and memcg_kmem_uncharge_page() in
- * free_thread_stack() will ignore this page.
+ * If memcg_kmem_charge_page() fails, the page's
+ * memory cgroup pointer is NULL, and
+ * memcg_kmem_uncharge_page() in free_thread_stack()
+ * will ignore this page.
*/
ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
0);
@@ -818,9 +819,8 @@ void __init fork_init(void)
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
- for (i = 0; i < UCOUNT_COUNTS; i++) {
+ for (i = 0; i < UCOUNT_COUNTS; i++)
init_user_ns.ucount_max[i] = max_threads/2;
- }
#ifdef CONFIG_VMAP_STACK
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -906,6 +906,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk);
+ clear_syscall_work_syscall_user_dispatch(tsk);
#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
@@ -930,6 +931,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
account_kernel_stack(tsk, 1);
kcov_task_init(tsk);
+ kmap_local_fork(tsk);
#ifdef CONFIG_FAULT_INJECTION
tsk->fail_nth = 0;
@@ -1007,6 +1009,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
+ seqcount_init(&mm->write_protect_seq);
mmap_init_lock(mm);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
@@ -1221,7 +1224,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
struct mm_struct *mm;
int err;
- err = mutex_lock_killable(&task->signal->exec_update_mutex);
+ err = down_read_killable(&task->signal->exec_update_lock);
if (err)
return ERR_PTR(err);
@@ -1231,7 +1234,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mmput(mm);
mm = ERR_PTR(-EACCES);
}
- mutex_unlock(&task->signal->exec_update_mutex);
+ up_read(&task->signal->exec_update_lock);
return mm;
}
@@ -1591,7 +1594,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
mutex_init(&sig->cred_guard_mutex);
- mutex_init(&sig->exec_update_mutex);
+ init_rwsem(&sig->exec_update_lock);
return 0;
}
@@ -1625,7 +1628,7 @@ static void copy_seccomp(struct task_struct *p)
* to manually enable the seccomp thread flag here.
*/
if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
- set_tsk_thread_flag(p, TIF_SECCOMP);
+ set_task_syscall_work(p, SECCOMP);
#endif
}
@@ -1650,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
{
enum pid_type type;
- for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+ for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
INIT_HLIST_NODE(&task->pid_links[type]);
- }
}
static inline void
@@ -2158,23 +2160,18 @@ static __latent_entropy struct task_struct *copy_process(
* child regardless of CLONE_PTRACE.
*/
user_disable_single_step(p);
- clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
-#ifdef TIF_SYSCALL_EMU
- clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
+ clear_task_syscall_work(p, SYSCALL_TRACE);
+#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
+ clear_task_syscall_work(p, SYSCALL_EMU);
#endif
clear_tsk_latency_tracing(p);
/* ok, now we should be set up.. */
p->pid = pid_nr(pid);
if (clone_flags & CLONE_THREAD) {
- p->exit_signal = -1;
p->group_leader = current->group_leader;
p->tgid = current->tgid;
} else {
- if (clone_flags & CLONE_PARENT)
- p->exit_signal = current->group_leader->exit_signal;
- else
- p->exit_signal = args->exit_signal;
p->group_leader = p;
p->tgid = p->pid;
}
@@ -2187,6 +2184,10 @@ static __latent_entropy struct task_struct *copy_process(
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
+#ifdef CONFIG_KRETPROBES
+ p->kretprobe_instances.first = NULL;
+#endif
+
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted that the new process's css_set can be changed
@@ -2218,9 +2219,14 @@ static __latent_entropy struct task_struct *copy_process(
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
+ if (clone_flags & CLONE_THREAD)
+ p->exit_signal = -1;
+ else
+ p->exit_signal = current->group_leader->exit_signal;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
+ p->exit_signal = args->exit_signal;
}
klp_copy_process(p);
@@ -3023,21 +3029,21 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
* the exec layer of the kernel.
*/
-int unshare_files(struct files_struct **displaced)
+int unshare_files(void)
{
struct task_struct *task = current;
- struct files_struct *copy = NULL;
+ struct files_struct *old, *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
- if (error || !copy) {
- *displaced = NULL;
+ if (error || !copy)
return error;
- }
- *displaced = task->files;
+
+ old = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
+ put_files_struct(old);
return 0;
}
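
The unshare_files() hunk above changes the contract: instead of handing the displaced files_struct back to the caller, the new copy is installed under task_lock() and the old reference is dropped on the spot. A minimal userspace sketch of that swap-under-lock-then-put pattern, with a hypothetical refcounted object standing in for files_struct (names here are illustrative, not kernel API, and the refcount is deliberately not thread-safe):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct files { int refcount; int id; };           /* stand-in for files_struct */

static struct files *task_files;                  /* stand-in for task->files */
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

static struct files *files_alloc(int id)
{
	struct files *f = malloc(sizeof(*f));
	f->refcount = 1;
	f->id = id;
	return f;
}

static void put_files(struct files *f)
{
	if (--f->refcount == 0) {                 /* sketch only: not thread-safe */
		printf("freeing files %d\n", f->id);
		free(f);
	}
}

/* Mirror of the new flow: make the copy, swap it in under the lock, put the old one. */
static int unshare_files_sketch(void)
{
	struct files *old, *copy = files_alloc(2);/* pretend unshare_fd() succeeded */

	old = task_files;
	pthread_mutex_lock(&task_lock);
	task_files = copy;
	pthread_mutex_unlock(&task_lock);
	put_files(old);                           /* dropped here, not by the caller */
	return 0;
}

int main(void)
{
	task_files = files_alloc(1);
	unshare_files_sketch();
	printf("current files: %d\n", task_files->id);
	put_files(task_files);
	return 0;
}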
diff --git a/kernel/futex.c b/kernel/futex.c
index be68ac0d49ad..e68db7745039 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -310,8 +310,6 @@ static inline bool should_fail_futex(bool fshared)
#ifdef CONFIG_COMPAT
static void compat_exit_robust_list(struct task_struct *curr);
-#else
-static inline void compat_exit_robust_list(struct task_struct *curr) { }
#endif
/*
@@ -765,6 +763,29 @@ static struct futex_pi_state *alloc_pi_state(void)
return pi_state;
}
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+ struct task_struct *new_owner)
+{
+ struct task_struct *old_owner = pi_state->owner;
+
+ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+ if (old_owner) {
+ raw_spin_lock(&old_owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+ raw_spin_unlock(&old_owner->pi_lock);
+ }
+
+ if (new_owner) {
+ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+ }
+}
+
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
@@ -787,17 +808,12 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- struct task_struct *owner;
+ unsigned long flags;
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- owner = pi_state->owner;
- if (owner) {
- raw_spin_lock(&owner->pi_lock);
- list_del_init(&pi_state->list);
- raw_spin_unlock(&owner->pi_lock);
- }
- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+ pi_state_update_owner(pi_state, NULL);
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
}
if (current->pi_state_cache) {
@@ -942,7 +958,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
- * TID out of sync.
+ * TID out of sync. Except one error case where the kernel is denied
+ * write access to the user address, see fixup_pi_state_owner().
*
*
* Serialization and lifetime rules:
@@ -1503,8 +1520,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
- if (unlikely(should_fail_futex(true)))
+ if (unlikely(should_fail_futex(true))) {
ret = -EFAULT;
+ goto out_unlock;
+ }
ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (!ret && (curval != uval)) {
@@ -1520,26 +1539,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
ret = -EINVAL;
}
- if (ret)
- goto out_unlock;
-
- /*
- * This is a point of no return; once we modify the uval there is no
- * going back and subsequent operations must not fail.
- */
-
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
-
- raw_spin_lock(&new_owner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &new_owner->pi_state_list);
- pi_state->owner = new_owner;
- raw_spin_unlock(&new_owner->pi_lock);
-
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ if (!ret) {
+ /*
+ * This is a point of no return; once we modified the uval
+ * there is no going back and subsequent operations must
+ * not fail.
+ */
+ pi_state_update_owner(pi_state, new_owner);
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ }
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -2322,18 +2330,13 @@ static void unqueue_me_pi(struct futex_q *q)
spin_unlock(q->lock_ptr);
}
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
{
struct futex_pi_state *pi_state = q->pi_state;
- u32 uval, curval, newval;
struct task_struct *oldowner, *newowner;
- u32 newtid;
- int ret, err = 0;
-
- lockdep_assert_held(q->lock_ptr);
-
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ u32 uval, curval, newval, newtid;
+ int err = 0;
oldowner = pi_state->owner;
@@ -2367,21 +2370,31 @@ retry:
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
- /* We got the lock after all, nothing to fix. */
- ret = 0;
- goto out_unlock;
+ /* We got the lock. pi_state is correct. Tell caller. */
+ return 1;
}
/*
- * Since we just failed the trylock; there must be an owner.
+ * The trylock just failed, so either there is an owner or
+ * there is a higher priority waiter than this one.
*/
newowner = rt_mutex_owner(&pi_state->pi_mutex);
- BUG_ON(!newowner);
+ /*
+ * If the higher priority waiter has not yet taken over the
+ * rtmutex then newowner is NULL. We can't return here with
+ * that state because it's inconsistent vs. the user space
+ * state. So drop the locks and try again. It's a valid
+ * situation and not any different from the other retry
+ * conditions.
+ */
+ if (unlikely(!newowner)) {
+ err = -EAGAIN;
+ goto handle_err;
+ }
} else {
WARN_ON_ONCE(argowner != current);
if (oldowner == current) {
@@ -2389,8 +2402,7 @@ retry:
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 1;
}
newowner = argowner;
}
@@ -2420,22 +2432,9 @@ retry:
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
- if (pi_state->owner != NULL) {
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
- }
-
- pi_state->owner = newowner;
-
- raw_spin_lock(&newowner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &newowner->pi_state_list);
- raw_spin_unlock(&newowner->pi_lock);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ pi_state_update_owner(pi_state, newowner);
- return 0;
+ return argowner == current;
/*
* In order to reschedule or handle a page fault, we need to drop the
@@ -2456,17 +2455,16 @@ handle_err:
switch (err) {
case -EFAULT:
- ret = fault_in_user_writeable(uaddr);
+ err = fault_in_user_writeable(uaddr);
break;
case -EAGAIN:
cond_resched();
- ret = 0;
+ err = 0;
break;
default:
WARN_ON_ONCE(1);
- ret = err;
break;
}
@@ -2476,17 +2474,44 @@ handle_err:
/*
* Check if someone else fixed it for us:
*/
- if (pi_state->owner != oldowner) {
- ret = 0;
- goto out_unlock;
- }
+ if (pi_state->owner != oldowner)
+ return argowner == current;
- if (ret)
- goto out_unlock;
+ /* Retry if err was -EAGAIN or the fault in succeeded */
+ if (!err)
+ goto retry;
- goto retry;
+ /*
+ * fault_in_user_writeable() failed so user state is immutable. At
+ * best we can make the kernel state consistent but user state will
+ * be most likely hosed and any subsequent unlock operation will be
+ * rejected due to PI futex rule [10].
+ *
+ * Ensure that the rtmutex owner is also the pi_state owner despite
+ * the user space value claiming something different. There is no
+ * point in unlocking the rtmutex if current is the owner as it
+ * would need to wait until the next waiter has taken the rtmutex
+ * to guarantee consistent state. Keep it simple. Userspace asked
+	 * for this wrecked state.
+ *
+ * The rtmutex has an owner - either current or some other
+ * task. See the EAGAIN loop above.
+ */
+ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
-out_unlock:
+ return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
+{
+ struct futex_pi_state *pi_state = q->pi_state;
+ int ret;
+
+ lockdep_assert_held(q->lock_ptr);
+
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ ret = __fixup_pi_state_owner(uaddr, q, argowner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
@@ -2510,8 +2535,6 @@ static long futex_wait_restart(struct restart_block *restart);
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
- int ret = 0;
-
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
@@ -2522,8 +2545,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
* stable state, anything else needs more attention.
*/
if (q->pi_state->owner != current)
- ret = fixup_pi_state_owner(uaddr, q, current);
- return ret ? ret : locked;
+ return fixup_pi_state_owner(uaddr, q, current);
+ return 1;
}
/*
@@ -2534,23 +2557,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
* Another speculative read; pi_state->owner == current is unstable
* but needs our attention.
*/
- if (q->pi_state->owner == current) {
- ret = fixup_pi_state_owner(uaddr, q, NULL);
- return ret;
- }
+ if (q->pi_state->owner == current)
+ return fixup_pi_state_owner(uaddr, q, NULL);
/*
* Paranoia check. If we did not take the lock, then we should not be
- * the owner of the rt_mutex.
+ * the owner of the rt_mutex. Warn and establish consistent state.
*/
- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
- "pi-state %p\n", ret,
- q->pi_state->pi_mutex.owner,
- q->pi_state->owner);
- }
+ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+ return fixup_pi_state_owner(uaddr, q, current);
- return ret;
+ return 0;
}
/**
@@ -2758,7 +2775,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct task_struct *exiting = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
@@ -2894,23 +2910,8 @@ no_block:
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_owner() faulted and was unable to handle the fault, unlock
- * it and return the fault to userspace.
- */
- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
-
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
goto out;
out_unlock_put_key:
@@ -3011,7 +3012,7 @@ retry:
* Success, we're done! No tricky corner cases.
*/
if (!ret)
- goto out_putkey;
+ return ret;
/*
* The atomic access to the futex value generated a
* pagefault, so retry the user-access and the wakeup:
@@ -3028,7 +3029,7 @@ retry:
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
- goto out_putkey;
+ return ret;
}
/*
@@ -3049,7 +3050,7 @@ retry:
default:
WARN_ON_ONCE(1);
- goto out_putkey;
+ return ret;
}
}
@@ -3060,7 +3061,6 @@ retry:
out_unlock:
spin_unlock(&hb->lock);
-out_putkey:
return ret;
pi_retry:
@@ -3170,7 +3170,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to;
- struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
@@ -3248,16 +3247,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
+ /*
+ * Adjust the return value. It's either -EFAULT or
+ * success (1) but the caller expects 0 for success.
+ */
+ ret = ret < 0 ? ret : 0;
}
} else {
struct rt_mutex *pi_mutex;
@@ -3288,25 +3288,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle
- * the fault, unlock the rt_mutex and return the fault to
- * userspace.
- */
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
@@ -3777,8 +3762,8 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
- struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
- u32, val3)
+ const struct __kernel_timespec __user *, utime,
+ u32 __user *, uaddr2, u32, val3)
{
struct timespec64 ts;
ktime_t t, *tp = NULL;
@@ -3973,7 +3958,7 @@ err_unlock:
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
- struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
+ const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec64 ts;
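
Several futex hunks above fold the repeated "unlink the pi_state from the old owner, link it to the new owner" sequence into pi_state_update_owner(). The sketch below shows the same move-between-per-owner-lists pattern in userspace, with a pthread mutex standing in for each owner's ->pi_lock; the structures and names are illustrative only and not the kernel's, and the shared wait lock that serializes callers is simply assumed to be held:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct owner {
	pthread_mutex_t lock;          /* stands in for task->pi_lock */
	struct state *states;          /* singly linked list of owned states */
};

struct state {
	struct owner *owner;
	struct state *next;
};

/* Caller is assumed to hold the shared wait lock that serializes updates. */
static void state_update_owner(struct state *st, struct owner *new_owner)
{
	struct owner *old_owner = st->owner;

	if (old_owner) {
		pthread_mutex_lock(&old_owner->lock);
		/* unlink st from old_owner->states */
		struct state **pp = &old_owner->states;
		while (*pp && *pp != st)
			pp = &(*pp)->next;
		if (*pp)
			*pp = st->next;
		st->next = NULL;
		st->owner = NULL;
		pthread_mutex_unlock(&old_owner->lock);
	}

	if (new_owner) {
		pthread_mutex_lock(&new_owner->lock);
		st->next = new_owner->states;   /* push onto the new owner's list */
		new_owner->states = st;
		st->owner = new_owner;
		pthread_mutex_unlock(&new_owner->lock);
	}
}

int main(void)
{
	struct owner a = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct owner b = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct state st = { NULL, NULL };

	state_update_owner(&st, &a);     /* claim */
	state_update_owner(&st, &b);     /* hand over */
	state_update_owner(&st, NULL);   /* orphan, as put_pi_state() now does */
	printf("a empty: %d, b empty: %d\n", a.states == NULL, b.states == NULL);
	return 0;
}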
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 3110c77230c7..f62de2dea8a3 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -4,7 +4,7 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- select CONSTRUCTORS if !UML
+ select CONSTRUCTORS
default n
help
This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 53c67c87f141..c53408a00d0b 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -25,10 +25,8 @@
#define GCOV_COUNTERS 9
#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
-#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
-#define GCOV_COUNTERS 9
#else
-#define GCOV_COUNTERS 8
+#define GCOV_COUNTERS 9
#endif
#define GCOV_TAG_FUNCTION_LENGTH 3
@@ -229,10 +227,10 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
/**
* gcov_info_add - add up profiling data
- * @dest: profiling data set to which data is added
- * @source: profiling data set which is added
+ * @dst: profiling data set to which data is added
+ * @src: profiling data set which is added
*
- * Adds profiling counts of @source to @dest.
+ * Adds profiling counts of @src to @dst.
*/
void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
{
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index ce76f490126c..396ebaebea3f 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -225,8 +225,7 @@ static long hung_timeout_jiffies(unsigned long last_checked,
* Process updating of timeout sysctl
*/
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 10a5aff4eecc..d79ef2493a28 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -26,11 +26,6 @@ config GENERIC_IRQ_SHOW_LEVEL
config GENERIC_IRQ_EFFECTIVE_AFF_MASK
bool
-# Facility to allocate a hardware interrupt. This is legacy support
-# and should not be used in new code. Use irq domains instead.
-config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
- bool
-
# Support for delayed migration from interrupt context
config GENERIC_PENDING_IRQ
bool
@@ -82,6 +77,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
# Generic IRQ IPI support
config GENERIC_IRQ_IPI
bool
+ select IRQ_DOMAIN_HIERARCHY
# Generic MSI interrupt support
config GENERIC_MSI_IRQ
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b9b9618e1aca..6d89e33fe3aa 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -61,7 +61,7 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
EXPORT_SYMBOL(irq_set_chip);
/**
- * irq_set_type - set the irq trigger type for an irq
+ * irq_set_irq_type - set the irq trigger type for an irq
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
@@ -945,33 +945,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
}
/**
- * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
- * dev ids
- * @desc: the interrupt description structure for this irq
- *
- * The biggest difference with the IRQ version is that the interrupt is
- * EOIed early, as the IPI could result in a context switch, and we need to
- * make sure the IPI can fire again. We also assume that the arch code has
- * registered an action. If not, we are positively doomed.
- */
-void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
- unsigned int irq = irq_desc_get_irq(desc);
- irqreturn_t res;
-
- __kstat_incr_irqs_this_cpu(desc);
-
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
-
- trace_irq_handler_entry(irq, action);
- res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
- trace_irq_handler_exit(irq, action, res);
-}
-
-/**
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
* dev ids
* @desc: the interrupt description structure for this irq
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index e2999a070a99..a23ac2bbf433 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -269,7 +269,7 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
}
/**
- * __irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
+ * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with this
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 1a7723604399..cc1a09406c6e 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -147,12 +147,12 @@ static ssize_t per_cpu_count_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- int cpu, irq = desc->irq_data.irq;
ssize_t ret = 0;
char *p = "";
+ int cpu;
for_each_possible_cpu(cpu) {
- unsigned int c = kstat_irqs_cpu(irq, cpu);
+ unsigned int c = irq_desc_kstat_cpu(desc, cpu);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
p = ",";
@@ -352,7 +352,9 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return radix_tree_lookup(&irq_desc_tree, irq);
}
-EXPORT_SYMBOL(irq_to_desc);
+#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
+EXPORT_SYMBOL_GPL(irq_to_desc);
+#endif
static void delete_irq_desc(unsigned int irq)
{
@@ -810,57 +812,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-/**
- * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
- * @cnt: number of interrupts to allocate
- * @node: node on which to allocate
- *
- * Returns an interrupt number > 0 or 0, if the allocation fails.
- */
-unsigned int irq_alloc_hwirqs(int cnt, int node)
-{
- int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
-
- if (irq < 0)
- return 0;
-
- for (i = irq; cnt > 0; i++, cnt--) {
- if (arch_setup_hwirq(i, node))
- goto err;
- irq_clear_status_flags(i, _IRQ_NOREQUEST);
- }
- return irq;
-
-err:
- for (i--; i >= irq; i--) {
- irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
- arch_teardown_hwirq(i);
- }
- irq_free_descs(irq, cnt);
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
-
-/**
- * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
- * @from: Free from irq number
- * @cnt: number of interrupts to free
- *
- */
-void irq_free_hwirqs(unsigned int from, int cnt)
-{
- int i, j;
-
- for (i = from, j = cnt; j > 0; i++, j--) {
- irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
- arch_teardown_hwirq(i);
- }
- irq_free_descs(from, cnt);
-}
-EXPORT_SYMBOL_GPL(irq_free_hwirqs);
-#endif
-
/**
* irq_get_next_irq - get next allocated irq number
* @offset: where to start the search
@@ -975,15 +926,7 @@ static bool irq_is_nmi(struct irq_desc *desc)
return desc->istate & IRQS_NMI;
}
-/**
- * kstat_irqs - Get the statistics for an interrupt
- * @irq: The interrupt number
- *
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. The caller must ensure that the interrupt is not removed
- * concurrently.
- */
-unsigned int kstat_irqs(unsigned int irq)
+static unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned int sum = 0;
@@ -994,21 +937,22 @@ unsigned int kstat_irqs(unsigned int irq)
if (!irq_settings_is_per_cpu_devid(desc) &&
!irq_settings_is_per_cpu(desc) &&
!irq_is_nmi(desc))
- return desc->tot_count;
+ return data_race(desc->tot_count);
for_each_possible_cpu(cpu)
- sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+ sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
return sum;
}
/**
- * kstat_irqs_usr - Get the statistics for an interrupt
+ * kstat_irqs_usr - Get the statistics for an interrupt from thread context
* @irq: The interrupt number
*
* Returns the sum of interrupt counts on all cpus since boot for @irq.
- * Contrary to kstat_irqs() this can be called from any context.
- * It uses rcu since a concurrent removal of an interrupt descriptor is
- * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
+ *
+ * It uses rcu to protect the access since a concurrent removal of an
+ * interrupt descriptor is observing an rcu grace period before
+ * delayed_free_desc()/irq_kobj_release().
*/
unsigned int kstat_irqs_usr(unsigned int irq)
{
@@ -1019,3 +963,17 @@ unsigned int kstat_irqs_usr(unsigned int irq)
rcu_read_unlock();
return sum;
}
+
+#ifdef CONFIG_LOCKDEP
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+ lockdep_set_class(&desc->lock, lock_class);
+ lockdep_set_class(&desc->request_mutex, request_class);
+ }
+}
+EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
+#endif
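
The kstat_irqs() hunk above wraps the per-CPU reads in data_race(), documenting that the sum is a best-effort statistical snapshot and that concurrent increments are tolerated. A rough userspace analogue of that idea, using C11 relaxed atomics for per-CPU-style counters (all names here are hypothetical; build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_ulong counters[NR_CPUS];    /* stands in for desc->kstat_irqs */

static void *worker(void *arg)
{
	int cpu = (int)(long)arg;

	for (int i = 0; i < 100000; i++)      /* "interrupts" on this cpu */
		atomic_fetch_add_explicit(&counters[cpu], 1, memory_order_relaxed);
	return NULL;
}

/* Snapshot sum with relaxed loads - a statistic, like the data_race() reads. */
static unsigned long kstat_sum(void)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += atomic_load_explicit(&counters[cpu], memory_order_relaxed);
	return sum;
}

int main(void)
{
	pthread_t t[NR_CPUS];

	for (long cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&t[cpu], NULL, worker, (void *)cpu);
	printf("in-flight sum: %lu\n", kstat_sum());  /* may be partial */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(t[cpu], NULL);
	printf("final sum: %lu\n", kstat_sum());
	return 0;
}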
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf8b374b892d..6aacd342cd14 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -42,7 +42,16 @@ static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif
-const struct fwnode_operations irqchip_fwnode_ops;
+static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+
+ return fwid->name;
+}
+
+const struct fwnode_operations irqchip_fwnode_ops = {
+ .get_name = irqchip_fwnode_get_name,
+};
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/**
@@ -91,7 +100,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
fwid->type = type;
fwid->name = n;
fwid->pa = pa;
- fwid->fwnode.ops = &irqchip_fwnode_ops;
+ fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
@@ -351,16 +360,27 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
+ return irq_domain_create_legacy(of_node_to_fwnode(of_node), size,
+ first_irq, first_hwirq, ops, host_data);
+}
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
struct irq_domain *domain;
- domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
- first_hwirq + size, 0, ops, host_data);
+ domain = __irq_domain_add(fwnode, first_hwirq + size, first_hwirq + size, 0, ops, host_data);
if (domain)
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
return domain;
}
-EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
/**
* irq_find_matching_fwspec() - Locates a domain for a given fwspec
@@ -485,7 +505,7 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
}
}
-void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq;
@@ -624,17 +644,19 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
- * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
* @domain: domain owning this hardware interrupt or NULL for default domain
* @hwirq: hardware irq number in that domain space
+ * @affinity: irq affinity
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* irq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
-unsigned int irq_create_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity)
{
struct device_node *of_node;
int virq;
@@ -660,7 +682,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
/* Allocate a virtual interrupt number */
- virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
+ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+ affinity);
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
@@ -676,7 +699,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
return virq;
}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
+EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
/**
* irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
@@ -737,7 +760,7 @@ static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
{
int i;
- fwspec->fwnode = np ? &np->fwnode : NULL;
+ fwspec->fwnode = of_node_to_fwnode(np);
fwspec->param_count = count;
for (i = 0; i < count; i++)
@@ -1370,8 +1393,15 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs)
{
- if (domain->ops->free)
- domain->ops->free(domain, irq_base, nr_irqs);
+ unsigned int i;
+
+ if (!domain->ops->free)
+ return;
+
+ for (i = 0; i < nr_irqs; i++) {
+ if (irq_domain_get_irq_data(domain, irq_base + i))
+ domain->ops->free(domain, irq_base + i, 1);
+ }
}
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c460e0496006..dec3f73e8db9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -371,6 +371,76 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
return ret;
}
+/**
+ * irq_update_affinity_desc - Update affinity management for an interrupt
+ * @irq: The interrupt number to update
+ * @affinity: Pointer to the affinity descriptor
+ *
+ * This interface can be used to configure the affinity management of
+ * interrupts which have been allocated already.
+ *
+ * There are certain limitations on when it may be used - attempts to use it
+ * when the kernel is configured for generic IRQ reservation mode (in
+ * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
+ * managed/non-managed interrupt accounting. In addition, attempts to use it on
+ * an interrupt which is already started or which has already been configured
+ * as managed will also fail, as these mean invalid init state or double init.
+ */
+int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ bool activated;
+ int ret = 0;
+
+ /*
+ * Supporting this with the reservation scheme used by x86 needs
+ * some more thought. Fail it for now.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ return -EOPNOTSUPP;
+
+ desc = irq_get_desc_buslock(irq, &flags, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* Requires the interrupt to be shut down */
+ if (irqd_is_started(&desc->irq_data)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Interrupts which are already managed cannot be modified */
+ if (irqd_affinity_is_managed(&desc->irq_data)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /*
+ * Deactivate the interrupt. That's required to undo
+ * anything an earlier activation has established.
+ */
+ activated = irqd_is_activated(&desc->irq_data);
+ if (activated)
+ irq_domain_deactivate_irq(&desc->irq_data);
+
+ if (affinity->is_managed) {
+ irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
+ irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
+ }
+
+ cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
+
+ /* Restore the activation state */
+ if (activated)
+ irq_domain_activate_irq(&desc->irq_data, false);
+
+out_unlock:
+ irq_put_desc_busunlock(desc, flags);
+ return ret;
+}
+
int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -2752,3 +2822,41 @@ out_unlock:
return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
+
+/**
+ * irq_has_action - Check whether an interrupt is requested
+ * @irq: The linux irq number
+ *
+ * Returns: A snapshot of the current state
+ */
+bool irq_has_action(unsigned int irq)
+{
+ bool res;
+
+ rcu_read_lock();
+ res = irq_desc_has_action(irq_to_desc(irq));
+ rcu_read_unlock();
+ return res;
+}
+EXPORT_SYMBOL_GPL(irq_has_action);
+
+/**
+ * irq_check_status_bit - Check whether bits in the irq descriptor status are set
+ * @irq: The linux irq number
+ * @bitmask: The bitmask to evaluate
+ *
+ * Returns: True if one of the bits in @bitmask is set
+ */
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
+{
+ struct irq_desc *desc;
+ bool res = false;
+
+ rcu_read_lock();
+ desc = irq_to_desc(irq);
+ if (desc)
+ res = !!(desc->status_use_accessors & bitmask);
+ rcu_read_unlock();
+ return res;
+}
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 2c0c4d6d0f83..b338d622f26e 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
struct irq_data *irq_data;
struct msi_desc *desc;
- msi_alloc_info_t arg;
+ msi_alloc_info_t arg = { };
int i, ret, virq;
bool can_reserve;
@@ -436,22 +436,22 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
can_reserve = msi_check_reservation_mode(domain, info, dev);
- for_each_msi_entry(desc, dev) {
- virq = desc->irq;
- if (desc->nvec_used == 1)
- dev_dbg(dev, "irq %d for MSI\n", virq);
- else
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+ goto skip_activate;
+
+ for_each_msi_vector(desc, i, dev) {
+ if (desc->irq == i) {
+ virq = desc->irq;
dev_dbg(dev, "irq [%d-%d] for MSI\n",
virq, virq + desc->nvec_used - 1);
- /*
- * This flag is set by the PCI layer as we need to activate
- * the MSI entries before the PCI layer enables MSI in the
- * card. Otherwise the card latches a random msi message.
- */
- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
- continue;
+ }
- irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ irq_data = irq_domain_get_irq_data(domain, i);
if (!can_reserve) {
irqd_clr_can_reserve(irq_data);
if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -462,28 +462,24 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
goto cleanup;
}
+skip_activate:
/*
* If these interrupts use reservation mode, clear the activated bit
* so request_irq() will assign the final vector.
*/
if (can_reserve) {
- for_each_msi_entry(desc, dev) {
- irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
irqd_clr_activated(irq_data);
}
}
return 0;
cleanup:
- for_each_msi_entry(desc, dev) {
- struct irq_data *irqd;
-
- if (desc->irq == virq)
- break;
-
- irqd = irq_domain_get_irq_data(domain, desc->irq);
- if (irqd_is_activated(irqd))
- irq_domain_deactivate_irq(irqd);
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
+ if (irqd_is_activated(irq_data))
+ irq_domain_deactivate_irq(irq_data);
}
msi_domain_free_irqs(domain, dev);
return ret;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 72513ed2a5fc..98138788cb04 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -488,9 +488,10 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc || irq_settings_is_hidden(desc))
goto outsparse;
- if (desc->kstat_irqs)
+ if (desc->kstat_irqs) {
for_each_online_cpu(j)
- any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+ any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
+ }
if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
goto outsparse;
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 8ccd32a0cc80..bd1d85c610aa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -27,7 +27,7 @@ static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
/*
* Run software resends of IRQ's
*/
-static void resend_irqs(unsigned long arg)
+static void resend_irqs(struct tasklet_struct *unused)
{
struct irq_desc *desc;
int irq;
@@ -45,7 +45,7 @@ static void resend_irqs(unsigned long arg)
}
/* Tasklet to handle resend: */
-static DECLARE_TASKLET_OLD(resend_tasklet, resend_irqs);
+static DECLARE_TASKLET(resend_tasklet, resend_irqs);
static int irq_sw_resend(struct irq_desc *desc)
{
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index eca83965b631..e8da1e71583a 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -31,10 +31,10 @@ static bool irq_work_claim(struct irq_work *work)
{
int oflags;
- oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+ oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
/*
* If the work is already pending, no need to raise the IPI.
- * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+ * The pairing smp_mb() in irq_work_single() makes sure
* everything we did before is visible.
*/
if (oflags & IRQ_WORK_PENDING)
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
static void __irq_work_queue_local(struct irq_work *work)
{
/* If the work is "lazy", handle it from next tick if any */
- if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+ if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+ if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
tick_nohz_tick_stopped())
arch_irq_work_raise();
} else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+ if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
arch_irq_work_raise();
}
}
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (cpu != smp_processor_id()) {
/* Arch remote IPI send/receive backend aren't NMI safe */
WARN_ON_ONCE(in_nmi());
- __smp_call_single_queue(cpu, &work->llnode);
+ __smp_call_single_queue(cpu, &work->node.llist);
} else {
__irq_work_queue_local(work);
}
@@ -136,23 +136,28 @@ void irq_work_single(void *arg)
int flags;
/*
- * Clear the PENDING bit, after this point the @work
- * can be re-used.
- * Make it immediately visible so that other CPUs trying
- * to claim that work don't rely on us to handle their data
- * while we are in the middle of the func.
+ * Clear the PENDING bit, after this point the @work can be re-used.
+ * The PENDING bit acts as a lock, and we own it, so we can clear it
+ * without atomic ops.
*/
- flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+ flags = atomic_read(&work->node.a_flags);
+ flags &= ~IRQ_WORK_PENDING;
+ atomic_set(&work->node.a_flags, flags);
+
+ /*
+ * See irq_work_claim().
+ */
+ smp_mb();
- lockdep_irq_work_enter(work);
+ lockdep_irq_work_enter(flags);
work->func(work);
- lockdep_irq_work_exit(work);
+ lockdep_irq_work_exit(flags);
+
/*
- * Clear the BUSY bit and return to the free state if
- * no-one else claimed it meanwhile.
+ * Clear the BUSY bit, if set, and return to the free state if no-one
+ * else claimed it meanwhile.
*/
- flags &= ~IRQ_WORK_PENDING;
- (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+ (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
}
static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +171,7 @@ static void irq_work_run_list(struct llist_head *list)
return;
llnode = llist_del_all(list);
- llist_for_each_entry_safe(work, tmp, llnode, llnode)
+ llist_for_each_entry_safe(work, tmp, llnode, node.llist)
irq_work_single(work);
}
@@ -198,7 +203,7 @@ void irq_work_sync(struct irq_work *work)
{
lockdep_assert_irqs_enabled();
- while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+ while (irq_work_is_busy(work))
cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
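
The irq_work changes above rely on a single atomic fetch_or to make queueing idempotent: the first caller to set PENDING raises the IPI, later callers back off, and the handler clears PENDING with a plain store because it exclusively owns that bit at that point, followed by a full fence pairing with the claim. A compact C11 sketch of the claim/run protocol (flag values and names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORK_PENDING  (1u << 0)
#define WORK_BUSY     (1u << 1)
#define WORK_CLAIMED  (WORK_PENDING | WORK_BUSY)

struct work { atomic_uint flags; };

/* Returns true if the caller won the claim and must queue/raise the IPI. */
static bool work_claim(struct work *w)
{
	unsigned int oflags = atomic_fetch_or(&w->flags, WORK_CLAIMED);

	return !(oflags & WORK_PENDING);  /* already pending: someone else queued it */
}

static void work_run(struct work *w)
{
	/* We own PENDING here, so a plain (relaxed) clear is enough... */
	unsigned int flags = atomic_load_explicit(&w->flags, memory_order_relaxed);
	flags &= ~WORK_PENDING;
	atomic_store_explicit(&w->flags, flags, memory_order_relaxed);
	/* ...followed by a full fence pairing with the fetch_or in work_claim(). */
	atomic_thread_fence(memory_order_seq_cst);

	printf("running work\n");

	/* Drop BUSY unless someone re-claimed the work meanwhile. */
	atomic_compare_exchange_strong(&w->flags, &flags, flags & ~WORK_BUSY);
}

int main(void)
{
	static struct work w;

	printf("first claim wins:  %d\n", work_claim(&w));   /* 1 */
	printf("second claim wins: %d\n", work_claim(&w));   /* 0 */
	work_run(&w);
	printf("claim after run:   %d\n", work_claim(&w));   /* 1 again */
	return 0;
}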
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 015ef903ce8c..c6a39d662935 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -793,6 +793,7 @@ int jump_label_text_reserved(void *start, void *end)
static void jump_label_update(struct static_key *key)
{
struct jump_entry *stop = __stop___jump_table;
+ bool init = system_state < SYSTEM_RUNNING;
struct jump_entry *entry;
#ifdef CONFIG_MODULES
struct module *mod;
@@ -804,15 +805,16 @@ static void jump_label_update(struct static_key *key)
preempt_disable();
mod = __module_address((unsigned long)key);
- if (mod)
+ if (mod) {
stop = mod->jump_entries + mod->num_jump_entries;
+ init = mod->state == MODULE_STATE_COMING;
+ }
preempt_enable();
#endif
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, stop,
- system_state < SYSTEM_RUNNING);
+ __jump_label_update(key, entry, stop, init);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index b3ff9288c6cc..5353edfad8e1 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -61,39 +61,34 @@ static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
- struct file *file = NULL;
+ struct file *file;
- task_lock(task);
rcu_read_lock();
-
- if (task->files)
- file = fcheck_files(task->files, idx);
-
+ file = task_lookup_fd_rcu(task, idx);
rcu_read_unlock();
- task_unlock(task);
return file;
}
-static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
+static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
- if (likely(m2 != m1))
- mutex_unlock(m2);
- mutex_unlock(m1);
+ if (likely(l2 != l1))
+ up_read(l2);
+ up_read(l1);
}
-static int kcmp_lock(struct mutex *m1, struct mutex *m2)
+static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
{
int err;
- if (m2 > m1)
- swap(m1, m2);
+ if (l2 > l1)
+ swap(l1, l2);
- err = mutex_lock_killable(m1);
- if (!err && likely(m1 != m2)) {
- err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
+ err = down_read_killable(l1);
+ if (!err && likely(l1 != l2)) {
+ err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
if (err)
- mutex_unlock(m1);
+ up_read(l1);
}
return err;
@@ -107,7 +102,6 @@ static int kcmp_epoll_target(struct task_struct *task1,
{
struct file *filp, *filp_epoll, *filp_tgt;
struct kcmp_epoll_slot slot;
- struct files_struct *files;
if (copy_from_user(&slot, uslot, sizeof(slot)))
return -EFAULT;
@@ -116,23 +110,12 @@ static int kcmp_epoll_target(struct task_struct *task1,
if (!filp)
return -EBADF;
- files = get_files_struct(task2);
- if (!files)
+ filp_epoll = fget_task(task2, slot.efd);
+ if (!filp_epoll)
return -EBADF;
- spin_lock(&files->file_lock);
- filp_epoll = fcheck_files(files, slot.efd);
- if (filp_epoll)
- get_file(filp_epoll);
- else
- filp_tgt = ERR_PTR(-EBADF);
- spin_unlock(&files->file_lock);
- put_files_struct(files);
-
- if (filp_epoll) {
- filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
- fput(filp_epoll);
- }
+ filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
+ fput(filp_epoll);
if (IS_ERR(filp_tgt))
return PTR_ERR(filp_tgt);
@@ -173,8 +156,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
/*
* One should have enough rights to inspect task details.
*/
- ret = kcmp_lock(&task1->signal->exec_update_mutex,
- &task2->signal->exec_update_mutex);
+ ret = kcmp_lock(&task1->signal->exec_update_lock,
+ &task2->signal->exec_update_lock);
if (ret)
goto err;
if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
@@ -229,8 +212,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
}
err_unlock:
- kcmp_unlock(&task1->signal->exec_update_mutex,
- &task2->signal->exec_update_mutex);
+ kcmp_unlock(&task1->signal->exec_update_lock,
+ &task2->signal->exec_update_lock);
err:
put_task_struct(task1);
put_task_struct(task2);
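
kcmp_lock()/kcmp_unlock() above keep the old deadlock-avoidance trick while moving to rw_semaphores: always acquire the lock with the lower address first, so two tasks comparing each other can never take the pair in opposite orders. The same idea in portable C with pthread rwlocks (a sketch, not the kernel helpers; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static void swap_locks(pthread_rwlock_t **a, pthread_rwlock_t **b)
{
	pthread_rwlock_t *tmp = *a; *a = *b; *b = tmp;
}

/* Take both read locks in a globally consistent (address) order. */
static int pair_lock(pthread_rwlock_t *l1, pthread_rwlock_t *l2)
{
	int err;

	if (l2 > l1)
		swap_locks(&l1, &l2);

	err = pthread_rwlock_rdlock(l1);
	if (!err && l1 != l2) {
		err = pthread_rwlock_rdlock(l2);
		if (err)
			pthread_rwlock_unlock(l1);
	}
	return err;
}

/* Unlock order does not matter for deadlock avoidance. */
static void pair_unlock(pthread_rwlock_t *l1, pthread_rwlock_t *l2)
{
	if (l2 != l1)
		pthread_rwlock_unlock(l2);
	pthread_rwlock_unlock(l1);
}

int main(void)
{
	pthread_rwlock_t a = PTHREAD_RWLOCK_INITIALIZER;
	pthread_rwlock_t b = PTHREAD_RWLOCK_INITIALIZER;

	/* Both call orders end up acquiring in the same (address) order. */
	if (!pair_lock(&a, &b)) {
		puts("locked a/b");
		pair_unlock(&a, &b);
	}
	if (!pair_lock(&b, &a)) {
		puts("locked b/a");
		pair_unlock(&b, &a);
	}
	return 0;
}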
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 6b8368be89c8..80bfe71bbe13 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1023,6 +1023,8 @@ EXPORT_SYMBOL(kcov_remote_stop);
/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
+ if (!in_task())
+ return 0;
return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3994a217bde7..3bf98db9c702 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -12,7 +12,6 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
-#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
static DEFINE_PER_CPU(long, kcsan_skip);
/* For kcsan_prandom_u32_max(). */
-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+static DEFINE_PER_CPU(u32, kcsan_rand_state);
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
size_t size,
@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
}
/*
- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
- * for more details.
- *
- * The open-coded version here is using only safe primitives for all contexts
- * where we can have KCSAN instrumentation. In particular, we cannot use
- * prandom_u32() directly, as its tracepoint could cause recursion.
+ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
+ * congruential generator, using constants from "Numerical Recipes".
*/
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
- struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
- const u32 res = prandom_u32_state(state);
+ u32 state = this_cpu_read(kcsan_rand_state);
+
+ state = 1664525 * state + 1013904223;
+ this_cpu_write(kcsan_rand_state, state);
- put_cpu_var(kcsan_rand_state);
- return (u32)(((u64) res * ep_ro) >> 32);
+ return state % ep_ro;
}
static inline void reset_kcsan_skip(void)
@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
void __init kcsan_init(void)
{
+ int cpu;
+
BUG_ON(!in_task());
kcsan_debugfs_init();
- prandom_seed_full_state(&kcsan_rand_state);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
/*
* We are in the init task, and no other tasks should be running;
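
The KCSAN hunk above swaps the prandom state for a bare per-CPU linear congruential generator with the "Numerical Recipes" constants, precisely because it must be callable from any context without tracepoints or locks. The generator itself is trivial to reproduce standalone; here is a self-contained version (seeded from a clock rather than get_cycles(), and the modulo bias is accepted, as in the kernel use):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t rand_state;   /* the kernel keeps one of these per CPU */

/* Pseudo-random number in [0, ep_ro): one LCG step, then modulo reduction. */
static uint32_t prandom_u32_max(uint32_t ep_ro)
{
	rand_state = 1664525u * rand_state + 1013904223u;
	return rand_state % ep_ro;
}

int main(void)
{
	rand_state = (uint32_t)clock();   /* crude seed; per-CPU cycles in the kernel */

	for (int i = 0; i < 8; i++)
		printf("%u ", prandom_u32_max(100));
	printf("\n");
	return 0;
}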
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index 1a6db2f797ac..7ee405524904 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -37,18 +37,20 @@
*/
#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
-/*
- * Masks to set/retrieve the encoded data.
- */
-#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
-#define WATCHPOINT_SIZE_MASK \
- GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
-#define WATCHPOINT_ADDR_MASK \
- GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
+/* Bitmasks for the encoded watchpoint access information. */
+#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
+#define WATCHPOINT_SIZE_MASK GENMASK(BITS_PER_LONG-2, WATCHPOINT_ADDR_BITS)
+#define WATCHPOINT_ADDR_MASK GENMASK(WATCHPOINT_ADDR_BITS-1, 0)
+static_assert(WATCHPOINT_ADDR_MASK == (1UL << WATCHPOINT_ADDR_BITS) - 1);
+static_assert((WATCHPOINT_WRITE_MASK ^ WATCHPOINT_SIZE_MASK ^ WATCHPOINT_ADDR_MASK) == ~0UL);
static inline bool check_encodable(unsigned long addr, size_t size)
{
- return size <= MAX_ENCODABLE_SIZE;
+ /*
+ * While we can encode addrs<PAGE_SIZE, avoid crashing with a NULL
+ * pointer deref inside KCSAN.
+ */
+ return addr >= PAGE_SIZE && size <= MAX_ENCODABLE_SIZE;
}
static inline long
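
The rewritten watchpoint masks above are defined so that the write bit, the size field and the address field tile the whole word, and the new static_asserts verify exactly that. The arithmetic is easy to check in isolation; the sketch below assumes a 64-bit long and an illustrative size-field width of 3 bits (the actual kernel constant may differ):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG        64
#define WATCHPOINT_SIZE_BITS 3                 /* illustrative value */
#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG - 1 - WATCHPOINT_SIZE_BITS)

/* GENMASK(h, l): bits h..l set, as in include/linux/bits.h */
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define BIT(n)        (1UL << (n))

#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG - 1)
#define WATCHPOINT_SIZE_MASK  GENMASK(BITS_PER_LONG - 2, WATCHPOINT_ADDR_BITS)
#define WATCHPOINT_ADDR_MASK  GENMASK(WATCHPOINT_ADDR_BITS - 1, 0)

int main(void)
{
	/* Same invariants as the new static_asserts in encoding.h. */
	assert(WATCHPOINT_ADDR_MASK == (1UL << WATCHPOINT_ADDR_BITS) - 1);
	assert((WATCHPOINT_WRITE_MASK ^ WATCHPOINT_SIZE_MASK ^ WATCHPOINT_ADDR_MASK) == ~0UL);

	printf("write: %016lx\nsize:  %016lx\naddr:  %016lx\n",
	       WATCHPOINT_WRITE_MASK, WATCHPOINT_SIZE_MASK, WATCHPOINT_ADDR_MASK);
	return 0;
}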
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index d98bc208d06d..9014a3a82cf9 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -33,6 +33,9 @@ static bool test_encode_decode(void)
unsigned long addr;
prandom_bytes(&addr, sizeof(addr));
+ if (addr < PAGE_SIZE)
+ addr = PAGE_SIZE;
+
if (WARN_ON(!check_encodable(addr, size)))
return false;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8798a8183974..a0b6780740c8 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -42,7 +42,6 @@
#include <asm/sections.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
#include "kexec_internal.h"
DEFINE_MUTEX(kexec_mutex);
@@ -1077,7 +1076,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
if (!buf)
return;
memset(&prstatus, 0, sizeof(prstatus));
- prstatus.pr_pid = current->pid;
+ prstatus.common.pr_pid = current->pid;
elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
@@ -1135,7 +1134,6 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1198,7 +1196,6 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
- unlock_system_sleep();
}
#endif
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index e21f6b9234f7..b02086d70492 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -20,7 +20,7 @@
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 8a12a25fa40d..d5a3eb74a657 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -36,7 +36,6 @@
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/perf_event.h>
-#include <linux/static_call.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -54,7 +53,6 @@ static int kprobes_initialized;
* - RCU hlist traversal under disabling preempt (breakpoint handlers)
*/
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
-static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
@@ -62,9 +60,6 @@ static bool kprobes_all_disarmed;
/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct {
- raw_spinlock_t lock ____cacheline_aligned_in_smp;
-} kretprobe_table_locks[KPROBE_TABLE_SIZE];
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
unsigned int __unused)
@@ -72,11 +67,6 @@ kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}
-static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
-{
- return &(kretprobe_table_locks[hash].lock);
-}
-
/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);
@@ -1224,64 +1214,26 @@ void kprobes_inc_nmissed_count(struct kprobe *p)
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
-static void recycle_rp_inst(struct kretprobe_instance *ri)
-{
- struct kretprobe *rp = ri->rp;
-
- /* remove rp inst off the rprobe_inst_table */
- hlist_del(&ri->hlist);
- INIT_HLIST_NODE(&ri->hlist);
- if (likely(rp)) {
- raw_spin_lock(&rp->lock);
- hlist_add_head(&ri->hlist, &rp->free_instances);
- raw_spin_unlock(&rp->lock);
- } else
- kfree_rcu(ri, rcu);
-}
-NOKPROBE_SYMBOL(recycle_rp_inst);
-
-static void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags)
-__acquires(hlist_lock)
+static void free_rp_inst_rcu(struct rcu_head *head)
{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- raw_spinlock_t *hlist_lock;
+ struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
- *head = &kretprobe_inst_table[hash];
- hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_lock_irqsave(hlist_lock, *flags);
+ if (refcount_dec_and_test(&ri->rph->ref))
+ kfree(ri->rph);
+ kfree(ri);
}
-NOKPROBE_SYMBOL(kretprobe_hash_lock);
+NOKPROBE_SYMBOL(free_rp_inst_rcu);
-static void kretprobe_table_lock(unsigned long hash,
- unsigned long *flags)
-__acquires(hlist_lock)
-{
- raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_lock_irqsave(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_table_lock);
-
-static void kretprobe_hash_unlock(struct task_struct *tsk,
- unsigned long *flags)
-__releases(hlist_lock)
+static void recycle_rp_inst(struct kretprobe_instance *ri)
{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- raw_spinlock_t *hlist_lock;
-
- hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_unlock_irqrestore(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_hash_unlock);
+ struct kretprobe *rp = get_kretprobe(ri);
-static void kretprobe_table_unlock(unsigned long hash,
- unsigned long *flags)
-__releases(hlist_lock)
-{
- raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_unlock_irqrestore(hlist_lock, *flags);
+ if (likely(rp)) {
+ freelist_add(&ri->freelist, &rp->freelist);
+ } else
+ call_rcu(&ri->rcu, free_rp_inst_rcu);
}
-NOKPROBE_SYMBOL(kretprobe_table_unlock);
+NOKPROBE_SYMBOL(recycle_rp_inst);
static struct kprobe kprobe_busy = {
.addr = (void *) get_kprobe,
@@ -1312,24 +1264,21 @@ void kprobe_busy_end(void)
void kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
- struct hlist_head *head;
- struct hlist_node *tmp;
- unsigned long hash, flags = 0;
+ struct llist_node *node;
+ /* Early boot, not yet initialized. */
if (unlikely(!kprobes_initialized))
- /* Early boot. kretprobe_table_locks not yet initialized. */
return;
kprobe_busy_begin();
- hash = hash_ptr(tk, KPROBE_HASH_BITS);
- head = &kretprobe_inst_table[hash];
- kretprobe_table_lock(hash, &flags);
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task == tk)
- recycle_rp_inst(ri);
+ node = __llist_del_all(&tk->kretprobe_instances);
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);
+ node = node->next;
+
+ recycle_rp_inst(ri);
}
- kretprobe_table_unlock(hash, &flags);
kprobe_busy_end();
}
@@ -1338,37 +1287,23 @@ NOKPROBE_SYMBOL(kprobe_flush_task);
static inline void free_rp_inst(struct kretprobe *rp)
{
struct kretprobe_instance *ri;
- struct hlist_node *next;
+ struct freelist_node *node;
+ int count = 0;
+
+ node = rp->freelist.head;
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, freelist);
+ node = node->next;
- hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
- hlist_del(&ri->hlist);
kfree(ri);
+ count++;
}
-}
-static void cleanup_rp_inst(struct kretprobe *rp)
-{
- unsigned long flags, hash;
- struct kretprobe_instance *ri;
- struct hlist_node *next;
- struct hlist_head *head;
-
- /* To avoid recursive kretprobe by NMI, set kprobe busy here */
- kprobe_busy_begin();
- for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
- kretprobe_table_lock(hash, &flags);
- head = &kretprobe_inst_table[hash];
- hlist_for_each_entry_safe(ri, next, head, hlist) {
- if (ri->rp == rp)
- ri->rp = NULL;
- }
- kretprobe_table_unlock(hash, &flags);
+ if (refcount_sub_and_test(count, &rp->rph->ref)) {
+ kfree(rp->rph);
+ rp->rph = NULL;
}
- kprobe_busy_end();
-
- free_rp_inst(rp);
}
-NOKPROBE_SYMBOL(cleanup_rp_inst);
/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
@@ -1631,7 +1566,6 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
- static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
@@ -1930,88 +1864,56 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
void *trampoline_address,
void *frame_pointer)
{
- struct kretprobe_instance *ri = NULL, *last = NULL;
- struct hlist_head *head;
- struct hlist_node *tmp;
- unsigned long flags;
kprobe_opcode_t *correct_ret_addr = NULL;
- bool skipped = false;
-
- kretprobe_hash_lock(current, &head, &flags);
+ struct kretprobe_instance *ri = NULL;
+ struct llist_node *first, *node;
+ struct kretprobe *rp;
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * return probes installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always pushed into the head of the list
- * - when multiple return probes are registered for the same
- * function, the (chronologically) first instance's ret_addr
- * will be the real return address, and all the rest will
- * point to kretprobe_trampoline.
- */
- hlist_for_each_entry(ri, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- /*
- * Return probes must be pushed on this hash list correct
- * order (same as return order) so that it can be popped
- * correctly. However, if we find it is pushed it incorrect
- * order, this means we find a function which should not be
- * probed, because the wrong order entry is pushed on the
- * path of processing other kretprobe itself.
- */
- if (ri->fp != frame_pointer) {
- if (!skipped)
- pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
- skipped = true;
- continue;
- }
+ /* Find all nodes for this frame. */
+ first = node = current->kretprobe_instances.first;
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);
- correct_ret_addr = ri->ret_addr;
- if (skipped)
- pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
- ri->rp->kp.addr);
+ BUG_ON(ri->fp != frame_pointer);
- if (correct_ret_addr != trampoline_address)
+ if (ri->ret_addr != trampoline_address) {
+ correct_ret_addr = ri->ret_addr;
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
- break;
+ goto found;
+ }
+
+ node = node->next;
}
+ pr_err("Oops! Kretprobe fails to find correct return address.\n");
+ BUG_ON(1);
- BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
- last = ri;
+found:
+ /* Unlink all nodes for this frame. */
+ current->kretprobe_instances.first = node->next;
+ node->next = NULL;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- if (ri->fp != frame_pointer)
- continue;
+ /* Run them.. */
+ while (first) {
+ ri = container_of(first, struct kretprobe_instance, llist);
+ first = first->next;
- if (ri->rp && ri->rp->handler) {
+ rp = get_kretprobe(ri);
+ if (rp && rp->handler) {
struct kprobe *prev = kprobe_running();
- __this_cpu_write(current_kprobe, &ri->rp->kp);
+ __this_cpu_write(current_kprobe, &rp->kp);
ri->ret_addr = correct_ret_addr;
- ri->rp->handler(ri, regs);
+ rp->handler(ri, regs);
__this_cpu_write(current_kprobe, prev);
}
recycle_rp_inst(ri);
-
- if (ri == last)
- break;
}
- kretprobe_hash_unlock(current, &flags);
-
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
@@ -2023,39 +1925,26 @@ NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe *rp = container_of(p, struct kretprobe, kp);
- unsigned long hash, flags = 0;
struct kretprobe_instance *ri;
+ struct freelist_node *fn;
- /* TODO: consider to only swap the RA after the last pre_handler fired */
- hash = hash_ptr(current, KPROBE_HASH_BITS);
- raw_spin_lock_irqsave(&rp->lock, flags);
- if (!hlist_empty(&rp->free_instances)) {
- ri = hlist_entry(rp->free_instances.first,
- struct kretprobe_instance, hlist);
- hlist_del(&ri->hlist);
- raw_spin_unlock_irqrestore(&rp->lock, flags);
-
- ri->rp = rp;
- ri->task = current;
-
- if (rp->entry_handler && rp->entry_handler(ri, regs)) {
- raw_spin_lock_irqsave(&rp->lock, flags);
- hlist_add_head(&ri->hlist, &rp->free_instances);
- raw_spin_unlock_irqrestore(&rp->lock, flags);
- return 0;
- }
+ fn = freelist_try_get(&rp->freelist);
+ if (!fn) {
+ rp->nmissed++;
+ return 0;
+ }
- arch_prepare_kretprobe(ri, regs);
+ ri = container_of(fn, struct kretprobe_instance, freelist);
- /* XXX(hch): why is there no hlist_move_head? */
- INIT_HLIST_NODE(&ri->hlist);
- kretprobe_table_lock(hash, &flags);
- hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
- kretprobe_table_unlock(hash, &flags);
- } else {
- rp->nmissed++;
- raw_spin_unlock_irqrestore(&rp->lock, flags);
+ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+ freelist_add(&ri->freelist, &rp->freelist);
+ return 0;
}
+
+ arch_prepare_kretprobe(ri, regs);
+
+ __llist_add(&ri->llist, &current->kretprobe_instances);
+
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
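For illustration, a minimal sketch of the lockless freelist pattern the new pre_handler relies on, assuming the <linux/freelist.h> API introduced alongside this series (freelist_add()/freelist_try_get()); the my_node/my_pool names are hypothetical and not part of this patch.

#include <linux/freelist.h>
#include <linux/slab.h>

struct my_node {
	struct freelist_node fnode;	/* embedded node, like kretprobe_instance.freelist */
	int payload;
};

static struct freelist_head my_pool;	/* zeroed head == empty pool */

/* Pre-populate the pool, e.g. at registration time (cf. register_kretprobe()). */
static int my_pool_fill(int nr)
{
	while (nr--) {
		struct my_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

		if (!n)
			return -ENOMEM;
		freelist_add(&n->fnode, &my_pool);
	}
	return 0;
}

/* Grab a node without taking a lock; NULL here is the rp->nmissed++ case above. */
static struct my_node *my_pool_get(void)
{
	struct freelist_node *fn = freelist_try_get(&my_pool);

	return fn ? container_of(fn, struct my_node, fnode) : NULL;
}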
@@ -2065,28 +1954,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
return !offset;
}
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym: Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * It also returns -ENOENT if the symbol or address lookup fails.
+ * Caller must pass either @addr or @sym (the other must be NULL), or this
+ * returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
if (IS_ERR(kp_addr))
- return false;
+ return PTR_ERR(kp_addr);
- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
- !arch_kprobe_on_func_entry(offset))
- return false;
+ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+ return -ENOENT;
- return true;
+ if (!arch_kprobe_on_func_entry(offset))
+ return -EINVAL;
+
+ return 0;
}
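A hedged caller-side sketch of the new error-code convention (the "vfs_read" target and the wrapper function are made-up examples, not taken from this patch): -ENOENT now distinguishes a failed lookup from -EINVAL for an address that is not a function entry.

#include <linux/kprobes.h>

static int check_probe_target(void)
{
	int ret = kprobe_on_func_entry(NULL, "vfs_read", 0);

	if (ret == -ENOENT)
		pr_debug("symbol or address lookup failed\n");
	else if (ret == -EINVAL)
		pr_debug("not a function entry (or bad arguments)\n");

	return ret;	/* 0 means the target is a function entry */
}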
int register_kretprobe(struct kretprobe *rp)
{
- int ret = 0;
+ int ret;
struct kretprobe_instance *inst;
int i;
void *addr;
- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+ if (ret)
+ return ret;
+
+ /* If only rp->kp.addr is specified, check reregistering kprobes */
+ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
return -EINVAL;
if (kretprobe_blacklist_size) {
@@ -2112,18 +2021,24 @@ int register_kretprobe(struct kretprobe *rp)
rp->maxactive = num_possible_cpus();
#endif
}
- raw_spin_lock_init(&rp->lock);
- INIT_HLIST_HEAD(&rp->free_instances);
+ rp->freelist.head = NULL;
+ rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
+ if (!rp->rph)
+ return -ENOMEM;
+
+ rp->rph->rp = rp;
for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance) +
+ inst = kzalloc(sizeof(struct kretprobe_instance) +
rp->data_size, GFP_KERNEL);
if (inst == NULL) {
+ refcount_set(&rp->rph->ref, i);
free_rp_inst(rp);
return -ENOMEM;
}
- INIT_HLIST_NODE(&inst->hlist);
- hlist_add_head(&inst->hlist, &rp->free_instances);
+ inst->rph = rp->rph;
+ freelist_add(&inst->freelist, &rp->freelist);
}
+ refcount_set(&rp->rph->ref, i);
rp->nmissed = 0;
/* Establish function entry probe point */
@@ -2165,16 +2080,18 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
if (num <= 0)
return;
mutex_lock(&kprobe_mutex);
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
rps[i]->kp.addr = NULL;
+ rps[i]->rph->rp = NULL;
+ }
mutex_unlock(&kprobe_mutex);
synchronize_rcu();
for (i = 0; i < num; i++) {
if (rps[i]->kp.addr) {
__unregister_kprobe_bottom(&rps[i]->kp);
- cleanup_rp_inst(rps[i]);
+ free_rp_inst(rps[i]);
}
}
}
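The register/unregister interface itself is unchanged by the freelist/llist rework, so existing users keep working; below is a minimal, hedged module sketch in the style of samples/kprobes (the probed symbol "kernel_clone" and all names here are illustrative assumptions).

#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/module.h>

struct probe_data {
	ktime_t entry_stamp;		/* per-instance scratch (rp->data_size bytes) */
};

static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct probe_data *data = (struct probe_data *)ri->data;

	data->entry_stamp = ktime_get();
	return 0;			/* non-zero would recycle the instance early */
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct probe_data *data = (struct probe_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	pr_info("kernel_clone returned %lu, took %lld ns\n",
		regs_return_value(regs), delta);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",
	.entry_handler	= entry_handler,
	.handler	= ret_handler,
	.data_size	= sizeof(struct probe_data),
	.maxactive	= 20,
};

static int __init my_probe_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_probe_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");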
@@ -2218,9 +2135,6 @@ static void kill_kprobe(struct kprobe *p)
lockdep_assert_held(&kprobe_mutex);
- if (WARN_ON_ONCE(kprobe_gone(p)))
- return;
-
p->flags |= KPROBE_FLAG_GONE;
if (kprobe_aggrprobe(p)) {
/*
@@ -2501,10 +2415,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry(p, head, hlist) {
- if (kprobe_gone(p))
- continue;
-
+ hlist_for_each_entry(p, head, hlist)
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
@@ -2521,7 +2432,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
*/
kill_kprobe(p);
}
- }
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
@@ -2566,11 +2476,8 @@ static int __init init_kprobes(void)
/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kprobe_table[i]);
- INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
- raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
- }
err = populate_kprobe_blacklist(__start_kprobe_blacklist,
__stop_kprobe_blacklist);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e29773c82b70..1578973c5740 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -294,7 +294,7 @@ static int kthread(void *_create)
do_exit(ret);
}
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
return p;
}
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
+ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+ if (cpu < 0) {
+ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ return;
+ }
+
+ kthread->cpu = cpu;
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return false;
+
+ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
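A hedged sketch of how a caller that manages its own pinned kthread might use the new helper, given that kthread_create_on_cpu() no longer sets KTHREAD_IS_PER_CPU itself; the my_helper names are illustrative (upstream, the smpboot/workqueue code plays this role).

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_helper_fn(void *unused)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *my_helper_start(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(my_helper_fn, NULL, cpu, "my_helper/%u");
	if (IS_ERR(tsk))
		return tsk;

	kthread_set_per_cpu(tsk, cpu);	/* mark the thread per-CPU explicitly now */
	wake_up_process(tsk);
	return tsk;
}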
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
@@ -704,8 +729,15 @@ repeat:
raw_spin_unlock_irq(&worker->lock);
if (work) {
+ kthread_work_func_t func = work->func;
__set_current_state(TASK_RUNNING);
+ trace_sched_kthread_work_execute_start(work);
work->func(work);
+ /*
+ * Avoid dereferencing work after this point. The trace
+ * event only cares about the address.
+ */
+ trace_sched_kthread_work_execute_end(work, func);
} else if (!freezing(current))
schedule();
@@ -786,7 +818,25 @@ EXPORT_SYMBOL(kthread_create_worker);
* A good practice is to add the cpu number also into the worker name.
* For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
*
- * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * CPU hotplug:
+ * The kthread worker API is simple and generic. It just provides a way
+ * to create, use, and destroy workers.
+ *
+ * It is up to the API user how to handle CPU hotplug. They have to decide
+ * how to handle pending work items, prevent queuing new ones, and
+ * restore the functionality when the CPU goes off and on. There are a
+ * few catches:
+ *
+ * - CPU affinity gets lost when the worker is scheduled on an offline CPU.
+ *
+ * - The worker might not exist if the CPU was offline when the user
+ *   created the workers.
+ *
+ * Good practice is to implement two CPU hotplug callbacks and to
+ * destroy/create the worker when the CPU goes down/up (sketched below).
+ *
+ * Return:
+ * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
* when the needed structures could not get allocated, and ERR_PTR(-EINTR)
* when the worker was SIGKILLed.
*/
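A hedged sketch of the "two CPU hotplug callbacks" practice recommended above, assuming a dynamically allocated CPUHP state; the my_worker/my_subsys names are illustrative and not part of this patch.

#include <linux/cpuhotplug.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct kthread_worker *, my_worker);

static int my_worker_online(unsigned int cpu)
{
	struct kthread_worker *w;

	w = kthread_create_worker_on_cpu(cpu, 0, "my_helper/%d", cpu);
	if (IS_ERR(w))
		return PTR_ERR(w);

	per_cpu(my_worker, cpu) = w;
	return 0;
}

static int my_worker_offline(unsigned int cpu)
{
	struct kthread_worker *w = per_cpu(my_worker, cpu);

	per_cpu(my_worker, cpu) = NULL;
	if (w)
		kthread_destroy_worker(w);	/* flushes queued work, then stops it */
	return 0;
}

static int __init my_workers_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my_subsys:online",
				my_worker_online, my_worker_offline);
	return ret < 0 ? ret : 0;	/* DYN states return the allocated state number */
}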
@@ -834,6 +884,8 @@ static void kthread_insert_work(struct kthread_worker *worker,
{
kthread_insert_work_sanity_check(worker, work);
+ trace_sched_kthread_work_queue_work(worker, work);
+
list_add_tail(&work->node, pos);
work->worker = worker;
if (!worker->current_work && likely(worker->task))
@@ -897,7 +949,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
/* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node));
list_del_init(&work->node);
- kthread_insert_work(worker, work, &worker->work_list);
+ if (!work->canceling)
+ kthread_insert_work(worker, work, &worker->work_list);
raw_spin_unlock_irqrestore(&worker->lock, flags);
}
@@ -1248,6 +1301,7 @@ void kthread_use_mm(struct mm_struct *mm)
tsk->active_mm = mm;
}
tsk->mm = mm;
+ membarrier_update_current_mm(mm);
switch_mm_irqs_off(active_mm, mm, tsk);
local_irq_enable();
task_unlock(tsk);
@@ -1255,8 +1309,19 @@ void kthread_use_mm(struct mm_struct *mm)
finish_arch_post_lock_switch();
#endif
+ /*
+ * When a kthread starts operating on an address space, the loop
+	 * in membarrier_{private,global}_expedited() may not yet observe the
+	 * store to tsk->mm and thus not issue an IPI. Membarrier requires a
+ * memory barrier after storing to tsk->mm, before accessing
+ * user-space memory. A full memory barrier for membarrier
+ * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
+ * mmdrop(), or explicitly with smp_mb().
+ */
if (active_mm != mm)
mmdrop(active_mm);
+ else
+ smp_mb();
to_kthread(tsk)->oldfs = force_uaccess_begin();
}
@@ -1276,9 +1341,18 @@ void kthread_unuse_mm(struct mm_struct *mm)
force_uaccess_end(to_kthread(tsk)->oldfs);
task_lock(tsk);
+ /*
+ * When a kthread stops operating on an address space, the loop
+	 * in membarrier_{private,global}_expedited() may no longer observe
+	 * tsk->mm and will thus not issue an IPI. Membarrier requires a
+ * memory barrier after accessing user-space memory, before
+ * clearing tsk->mm.
+ */
+ smp_mb__after_spinlock();
sync_mm_rss(mm);
local_irq_disable();
tsk->mm = NULL;
+ membarrier_update_current_mm(NULL);
/* active_mm is still 'mm' */
enter_lazy_tlb(mm, tsk);
local_irq_enable();
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 54102deb50ba..53d51ed619a3 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -6,7 +6,7 @@ config HAVE_LIVEPATCH
config LIVEPATCH
bool "Kernel Live Patching"
- depends on DYNAMIC_FTRACE_WITH_REGS
+ depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
depends on MODULES
depends on SYSFS
depends on KALLSYMS_ALL
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index b552cf2d85f8..e8029aea67f1 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -40,14 +40,18 @@ struct klp_ops *klp_find_ops(void *old_func)
static void notrace klp_ftrace_handler(unsigned long ip,
unsigned long parent_ip,
struct ftrace_ops *fops,
- struct pt_regs *regs)
+ struct ftrace_regs *fregs)
{
struct klp_ops *ops;
struct klp_func *func;
int patch_state;
+ int bit;
ops = container_of(fops, struct klp_ops, fops);
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (WARN_ON_ONCE(bit < 0))
+ return;
/*
* A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
@@ -113,10 +117,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
if (func->nop)
goto unlock;
- klp_arch_set_pc(regs, (unsigned long)func->new_func);
+ klp_arch_set_pc(fregs, (unsigned long)func->new_func);
unlock:
preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
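For context on the pattern used above, a hedged generic sketch of an ftrace callback using the same recursion protection and the new struct ftrace_regs argument; my_ftrace_handler/my_ops are illustrative and unrelated to livepatch.

#include <linux/ftrace.h>

static void notrace my_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *ops,
				      struct ftrace_regs *fregs)
{
	struct pt_regs *regs;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* Full pt_regs are only populated when FTRACE_OPS_FL_SAVE_REGS is set. */
	regs = ftrace_get_regs(fregs);
	if (regs)
		trace_printk("traced %ps, regs ip=%lx\n", (void *)ip,
			     instruction_pointer(regs));

	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops my_ops = {
	.func	= my_ftrace_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};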
/*
@@ -194,8 +199,10 @@ static int klp_patch_func(struct klp_func *func)
return -ENOMEM;
ops->fops.func = klp_ftrace_handler;
- ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
- FTRACE_OPS_FL_DYNAMIC |
+ ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ FTRACE_OPS_FL_SAVE_REGS |
+#endif
FTRACE_OPS_FL_IPMODIFY |
FTRACE_OPS_FL_PERMANENT;
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 6d11cfb9b41f..8838f1d7c4a2 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -15,6 +15,7 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
+obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
diff --git a/kernel/locking/irqflag-debug.c b/kernel/locking/irqflag-debug.c
new file mode 100644
index 000000000000..810b50344d35
--- /dev/null
+++ b/kernel/locking/irqflag-debug.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/irqflags.h>
+
+noinstr void warn_bogus_irq_restore(void)
+{
+ instrumentation_begin();
+ WARN_ONCE(1, "raw_local_irq_restore() called with IRQs enabled\n");
+ instrumentation_end();
+}
+EXPORT_SYMBOL(warn_bogus_irq_restore);
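As a hedged illustration of what this new CONFIG_DEBUG_IRQFLAGS check catches (not code from this patch): calling raw_local_irq_restore() while interrupts have already been re-enabled behind the save/restore pair.

#include <linux/irqflags.h>

static void bogus_restore_example(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* IRQs off, previous state saved in flags */
	/* ... critical section ... */
	raw_local_irq_enable();		/* bug: breaks the save/restore pairing */
	raw_local_irq_restore(flags);	/* with DEBUG_IRQFLAGS this now warns */
}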
diff --git a/kernel/locking/lock_events_list.h b/kernel/locking/lock_events_list.h
index 239039d0ce21..97fb6f3f840a 100644
--- a/kernel/locking/lock_events_list.h
+++ b/kernel/locking/lock_events_list.h
@@ -56,13 +56,11 @@ LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps */
LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps */
LOCK_EVENT(rwsem_wake_reader) /* # of reader wakeups */
LOCK_EVENT(rwsem_wake_writer) /* # of writer wakeups */
-LOCK_EVENT(rwsem_opt_rlock) /* # of opt-acquired read locks */
-LOCK_EVENT(rwsem_opt_wlock) /* # of opt-acquired write locks */
+LOCK_EVENT(rwsem_opt_lock) /* # of opt-acquired write locks */
LOCK_EVENT(rwsem_opt_fail) /* # of failed optspins */
LOCK_EVENT(rwsem_opt_nospin) /* # of disabled optspins */
-LOCK_EVENT(rwsem_opt_norspin) /* # of disabled reader-only optspins */
-LOCK_EVENT(rwsem_opt_rlock2) /* # of opt-acquired 2ndary read locks */
LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */
+LOCK_EVENT(rwsem_rlock_steal) /* # of read locks by lock stealing */
LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */
LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */
LOCK_EVENT(rwsem_rlock_handoff) /* # of read lock handoffs */
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..c6d0c1dc6253 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -79,12 +79,12 @@ module_param(lock_stat, int, 0644);
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
{
if (!debug_locks)
return false;
- if (raw_cpu_read(lockdep_recursion))
+ if (this_cpu_read(lockdep_recursion))
return false;
if (current->lockdep_recursion)
@@ -108,19 +108,21 @@ static inline void lockdep_lock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ __this_cpu_inc(lockdep_recursion);
arch_spin_lock(&__lock);
__owner = current;
- __this_cpu_inc(lockdep_recursion);
}
static inline void lockdep_unlock(void)
{
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
- __this_cpu_dec(lockdep_recursion);
__owner = NULL;
arch_spin_unlock(&__lock);
+ __this_cpu_dec(lockdep_recursion);
}
static inline bool lockdep_assert_locked(void)
@@ -1288,6 +1290,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
class->name_version = count_matching_names(class);
class->wait_type_inner = lock->wait_type_inner;
class->wait_type_outer = lock->wait_type_outer;
+ class->lock_type = lock->lock_type;
/*
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
@@ -1669,6 +1672,7 @@ static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
static enum bfs_result __bfs(struct lock_list *source_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
+ bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
int offset)
{
@@ -1729,7 +1733,12 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
/*
* Step 3: we haven't visited this and there is a strong
* dependency path to this, so check with @match.
+	 * If @skip is provided and returns true, we skip this
+ * lock (and any path this lock is in).
*/
+ if (skip && skip(lock, data))
+ continue;
+
if (match(lock, data)) {
*target_entry = lock;
return BFS_RMATCH;
@@ -1772,9 +1781,10 @@ static inline enum bfs_result
__bfs_forwards(struct lock_list *src_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
+ bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry,
+ return __bfs(src_entry, data, match, skip, target_entry,
offsetof(struct lock_class, locks_after));
}
@@ -1783,9 +1793,10 @@ static inline enum bfs_result
__bfs_backwards(struct lock_list *src_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
+ bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry,
+ return __bfs(src_entry, data, match, skip, target_entry,
offsetof(struct lock_class, locks_before));
}
@@ -2016,7 +2027,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
unsigned long count = 0;
struct lock_list *target_entry;
- __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+ __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
return count;
}
@@ -2041,7 +2052,7 @@ static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
unsigned long count = 0;
struct lock_list *target_entry;
- __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
+ __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
return count;
}
@@ -2069,11 +2080,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
static noinline enum bfs_result
check_path(struct held_lock *target, struct lock_list *src_entry,
bool (*match)(struct lock_list *entry, void *data),
+ bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
enum bfs_result ret;
- ret = __bfs_forwards(src_entry, target, match, target_entry);
+ ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
if (unlikely(bfs_error(ret)))
print_bfs_bug(ret);
@@ -2100,7 +2112,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(target, &src_entry, hlock_conflict, &target_entry);
+ ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
if (unlikely(ret == BFS_RMATCH)) {
if (!*trace) {
@@ -2118,46 +2130,6 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
return ret;
}
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
- * any error appears in the bfs search.
- */
-static noinline enum bfs_result
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
- enum bfs_result ret;
- struct lock_list *target_entry;
- struct lock_list src_entry;
-
- bfs_init_root(&src_entry, src);
- /*
- * Special setup for check_redundant().
- *
- * To report redundant, we need to find a strong dependency path that
- * is equal to or stronger than <src> -> <target>. So if <src> is E,
- * we need to let __bfs() only search for a path starting at a -(E*)->,
- * we achieve this by setting the initial node's ->only_xr to true in
- * that case. And if <prev> is S, we set initial ->only_xr to false
- * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
- */
- src_entry.only_xr = src->read == 0;
-
- debug_atomic_inc(nr_redundant_checks);
-
- ret = check_path(target, &src_entry, hlock_equal, &target_entry);
-
- if (ret == BFS_RMATCH)
- debug_atomic_inc(nr_redundant);
-
- return ret;
-}
-#endif
-
#ifdef CONFIG_TRACE_IRQFLAGS
/*
@@ -2228,6 +2200,44 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
}
+static inline bool usage_skip(struct lock_list *entry, void *mask)
+{
+ /*
+ * Skip local_lock() for irq inversion detection.
+ *
+ * For !RT, local_lock() is not a real lock, so it won't carry any
+ * dependency.
+ *
+ * For RT, an irq inversion happens when we have lock A and B, and on
+ * some CPU we can have:
+ *
+ * lock(A);
+ * <interrupted>
+ * lock(B);
+ *
+ * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
+ *
+ * Now we prove local_lock() cannot exist in that dependency. First we
+ * have the observation for any lock chain L1 -> ... -> Ln, for any
+ * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
+	 * wait context check will complain. Since B is not a sleep lock,
+	 * B.inner_wait_type >= 2; and since the inner_wait_type of
+	 * local_lock() is 3, which is greater than 2, there is no way
+	 * local_lock() can exist in the dependency B -> ... -> A.
+ *
+ * As a result, we will skip local_lock(), when we search for irq
+ * inversion bugs.
+ */
+ if (entry->class->lock_type == LD_LOCK_PERCPU) {
+ if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
+
/*
* Find a node in the forwards-direction dependency sub-graph starting
* at @root->class that matches @bit.
@@ -2243,7 +2253,7 @@ find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
return result;
}
@@ -2260,7 +2270,7 @@ find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
return result;
}
@@ -2625,7 +2635,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
*/
bfs_init_rootb(&this, prev);
- ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
+ ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
@@ -2692,8 +2702,68 @@ static inline int check_irq_usage(struct task_struct *curr,
{
return 1;
}
+
+static inline bool usage_skip(struct lock_list *entry, void *mask)
+{
+ return false;
+}
+
#endif /* CONFIG_TRACE_IRQFLAGS */
+#ifdef CONFIG_LOCKDEP_SMALL
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Return BFS_RMATCH if it does, BFS_RNOMATCH if it does not, or BFS_E* if
+ * any error appears in the bfs search.
+ */
+static noinline enum bfs_result
+check_redundant(struct held_lock *src, struct held_lock *target)
+{
+ enum bfs_result ret;
+ struct lock_list *target_entry;
+ struct lock_list src_entry;
+
+ bfs_init_root(&src_entry, src);
+ /*
+ * Special setup for check_redundant().
+ *
+ * To report redundant, we need to find a strong dependency path that
+ * is equal to or stronger than <src> -> <target>. So if <src> is E,
+ * we need to let __bfs() only search for a path starting at a -(E*)->,
+ * we achieve this by setting the initial node's ->only_xr to true in
+ * that case. And if <prev> is S, we set initial ->only_xr to false
+ * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
+ */
+ src_entry.only_xr = src->read == 0;
+
+ debug_atomic_inc(nr_redundant_checks);
+
+ /*
+	 * Note: we skip local_lock() for the redundant check because, as
+	 * explained in the comment in usage_skip(), A -> local_lock() -> B
+	 * and A -> B are not the same.
+ */
+ ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
+
+ if (ret == BFS_RMATCH)
+ debug_atomic_inc(nr_redundant);
+
+ return ret;
+}
+
+#else
+
+static inline enum bfs_result
+check_redundant(struct held_lock *src, struct held_lock *target)
+{
+ return BFS_RNOMATCH;
+}
+
+#endif
+
static void inc_chains(int irq_context)
{
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
@@ -2765,7 +2835,9 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
+ * lock class is held but nest_lock is also held, i.e. we rely on the
+ * nest_lock to avoid the deadlock.
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
@@ -2788,7 +2860,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((next->read == 2) && prev->read)
- return 2;
+ continue;
/*
* We're holding the nest_lock, which serializes this lock's
@@ -2912,7 +2984,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
}
}
-#ifdef CONFIG_LOCKDEP_SMALL
/*
* Is the <prev> -> <next> link redundant?
*/
@@ -2921,7 +2992,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
return 0;
else if (ret == BFS_RMATCH)
return 2;
-#endif
if (!*trace) {
*trace = save_trace();
@@ -3593,15 +3663,12 @@ static int validate_chain(struct task_struct *curr,
if (!ret)
return 0;
/*
- * Mark recursive read, as we jump over it when
- * building dependencies (just like we jump over
- * trylock entries):
- */
- if (ret == 2)
- hlock->read = 2;
- /*
* Add dependency only if this lock is not the head
- * of the chain, and if it's not a secondary read-lock:
+ * of the chain, and if the new lock introduces no more
+ * lock dependency (because we already hold a lock with the
+ * same lock class) nor deadlock (because the nest_lock
+ * serializes nesting locks), see the comments for
+ * check_deadlock().
*/
if (!chain_head && ret != 2) {
if (!check_prevs_add(curr, hlock))
@@ -3706,7 +3773,7 @@ static void
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ if (!debug_locks_off() || debug_locks_silent)
return;
pr_warn("\n");
@@ -3747,6 +3814,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
+ graph_unlock();
print_usage_bug(curr, this, bad_bit, new_bit);
return 0;
}
@@ -4057,7 +4125,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
if (unlikely(in_nmi()))
return;
- if (unlikely(__this_cpu_read(lockdep_recursion)))
+ if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4194,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
goto skip_checks;
}
- if (unlikely(__this_cpu_read(lockdep_recursion)))
+ if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (lockdep_hardirqs_enabled()) {
@@ -4396,6 +4464,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
if (unlikely(hlock_class(this)->usage_mask & new_mask))
goto unlock;
+ if (!hlock_class(this)->usage_mask)
+ debug_atomic_dec(nr_unused_locks);
+
hlock_class(this)->usage_mask |= new_mask;
if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4474,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
}
- switch (new_bit) {
- case 0 ... LOCK_USED-1:
+ if (new_bit < LOCK_USED) {
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
- break;
-
- case LOCK_USED:
- debug_atomic_dec(nr_unused_locks);
- break;
-
- default:
- break;
}
unlock:
@@ -4508,9 +4570,9 @@ print_lock_invalid_wait_context(struct task_struct *curr,
*/
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
{
- short next_inner = hlock_class(next)->wait_type_inner;
- short next_outer = hlock_class(next)->wait_type_outer;
- short curr_inner;
+ u8 next_inner = hlock_class(next)->wait_type_inner;
+ u8 next_outer = hlock_class(next)->wait_type_outer;
+ u8 curr_inner;
int depth;
if (!curr->lockdep_depth || !next_inner || next->trylock)
@@ -4533,7 +4595,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
for (; depth < curr->lockdep_depth; depth++) {
struct held_lock *prev = curr->held_locks + depth;
- short prev_inner = hlock_class(prev)->wait_type_inner;
+ u8 prev_inner = hlock_class(prev)->wait_type_inner;
if (prev_inner) {
/*
@@ -4582,9 +4644,9 @@ static inline int check_wait_context(struct task_struct *curr,
/*
* Initialize a lock instance's lock-class mapping info:
*/
-void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass,
- short inner, short outer)
+ u8 inner, u8 outer, u8 lock_type)
{
int i;
@@ -4607,6 +4669,7 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
lock->wait_type_outer = outer;
lock->wait_type_inner = inner;
+ lock->lock_type = lock_type;
/*
* No key, no joy, we need to hash something.
@@ -4641,7 +4704,7 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
raw_local_irq_restore(flags);
}
}
-EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
+EXPORT_SYMBOL_GPL(lockdep_init_map_type);
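Hedged sketch of how the new lock_type argument ties into the LD_LOCK_PERCPU skip logic added earlier in this diff: a per-CPU (local_lock-style) lockdep map would be set up roughly as below; the wrapper name is hypothetical (upstream, the local_lock debug init plays this role).

#include <linux/lockdep.h>

static inline void my_percpu_lock_init(struct lockdep_map *map, const char *name,
					struct lock_class_key *key)
{
	/*
	 * Inner wait type LD_WAIT_CONFIG, no meaningful outer type, and
	 * LD_LOCK_PERCPU so __bfs()'s usage_skip() can recognize the class.
	 */
	lockdep_init_map_type(map, name, key, 0,
			      LD_WAIT_CONFIG, LD_WAIT_INV, LD_LOCK_PERCPU);
}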
struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
@@ -5276,12 +5339,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
/*
* Check whether we follow the irq-flags state precisely:
*/
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
if (!debug_locks)
return;
+ /* Get the warning out.. */
+ instrumentation_begin();
+
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-off.\n");
@@ -5309,6 +5375,8 @@ static void check_flags(unsigned long flags)
if (!debug_locks)
print_irqtrace_events(current);
+
+ instrumentation_end();
#endif
}
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 62d215b2e39f..0ab94e1f1276 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -27,8 +27,8 @@
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/percpu-rwsem.h>
#include <linux/torture.h>
+#include <linux/reboot.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
@@ -60,6 +60,7 @@ static struct task_struct **reader_tasks;
static bool lock_is_write_held;
static bool lock_is_read_held;
+static unsigned long last_lock_release;
struct lock_stress_stats {
long n_lock_fail;
@@ -74,6 +75,7 @@ static void lock_torture_cleanup(void);
*/
struct lock_torture_ops {
void (*init)(void);
+ void (*exit)(void);
int (*writelock)(void);
void (*write_delay)(struct torture_random_state *trsp);
void (*task_boost)(struct torture_random_state *trsp);
@@ -90,12 +92,13 @@ struct lock_torture_cxt {
int nrealwriters_stress;
int nrealreaders_stress;
bool debug_lock;
+ bool init_called;
atomic_t n_lock_torture_errors;
struct lock_torture_ops *cur_ops;
struct lock_stress_stats *lwsa; /* writer statistics */
struct lock_stress_stats *lrsa; /* reader statistics */
};
-static struct lock_torture_cxt cxt = { 0, 0, false,
+static struct lock_torture_cxt cxt = { 0, 0, false, false,
ATOMIC_INIT(0),
NULL, NULL};
/*
@@ -571,6 +574,11 @@ static void torture_percpu_rwsem_init(void)
BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
+static void torture_percpu_rwsem_exit(void)
+{
+ percpu_free_rwsem(&pcpu_rwsem);
+}
+
static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
percpu_down_write(&pcpu_rwsem);
@@ -595,6 +603,7 @@ static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
static struct lock_torture_ops percpu_rwsem_lock_ops = {
.init = torture_percpu_rwsem_init,
+ .exit = torture_percpu_rwsem_exit,
.writelock = torture_percpu_rwsem_down_write,
.write_delay = torture_rwsem_write_delay,
.task_boost = torture_boost_dummy,
@@ -632,6 +641,7 @@ static int lock_torture_writer(void *arg)
lwsp->n_lock_acquired++;
cxt.cur_ops->write_delay(&rand);
lock_is_write_held = false;
+ WRITE_ONCE(last_lock_release, jiffies);
cxt.cur_ops->writeunlock();
stutter_wait("lock_torture_writer");
@@ -786,9 +796,10 @@ static void lock_torture_cleanup(void)
/*
* Indicates early cleanup, meaning that the test has not run,
- * such as when passing bogus args when loading the module. As
- * such, only perform the underlying torture-specific cleanups,
- * and avoid anything related to locktorture.
+ * such as when passing bogus args when loading the module.
+	 * However, cxt.cur_ops->init() may have been invoked, so besides
+	 * performing the underlying torture-specific cleanups, cur_ops->exit()
+	 * will be invoked if needed.
*/
if (!cxt.lwsa && !cxt.lrsa)
goto end;
@@ -828,6 +839,11 @@ static void lock_torture_cleanup(void)
cxt.lrsa = NULL;
end:
+ if (cxt.init_called) {
+ if (cxt.cur_ops->exit)
+ cxt.cur_ops->exit();
+ cxt.init_called = false;
+ }
torture_cleanup_end();
}
@@ -868,14 +884,17 @@ static int __init lock_torture_init(void)
goto unwind;
}
- if (nwriters_stress == 0 && nreaders_stress == 0) {
+ if (nwriters_stress == 0 &&
+ (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
pr_alert("lock-torture: must run at least one locking thread\n");
firsterr = -EINVAL;
goto unwind;
}
- if (cxt.cur_ops->init)
+ if (cxt.cur_ops->init) {
cxt.cur_ops->init();
+ cxt.init_called = true;
+ }
if (nwriters_stress >= 0)
cxt.nrealwriters_stress = nwriters_stress;
@@ -1038,6 +1057,10 @@ static int __init lock_torture_init(void)
unwind:
torture_init_end();
lock_torture_cleanup();
+ if (shutdown_secs) {
+ WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
+ kernel_power_off();
+ }
return firsterr;
}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 5352ce50a97e..adb935090768 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -86,16 +86,6 @@ bool mutex_is_locked(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_is_locked);
-__must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
- if (unlikely(__mutex_owner(lock) == current))
- return MUTEX_TRYLOCK_RECURSIVE;
-
- return mutex_trylock(lock);
-}
-EXPORT_SYMBOL(mutex_trylock_recursive);
-
static inline unsigned long __owner_flags(unsigned long owner)
{
return owner & MUTEX_FLAGS;
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index cfdd5b93264d..03b21135313c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -267,27 +267,18 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
return 1;
}
+#define __node_2_waiter(node) \
+ rb_entry((node), struct rt_mutex_waiter, tree_entry)
+
+static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
+{
+ return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+}
+
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
- struct rb_node **link = &lock->waiters.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct rt_mutex_waiter *entry;
- bool leftmost = true;
-
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
- if (rt_mutex_waiter_less(waiter, entry)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
- leftmost = false;
- }
- }
-
- rb_link_node(&waiter->tree_entry, parent, link);
- rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
+ rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
}
static void
@@ -300,27 +291,18 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
RB_CLEAR_NODE(&waiter->tree_entry);
}
+#define __node_2_pi_waiter(node) \
+ rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
+
+static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
+{
+ return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
+}
+
static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
- struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct rt_mutex_waiter *entry;
- bool leftmost = true;
-
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
- if (rt_mutex_waiter_less(waiter, entry)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
- leftmost = false;
- }
- }
-
- rb_link_node(&waiter->pi_tree_entry, parent, link);
- rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
+ rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
}
static void
@@ -1604,8 +1586,11 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
- * Futex variant, that since futex variants do not use the fast-path, can be
- * simple and will not need to retry.
+ * __rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock. Since futex
+ * variants do not use the fast-path, it can be simple and need not retry.
+ *
+ * @lock: The rt_mutex to be unlocked
+ * @wake_q: The wake queue head from which to get the next lock waiter
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
struct wake_q_head *wake_q)
@@ -1662,13 +1647,15 @@ void rt_mutex_destroy(struct rt_mutex *lock)
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
- * __rt_mutex_init - initialize the rt lock
+ * __rt_mutex_init - initialize the rt_mutex
*
- * @lock: the rt lock to be initialized
+ * @lock: The rt_mutex to be initialized
+ * @name: The lock name used for debugging
+ * @key: The lock class key used for debugging
*
- * Initialize the rt lock to unlocked state.
+ * Initialize the rt_mutex to unlocked state.
*
- * Initializing of a locked rt lock is not allowed
+ * Initializing of a locked rt_mutex is not allowed
*/
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key)
@@ -1716,8 +1703,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
* possible because it belongs to the pi_state which is about to be freed
* and it is not longer visible to other tasks.
*/
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index d1d62f942be2..ca6fb489007b 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index f11b9bd3431d..ba67600c7b2c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -31,19 +31,13 @@
#include "lock_events.h"
/*
- * The least significant 3 bits of the owner value has the following
+ * The least significant 2 bits of the owner value have the following
* meanings when set.
* - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
- * - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
- * - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
+ * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
*
- * When the rwsem is either owned by an anonymous writer, or it is
- * reader-owned, but a spinning writer has timed out, both nonspinnable
- * bits will be set to disable optimistic spinning by readers and writers.
- * In the later case, the last unlocking reader should then check the
- * writer nonspinnable bit and clear it only to give writers preference
- * to acquire the lock via optimistic spinning, but not readers. Similar
- * action is also done in the reader slowpath.
+ * When the rwsem is reader-owned and a spinning writer has timed out,
+ * the nonspinnable bit will be set to disable optimistic spinning.
* When a writer acquires a rwsem, it puts its task_struct pointer
* into the owner field. It is cleared after an unlock.
@@ -59,46 +53,14 @@
* is involved. Ideally we would like to track all the readers that own
* a rwsem, but the overhead is simply too big.
*
- * Reader optimistic spinning is helpful when the reader critical section
- * is short and there aren't that many readers around. It makes readers
- * relatively more preferred than writers. When a writer times out spinning
- * on a reader-owned lock and set the nospinnable bits, there are two main
- * reasons for that.
- *
- * 1) The reader critical section is long, perhaps the task sleeps after
- * acquiring the read lock.
- * 2) There are just too many readers contending the lock causing it to
- * take a while to service all of them.
- *
- * In the former case, long reader critical section will impede the progress
- * of writers which is usually more important for system performance. In
- * the later case, reader optimistic spinning tends to make the reader
- * groups that contain readers that acquire the lock together smaller
- * leading to more of them. That may hurt performance in some cases. In
- * other words, the setting of nonspinnable bits indicates that reader
- * optimistic spinning may not be helpful for those workloads that cause
- * it.
- *
- * Therefore, any writers that had observed the setting of the writer
- * nonspinnable bit for a given rwsem after they fail to acquire the lock
- * via optimistic spinning will set the reader nonspinnable bit once they
- * acquire the write lock. Similarly, readers that observe the setting
- * of reader nonspinnable bit at slowpath entry will set the reader
- * nonspinnable bits when they acquire the read lock via the wakeup path.
- *
- * Once the reader nonspinnable bit is on, it will only be reset when
- * a writer is able to acquire the rwsem in the fast path or somehow a
- * reader or writer in the slowpath doesn't observe the nonspinable bit.
- *
- * This is to discourage reader optmistic spinning on that particular
- * rwsem and make writers more preferred. This adaptive disabling of reader
- * optimistic spinning will alleviate the negative side effect of this
- * feature.
+ * Fast path reader optimistic lock stealing is supported when the rwsem
+ * is previously owned by a writer and the following conditions are met:
+ * - OSQ is empty
+ * - rwsem is not currently writer owned
+ * - the handoff isn't set.
*/
#define RWSEM_READER_OWNED (1UL << 0)
-#define RWSEM_RD_NONSPINNABLE (1UL << 1)
-#define RWSEM_WR_NONSPINNABLE (1UL << 2)
-#define RWSEM_NONSPINNABLE (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
+#define RWSEM_NONSPINNABLE (1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
#ifdef CONFIG_DEBUG_RWSEMS
@@ -203,7 +165,7 @@ static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
struct task_struct *owner)
{
unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
- (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
+ (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
atomic_long_set(&sem->owner, val);
}
@@ -270,12 +232,31 @@ static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
owner | RWSEM_NONSPINNABLE));
}
-static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
+static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
- long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
- if (WARN_ON_ONCE(cnt < 0))
+ *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
+
+ if (WARN_ON_ONCE(*cntp < 0))
rwsem_set_nonspinnable(sem);
- return !(cnt & RWSEM_READ_FAILED_MASK);
+
+ if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
+ rwsem_set_reader_owned(sem);
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
+{
+ long tmp = RWSEM_UNLOCKED_VALUE;
+
+ if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
+ rwsem_set_owner(sem);
+ return true;
+ }
+
+ return false;
}
/*
@@ -353,7 +334,6 @@ struct rwsem_waiter {
struct task_struct *task;
enum rwsem_waiter_type type;
unsigned long timeout;
- unsigned long last_rowner;
};
#define rwsem_first_waiter(sem) \
list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
@@ -467,10 +447,6 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
* the reader is copied over.
*/
owner = waiter->task;
- if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
- owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
- lockevent_inc(rwsem_opt_norspin);
- }
__rwsem_set_reader_owned(sem, owner);
}
@@ -602,30 +578,6 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
- * Try to acquire read lock before the reader is put on wait queue.
- * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
- * is ongoing.
- */
-static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
-{
- long count = atomic_long_read(&sem->count);
-
- if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
- return false;
-
- count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
- if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
- rwsem_set_reader_owned(sem);
- lockevent_inc(rwsem_opt_rlock);
- return true;
- }
-
- /* Back out the change */
- atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
- return false;
-}
-
-/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
@@ -636,7 +588,7 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
count | RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
- lockevent_inc(rwsem_opt_wlock);
+ lockevent_inc(rwsem_opt_lock);
return true;
}
}
@@ -652,8 +604,7 @@ static inline bool owner_on_cpu(struct task_struct *owner)
return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}
-static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
- unsigned long nonspinnable)
+static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner;
unsigned long flags;
@@ -670,7 +621,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
/*
* Don't check the read-owner as the entry may be stale.
*/
- if ((flags & nonspinnable) ||
+ if ((flags & RWSEM_NONSPINNABLE) ||
(owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
ret = false;
rcu_read_unlock();
@@ -700,9 +651,9 @@ enum owner_state {
#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
static inline enum owner_state
-rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
+rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
- if (flags & nonspinnable)
+ if (flags & RWSEM_NONSPINNABLE)
return OWNER_NONSPINNABLE;
if (flags & RWSEM_READER_OWNED)
@@ -712,14 +663,14 @@ rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long
}
static noinline enum owner_state
-rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
+rwsem_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *new, *owner;
unsigned long flags, new_flags;
enum owner_state state;
owner = rwsem_owner_flags(sem, &flags);
- state = rwsem_owner_state(owner, flags, nonspinnable);
+ state = rwsem_owner_state(owner, flags);
if (state != OWNER_WRITER)
return state;
@@ -733,7 +684,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
*/
new = rwsem_owner_flags(sem, &new_flags);
if ((new != owner) || (new_flags != flags)) {
- state = rwsem_owner_state(new, new_flags, nonspinnable);
+ state = rwsem_owner_state(new, new_flags);
break;
}
@@ -782,14 +733,12 @@ static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
return sched_clock() + delta;
}
-static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
+static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
bool taken = false;
int prev_owner_state = OWNER_NULL;
int loop = 0;
u64 rspin_threshold = 0;
- unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
- : RWSEM_RD_NONSPINNABLE;
preempt_disable();
@@ -806,15 +755,14 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
for (;;) {
enum owner_state owner_state;
- owner_state = rwsem_spin_on_owner(sem, nonspinnable);
+ owner_state = rwsem_spin_on_owner(sem);
if (!(owner_state & OWNER_SPINNABLE))
break;
/*
* Try to acquire the lock
*/
- taken = wlock ? rwsem_try_write_lock_unqueued(sem)
- : rwsem_try_read_lock_unqueued(sem);
+ taken = rwsem_try_write_lock_unqueued(sem);
if (taken)
break;
@@ -822,7 +770,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
/*
* Time-based reader-owned rwsem optimistic spinning
*/
- if (wlock && (owner_state == OWNER_READER)) {
+ if (owner_state == OWNER_READER) {
/*
* Re-initialize rspin_threshold every time when
* the owner state changes from non-reader to reader.
@@ -831,7 +779,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
* the beginning of the 2nd reader phase.
*/
if (prev_owner_state != OWNER_READER) {
- if (rwsem_test_oflags(sem, nonspinnable))
+ if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
break;
rspin_threshold = rwsem_rspin_threshold(sem);
loop = 0;
@@ -907,78 +855,30 @@ done:
}
/*
- * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
+ * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
* only be called when the reader count reaches 0.
- *
- * This give writers better chance to acquire the rwsem first before
- * readers when the rwsem was being held by readers for a relatively long
- * period of time. Race can happen that an optimistic spinner may have
- * just stolen the rwsem and set the owner, but just clearing the
- * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
*/
-static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
+static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
- if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
- atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
+ if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
+ atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}
-/*
- * This function is called when the reader fails to acquire the lock via
- * optimistic spinning. In this case we will still attempt to do a trylock
- * when comparing the rwsem state right now with the state when entering
- * the slowpath indicates that the reader is still in a valid reader phase.
- * This happens when the following conditions are true:
- *
- * 1) The lock is currently reader owned, and
- * 2) The lock is previously not reader-owned or the last read owner changes.
- *
- * In the former case, we have transitioned from a writer phase to a
- * reader-phase while spinning. In the latter case, it means the reader
- * phase hasn't ended when we entered the optimistic spinning loop. In
- * both cases, the reader is eligible to acquire the lock. This is the
- * secondary path where a read lock is acquired optimistically.
- *
- * The reader non-spinnable bit wasn't set at time of entry or it will
- * not be here at all.
- */
-static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
- unsigned long last_rowner)
-{
- unsigned long owner = atomic_long_read(&sem->owner);
-
- if (!(owner & RWSEM_READER_OWNED))
- return false;
-
- if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
- rwsem_try_read_lock_unqueued(sem)) {
- lockevent_inc(rwsem_opt_rlock2);
- lockevent_add(rwsem_opt_fail, -1);
- return true;
- }
- return false;
-}
#else
-static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
- unsigned long nonspinnable)
+static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
return false;
}
-static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
+static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
return false;
}
-static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
-
-static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
- unsigned long last_rowner)
-{
- return false;
-}
+static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
static inline int
-rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
+rwsem_spin_on_owner(struct rw_semaphore *sem)
{
return 0;
}
@@ -989,36 +889,35 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
* Wait for the read lock to be granted
*/
static struct rw_semaphore __sched *
-rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
+rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
{
- long count, adjustment = -RWSEM_READER_BIAS;
+ long adjustment = -RWSEM_READER_BIAS;
+ long rcnt = (count >> RWSEM_READER_SHIFT);
struct rwsem_waiter waiter;
DEFINE_WAKE_Q(wake_q);
bool wake = false;
/*
- * Save the current read-owner of rwsem, if available, and the
- * reader nonspinnable bit.
+ * To prevent a constant stream of readers from starving a sleeping
+ * waiter, don't attempt optimistic lock stealing if the lock is
+ * currently owned by readers.
*/
- waiter.last_rowner = atomic_long_read(&sem->owner);
- if (!(waiter.last_rowner & RWSEM_READER_OWNED))
- waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
-
- if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
+ if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
+ (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
goto queue;
/*
- * Undo read bias from down_read() and do optimistic spinning.
+ * Reader optimistic lock stealing.
*/
- atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
- adjustment = 0;
- if (rwsem_optimistic_spin(sem, false)) {
- /* rwsem_optimistic_spin() implies ACQUIRE on success */
+ if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
+ rwsem_set_reader_owned(sem);
+ lockevent_inc(rwsem_rlock_steal);
+
/*
- * Wake up other readers in the wait list if the front
- * waiter is a reader.
+ * Wake up other readers in the wait queue if it is
+ * the first reader.
*/
- if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
+ if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
raw_spin_lock_irq(&sem->wait_lock);
if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
@@ -1027,9 +926,6 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
wake_up_q(&wake_q);
}
return sem;
- } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
- /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
- return sem;
}
queue:
@@ -1045,7 +941,7 @@ queue:
* exit the slowpath and return immediately as its
* RWSEM_READER_BIAS has already been set in the count.
*/
- if (adjustment && !(atomic_long_read(&sem->count) &
+ if (!(atomic_long_read(&sem->count) &
(RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
/* Provide lock ACQUIRE */
smp_acquire__after_ctrl_dep();
@@ -1059,10 +955,7 @@ queue:
list_add_tail(&waiter.list, &sem->wait_list);
/* we're now waiting on the lock, but no longer actively locking */
- if (adjustment)
- count = atomic_long_add_return(adjustment, &sem->count);
- else
- count = atomic_long_read(&sem->count);
+ count = atomic_long_add_return(adjustment, &sem->count);
/*
* If there are no active locks, wake the front queued process(es).
@@ -1071,7 +964,7 @@ queue:
* wake our own waiter to join the existing active readers !
*/
if (!(count & RWSEM_LOCK_MASK)) {
- clear_wr_nonspinnable(sem);
+ clear_nonspinnable(sem);
wake = true;
}
if (wake || (!(count & RWSEM_WRITER_MASK) &&
@@ -1117,46 +1010,24 @@ out_nolock:
}
/*
- * This function is called by the a write lock owner. So the owner value
- * won't get changed by others.
- */
-static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
- bool disable)
-{
- if (unlikely(disable)) {
- atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
- lockevent_inc(rwsem_opt_norspin);
- }
-}
-
-/*
* Wait until we successfully acquire the write lock
*/
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
long count;
- bool disable_rspin;
enum writer_wait_state wstate;
struct rwsem_waiter waiter;
struct rw_semaphore *ret = sem;
DEFINE_WAKE_Q(wake_q);
/* do optimistic spinning and steal lock if possible */
- if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
- rwsem_optimistic_spin(sem, true)) {
+ if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
/* rwsem_optimistic_spin() implies ACQUIRE on success */
return sem;
}
/*
- * Disable reader optimistic spinning for this rwsem after
- * acquiring the write lock when the setting of the nonspinnable
- * bits are observed.
- */
- disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
-
- /*
* Optimistic spinning failed, proceed to the slowpath
* and block until we can acquire the sem.
*/
@@ -1224,7 +1095,7 @@ wait:
* without sleeping.
*/
if (wstate == WRITER_HANDOFF &&
- rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
+ rwsem_spin_on_owner(sem) == OWNER_NULL)
goto trylock_again;
/* Block until there are no active lockers. */
@@ -1266,7 +1137,6 @@ trylock_again:
}
__set_current_state(TASK_RUNNING);
list_del(&waiter.list);
- rwsem_disable_reader_optspin(sem, disable_rspin);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
@@ -1335,26 +1205,31 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
- if (!rwsem_read_trylock(sem)) {
- rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
+ long count;
+
+ if (!rwsem_read_trylock(sem, &count)) {
+ if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
+ return -EINTR;
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
- } else {
- rwsem_set_reader_owned(sem);
}
+ return 0;
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ __down_read_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __down_read_interruptible(struct rw_semaphore *sem)
+{
+ return __down_read_common(sem, TASK_INTERRUPTIBLE);
}
static inline int __down_read_killable(struct rw_semaphore *sem)
{
- if (!rwsem_read_trylock(sem)) {
- if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
- return -EINTR;
- DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
- } else {
- rwsem_set_reader_owned(sem);
- }
- return 0;
+ return __down_read_common(sem, TASK_KILLABLE);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
@@ -1380,44 +1255,30 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
- long tmp = RWSEM_UNLOCKED_VALUE;
+ if (unlikely(!rwsem_write_trylock(sem))) {
+ if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
+ return -EINTR;
+ }
- if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
- RWSEM_WRITER_LOCKED)))
- rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
- else
- rwsem_set_owner(sem);
+ return 0;
}
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_semaphore *sem)
{
- long tmp = RWSEM_UNLOCKED_VALUE;
+ __down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
- if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
- RWSEM_WRITER_LOCKED))) {
- if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
- return -EINTR;
- } else {
- rwsem_set_owner(sem);
- }
- return 0;
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+ return __down_write_common(sem, TASK_KILLABLE);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long tmp;
-
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-
- tmp = RWSEM_UNLOCKED_VALUE;
- if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
- RWSEM_WRITER_LOCKED)) {
- rwsem_set_owner(sem);
- return true;
- }
- return false;
+ return rwsem_write_trylock(sem);
}
/*
@@ -1435,7 +1296,7 @@ static inline void __up_read(struct rw_semaphore *sem)
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
RWSEM_FLAG_WAITERS)) {
- clear_wr_nonspinnable(sem);
+ clear_nonspinnable(sem);
rwsem_wake(sem, tmp);
}
}
@@ -1495,6 +1356,20 @@ void __sched down_read(struct rw_semaphore *sem)
}
EXPORT_SYMBOL(down_read);
+int __sched down_read_interruptible(struct rw_semaphore *sem)
+{
+ might_sleep();
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
+
+ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
+ rwsem_release(&sem->dep_map, _RET_IP_);
+ return -EINTR;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(down_read_interruptible);
+
int __sched down_read_killable(struct rw_semaphore *sem)
{
might_sleep();
@@ -1605,6 +1480,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
}
EXPORT_SYMBOL(down_read_nested);
+int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
+{
+ might_sleep();
+ rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
+
+ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
+ rwsem_release(&sem->dep_map, _RET_IP_);
+ return -EINTR;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(down_read_killable_nested);
+
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
might_sleep();
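
The hunks above collapse the reader and writer slowpaths into __down_read_common()/__down_write_common() and export the new down_read_interruptible(). A minimal usage sketch of the interruptible read path, assuming a caller-defined semaphore (example_sem and example_read_locked_op are illustrative names, not part of this patch):

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* hypothetical lock, for illustration only */

static int example_read_locked_op(void)
{
	int ret;

	/* Sleeps until the read lock is granted; returns -EINTR on a signal. */
	ret = down_read_interruptible(&example_sem);
	if (ret)
		return ret;

	/* ... read-side critical section ... */

	up_read(&example_sem);
	return 0;
}

The down_read_killable_nested() variant added further up follows the same pattern but reacts only to fatal signals and additionally takes a lockdep subclass.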
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/kernel/locking/rwsem.h
+++ /dev/null
diff --git a/kernel/module.c b/kernel/module.c
index a4fa44a652a7..4bf30e4b3eaa 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- Copyright (C) 2002 Richard Henderson
- Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
-
-*/
+ * Copyright (C) 2002 Richard Henderson
+ * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
+ */
#define INCLUDE_VERMAGIC
@@ -86,7 +85,8 @@
* 1) List of modules (also safely readable with preempt_disable),
* 2) module_use links,
* 3) module_addr_min/module_addr_max.
- * (delete and add uses RCU list operations). */
+ * (delete and add uses RCU list operations).
+ */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
@@ -380,6 +380,35 @@ static void *section_objs(const struct load_info *info,
return (void *)info->sechdrs[sec].sh_addr;
}
+/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
+static unsigned int find_any_sec(const struct load_info *info, const char *name)
+{
+ unsigned int i;
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ Elf_Shdr *shdr = &info->sechdrs[i];
+ if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
+ return i;
+ }
+ return 0;
+}
+
+/*
+ * Find a module section, or NULL. Fill in number of "objects" in section.
+ * Ignores SHF_ALLOC flag.
+ */
+static __maybe_unused void *any_section_objs(const struct load_info *info,
+ const char *name,
+ size_t object_size,
+ unsigned int *num)
+{
+ unsigned int sec = find_any_sec(info, name);
+
+ /* Section 0 has sh_addr 0 and sh_size 0. */
+ *num = info->sechdrs[sec].sh_size / object_size;
+ return (void *)info->sechdrs[sec].sh_addr;
+}
+
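
The only in-tree caller of any_section_objs() added by this patch is the .BTF lookup in find_module_sections() further down; the fragment below is an illustrative restatement of that call pattern (the wrapper function and the pr_debug() are assumptions, not part of the patch):

/* Illustrative only: fetch a non-SHF_ALLOC section as an array of bytes. */
static void example_find_btf(struct module *mod, const struct load_info *info)
{
	unsigned int btf_size;
	void *btf_data;

	btf_data = any_section_objs(info, ".BTF", 1, &btf_size);
	if (btf_size)
		pr_debug("%s: .BTF is %u bytes\n", mod->name, btf_size);
}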
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
@@ -586,8 +615,10 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,
return false;
}
-/* Find an exported symbol and return it, along with, (optional) crc and
- * (optional) module which owns it. Needs preempt disabled or module_mutex. */
+/*
+ * Find an exported symbol and return it, along with (optional) crc and
+ * (optional) module which owns it. Needs preempt disabled or module_mutex.
+ */
static const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const s32 **crc,
@@ -727,13 +758,12 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
}
/**
- * is_module_percpu_address - test whether address is from module static percpu
+ * is_module_percpu_address() - test whether address is from module static percpu
* @addr: address to test
*
* Test whether @addr belongs to module static percpu area.
*
- * RETURNS:
- * %true if @addr is from module static percpu area
+ * Return: %true if @addr is from module static percpu area
*/
bool is_module_percpu_address(unsigned long addr)
{
@@ -957,11 +987,10 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
}
/**
- * module_refcount - return the refcount or -1 if unloading
- *
+ * module_refcount() - return the refcount or -1 if unloading
* @mod: the module we're checking
*
- * Returns:
+ * Return:
* -1 if the module is in the process of unloading
* otherwise the number of references in the kernel to the module
*/
@@ -1646,8 +1675,10 @@ static void remove_sect_attrs(struct module *mod)
if (mod->sect_attrs) {
sysfs_remove_group(&mod->mkobj.kobj,
&mod->sect_attrs->grp);
- /* We are positive that no one is using any sect attrs
- * at this point. Deallocate immediately. */
+ /*
+ * We are positive that no one is using any sect attrs
+ * at this point. Deallocate immediately.
+ */
free_sect_attrs(mod->sect_attrs);
mod->sect_attrs = NULL;
}
@@ -1895,7 +1926,6 @@ static int mod_sysfs_init(struct module *mod)
if (err)
mod_kobject_put(mod);
- /* delay uevent until full sysfs population */
out:
return err;
}
@@ -1932,7 +1962,6 @@ static int mod_sysfs_setup(struct module *mod,
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);
- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
return 0;
out_unreg_modinfo_attrs:
@@ -2218,8 +2247,10 @@ static void free_module(struct module *mod)
mod_sysfs_teardown(mod);
- /* We leave it in list to prevent duplicate loads, but make sure
- * that noone uses it while it's being deconstructed. */
+ /*
+	 * We leave it in the list to prevent duplicate loads, but make sure
+	 * that no one uses it while it's being deconstructed.
+ */
mutex_lock(&module_mutex);
mod->state = MODULE_STATE_UNFORMED;
mutex_unlock(&module_mutex);
@@ -2336,8 +2367,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
if (!strncmp(name, "__gnu_lto", 9))
break;
- /* We compiled with -fno-common. These are not
- supposed to happen. */
+ /*
+ * We compiled with -fno-common. These are not
+ * supposed to happen.
+ */
pr_debug("Common symbol: %s\n", name);
pr_warn("%s: please compile with -fno-common\n",
mod->name);
@@ -2440,16 +2473,20 @@ static long get_offset(struct module *mod, unsigned int *size,
return ret;
}
-/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
- might -- code, read-only data, read-write data, small data. Tally
- sizes, and place the offsets into sh_entsize fields: high bit means it
- belongs in init. */
+/*
+ * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ * might -- code, read-only data, read-write data, small data. Tally
+ * sizes, and place the offsets into sh_entsize fields: high bit means it
+ * belongs in init.
+ */
static void layout_sections(struct module *mod, struct load_info *info)
{
static unsigned long const masks[][2] = {
- /* NOTE: all executable code must be the first section
+ /*
+ * NOTE: all executable code must be the first section
* in this array; otherwise modify the text_size
- * finder in the two loops below */
+ * finder in the two loops below
+ */
{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
@@ -2895,40 +2932,43 @@ static int module_sig_check(struct load_info *info, int flags)
/* We truncate the module to discard the signature */
info->len -= markerlen;
err = mod_verify_sig(mod, info);
+ if (!err) {
+ info->sig_ok = true;
+ return 0;
+ }
}
+ /*
+ * We don't permit modules to be loaded into the trusted kernels
+ * without a valid signature on them, but if we're not enforcing,
+ * certain errors are non-fatal.
+ */
switch (err) {
- case 0:
- info->sig_ok = true;
- return 0;
-
- /* We don't permit modules to be loaded into trusted kernels
- * without a valid signature on them, but if we're not
- * enforcing, certain errors are non-fatal.
- */
case -ENODATA:
- reason = "Loading of unsigned module";
- goto decide;
+ reason = "unsigned module";
+ break;
case -ENOPKG:
- reason = "Loading of module with unsupported crypto";
- goto decide;
+ reason = "module with unsupported crypto";
+ break;
case -ENOKEY:
- reason = "Loading of module with unavailable key";
- decide:
- if (is_module_sig_enforced()) {
- pr_notice("%s: %s is rejected\n", info->name, reason);
- return -EKEYREJECTED;
- }
-
- return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
+ reason = "module with unavailable key";
+ break;
- /* All other errors are fatal, including nomem, unparseable
- * signatures and signature check failures - even if signatures
- * aren't required.
- */
default:
+ /*
+ * All other errors are fatal, including lack of memory,
+ * unparseable signatures, and signature check failures --
+ * even if signatures aren't required.
+ */
return err;
}
+
+ if (is_module_sig_enforced()) {
+ pr_notice("%s: loading of %s is rejected\n", info->name, reason);
+ return -EKEYREJECTED;
+ }
+
+ return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info, int flags)
@@ -3061,8 +3101,10 @@ static int rewrite_section_headers(struct load_info *info, int flags)
return -ENOEXEC;
}
- /* Mark all sections sh_addr with their address in the
- temporary image. */
+ /*
+ * Mark all sections sh_addr with their address in the
+ * temporary image.
+ */
shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
#ifndef CONFIG_MODULE_UNLOAD
@@ -3250,6 +3292,9 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->bpf_raw_events),
&mod->num_bpf_raw_events);
#endif
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
+#endif
#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
@@ -3493,9 +3538,11 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
- /* Determine total sizes, and put offsets in sh_entsize. For now
- this is done generically; there doesn't appear to be any
- special cases for the architectures. */
+ /*
+ * Determine total sizes, and put offsets in sh_entsize. For now
+ * this is done generically; there doesn't appear to be any
+ * special cases for the architectures.
+ */
layout_sections(info->mod, info);
layout_symtab(info->mod, info);
@@ -3639,6 +3686,9 @@ static noinline int do_init_module(struct module *mod)
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
+ /* Delay uevent until module has finished its init routine */
+ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
/*
* We need to finish all async code before the module init sequence
* is done. This has potential to deadlock. For example, a newly
@@ -3677,6 +3727,10 @@ static noinline int do_init_module(struct module *mod)
mod->init_layout.ro_size = 0;
mod->init_layout.ro_after_init_size = 0;
mod->init_layout.text_size = 0;
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
+ mod->btf_data = NULL;
+#endif
/*
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
@@ -3779,8 +3833,10 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_enable_nx(mod);
module_enable_x(mod);
- /* Mark state as coming so strong_try_module_get() ignores us,
- * but kallsyms etc. can see us. */
+ /*
+ * Mark state as coming so strong_try_module_get() ignores us,
+ * but kallsyms etc. can see us.
+ */
mod->state = MODULE_STATE_COMING;
mutex_unlock(&module_mutex);
@@ -3827,8 +3883,10 @@ static int unknown_module_param_cb(char *param, char *val, const char *modname,
return 0;
}
-/* Allocate and load the module: note that size of section 0 is always
- zero, and we rely on this for optional sections. */
+/*
+ * Allocate and load the module: note that size of section 0 is always
+ * zero, and we rely on this for optional sections.
+ */
static int load_module(struct load_info *info, const char __user *uargs,
int flags)
{
@@ -3902,8 +3960,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
init_param_lock(mod);
- /* Now we've got everything in the final locations, we can
- * find optional sections. */
+ /*
+ * Now we've got everything in the final locations, we can
+ * find optional sections.
+ */
err = find_module_sections(mod, info);
if (err)
goto free_unload;
@@ -3991,6 +4051,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
bug_cleanup:
+ mod->state = MODULE_STATE_GOING;
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);
@@ -4116,8 +4177,10 @@ static const char *find_kallsyms_symbol(struct module *mod,
bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
- /* Scan for closest preceding symbol, and next symbol. (ELF
- starts real symbols at 1). */
+ /*
+ * Scan for closest preceding symbol, and next symbol. (ELF
+ * starts real symbols at 1).
+ */
for (i = 1; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
unsigned long thisval = kallsyms_symbol_value(sym);
@@ -4125,8 +4188,10 @@ static const char *find_kallsyms_symbol(struct module *mod,
if (sym->st_shndx == SHN_UNDEF)
continue;
- /* We ignore unnamed symbols: they're uninformative
- * and inserted at a whim. */
+ /*
+ * We ignore unnamed symbols: they're uninformative
+ * and inserted at a whim.
+ */
if (*kallsyms_symbol_name(kallsyms, i) == '\0'
|| is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
continue;
@@ -4156,8 +4221,10 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
return ptr;
}
-/* For kallsyms to ask for address resolution. NULL means not found. Careful
- * not to lock to avoid deadlock on oopses, simply disable preemption. */
+/*
+ * For kallsyms to ask for address resolution. NULL means not found. Careful
+ * not to lock to avoid deadlock on oopses, simply disable preemption.
+ */
const char *module_address_lookup(unsigned long addr,
unsigned long *size,
unsigned long *offset,
@@ -4415,11 +4482,12 @@ static int m_show(struct seq_file *m, void *p)
return 0;
}
-/* Format: modulename size refcount deps address
-
- Where refcount is a number or -, and deps is a comma-separated list
- of depends or -.
-*/
+/*
+ * Format: modulename size refcount deps address
+ *
+ * Where refcount is a number or -, and deps is a comma-separated list
+ * of depends or -.
+ */
static const struct seq_operations modules_op = {
.start = m_start,
.next = m_next,
@@ -4489,8 +4557,8 @@ out:
return e;
}
-/*
- * is_module_address - is this address inside a module?
+/**
+ * is_module_address() - is this address inside a module?
* @addr: the address to check.
*
* See is_module_text_address() if you simply want to see if the address
@@ -4507,8 +4575,8 @@ bool is_module_address(unsigned long addr)
return ret;
}
-/*
- * __module_address - get the module which contains an address.
+/**
+ * __module_address() - get the module which contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
@@ -4532,8 +4600,8 @@ struct module *__module_address(unsigned long addr)
return mod;
}
-/*
- * is_module_text_address - is this address inside module code?
+/**
+ * is_module_text_address() - is this address inside module code?
* @addr: the address to check.
*
* See is_module_address() if you simply want to see if the address is
@@ -4551,8 +4619,8 @@ bool is_module_text_address(unsigned long addr)
return ret;
}
-/*
- * __module_text_address - get the module whose code contains an address.
+/**
+ * __module_text_address() - get the module whose code contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
@@ -4591,8 +4659,10 @@ void print_modules(void)
}
#ifdef CONFIG_MODVERSIONS
-/* Generate the signature for all relevant module structures here.
- * If these change, we don't want to try to parse the module. */
+/*
+ * Generate the signature for all relevant module structures here.
+ * If these change, we don't want to try to parse the module.
+ */
void module_layout(struct module *mod,
struct modversion_info *ver,
struct kernel_param *kp,
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 12dd41b39a7f..abc01fcad8c7 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -153,7 +153,6 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
struct nsproxy *old_ns = tsk->nsproxy;
struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
struct nsproxy *new_ns;
- int ret;
if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
CLONE_NEWPID | CLONE_NEWNET |
@@ -173,18 +172,14 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
* it along with CLONE_NEWIPC.
*/
if ((flags & (CLONE_NEWIPC | CLONE_SYSVSEM)) ==
- (CLONE_NEWIPC | CLONE_SYSVSEM))
+ (CLONE_NEWIPC | CLONE_SYSVSEM))
return -EINVAL;
new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
if (IS_ERR(new_ns))
return PTR_ERR(new_ns);
- ret = timens_on_fork(new_ns, tsk);
- if (ret) {
- free_nsproxy(new_ns);
- return ret;
- }
+ timens_on_fork(new_ns, tsk);
tsk->nsproxy = new_ns;
return 0;
@@ -250,8 +245,8 @@ void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
p->nsproxy = new;
task_unlock(p);
- if (ns && atomic_dec_and_test(&ns->count))
- free_nsproxy(ns);
+ if (ns)
+ put_nsproxy(ns);
}
void exit_task_namespaces(struct task_struct *p)
diff --git a/kernel/panic.c b/kernel/panic.c
index 396142ee43fd..332736a72a58 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -605,7 +605,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
panic("panic_on_warn set ...\n");
}
- dump_stack();
+ if (!regs)
+ dump_stack();
print_irqtrace_events(current);
diff --git a/kernel/params.c b/kernel/params.c
index 3835fb82c64b..2daa2780a92c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -530,7 +530,7 @@ struct module_param_attrs
{
unsigned int num;
struct attribute_group grp;
- struct param_attribute attrs[0];
+ struct param_attribute attrs[];
};
#ifdef CONFIG_SYSFS
@@ -843,18 +843,16 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
}
-extern const struct module_version_attribute *__start___modver[];
-extern const struct module_version_attribute *__stop___modver[];
+extern const struct module_version_attribute __start___modver[];
+extern const struct module_version_attribute __stop___modver[];
static void __init version_sysfs_builtin(void)
{
- const struct module_version_attribute **p;
+ const struct module_version_attribute *vattr;
struct module_kobject *mk;
int err;
- for (p = __start___modver; p < __stop___modver; p++) {
- const struct module_version_attribute *vattr = *p;
-
+ for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
mk = locate_module_kobject(vattr->module_name);
if (mk) {
err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
diff --git a/kernel/pid.c b/kernel/pid.c
index a96bc4bf4f86..ebdf9c60cd0b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -73,7 +73,7 @@ int pid_max_max = PID_MAX_LIMIT;
* the scheme scales to up to 4 million PIDs, runtime.
*/
struct pid_namespace init_pid_ns = {
- .kref = KREF_INIT(2),
+ .ns.count = REFCOUNT_INIT(2),
.idr = IDR_INIT(init_pid_ns.idr),
.pid_allocated = PIDNS_ADDING,
.level = 0,
@@ -628,7 +628,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
struct file *file;
int ret;
- ret = mutex_lock_killable(&task->signal->exec_update_mutex);
+ ret = down_read_killable(&task->signal->exec_update_lock);
if (ret)
return ERR_PTR(ret);
@@ -637,7 +637,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
else
file = ERR_PTR(-EPERM);
- mutex_unlock(&task->signal->exec_update_mutex);
+ up_read(&task->signal->exec_update_lock);
return file ?: ERR_PTR(-EBADF);
}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 9de21803a8ae..ca43239a255a 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -102,7 +102,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
goto out_free_idr;
ns->ns.ops = &pidns_operations;
- kref_init(&ns->kref);
+ refcount_set(&ns->ns.count, 1);
ns->level = level;
ns->parent = get_pid_ns(parent_pid_ns);
ns->user_ns = get_user_ns(user_ns);
@@ -148,22 +148,15 @@ struct pid_namespace *copy_pid_ns(unsigned long flags,
return create_pid_namespace(user_ns, old_ns);
}
-static void free_pid_ns(struct kref *kref)
-{
- struct pid_namespace *ns;
-
- ns = container_of(kref, struct pid_namespace, kref);
- destroy_pid_namespace(ns);
-}
-
void put_pid_ns(struct pid_namespace *ns)
{
struct pid_namespace *parent;
while (ns != &init_pid_ns) {
parent = ns->parent;
- if (!kref_put(&ns->kref, free_pid_ns))
+ if (!refcount_dec_and_test(&ns->ns.count))
break;
+ destroy_pid_namespace(ns);
ns = parent;
}
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a7320f07689d..6bfe3ead10ad 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -139,7 +139,6 @@ config PM_SLEEP_SMP_NONZERO_CPU
config PM_AUTOSLEEP
bool "Opportunistic sleep"
depends on PM_SLEEP
- default n
help
Allow the kernel to trigger a system transition into a global sleep
state automatically whenever there are no active wakeup sources.
@@ -147,7 +146,6 @@ config PM_AUTOSLEEP
config PM_WAKELOCKS
bool "User space wakeup sources interface"
depends on PM_SLEEP
- default n
help
Allow user space to create, activate and deactivate wakeup source
objects with the help of a sysfs-based interface.
@@ -293,7 +291,6 @@ config PM_GENERIC_DOMAINS
config WQ_POWER_EFFICIENT_DEFAULT
bool "Enable workqueue power-efficient mode by default"
depends on PM
- default n
help
Per-cpu workqueues are generally preferred because they show
better performance thanks to cache locality; unfortunately,
@@ -322,15 +319,14 @@ config CPU_PM
bool
config ENERGY_MODEL
- bool "Energy Model for CPUs"
+ bool "Energy Model for devices with DVFS (CPUs, GPUs, etc)"
depends on SMP
depends on CPU_FREQ
- default n
help
Several subsystems (thermal and/or the task scheduler for example)
- can leverage information about the energy consumed by CPUs to make
- smarter decisions. This config option enables the framework from
- which subsystems can access the energy models.
+ can leverage information about the energy consumed by devices to
+ make smarter decisions. This config option enables the framework
+ from which subsystems can access the energy models.
The exact usage of the energy model is subsystem-dependent.
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index c1ff7fa030ab..1358fa4abfa8 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -52,6 +52,17 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
+static int em_debug_units_show(struct seq_file *s, void *unused)
+{
+ struct em_perf_domain *pd = s->private;
+ char *units = pd->milliwatts ? "milliWatts" : "bogoWatts";
+
+ seq_printf(s, "%s\n", units);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_units);
+
static void em_debug_create_pd(struct device *dev)
{
struct dentry *d;
@@ -64,6 +75,8 @@ static void em_debug_create_pd(struct device *dev)
debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
&em_debug_cpus_fops);
+ debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);
+
/* Create a sub-directory for each performance state */
for (i = 0; i < dev->em_pd->nr_perf_states; i++)
em_debug_create_ps(&dev->em_pd->table[i], d);
@@ -130,7 +143,7 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
/*
* The power returned by active_state() is expected to be
- * positive, in milli-watts and to fit into 16 bits.
+ * positive and to fit into 16 bits.
*/
if (!power || power > EM_MAX_POWER) {
dev_err(dev, "EM: invalid power: %lu\n",
@@ -250,17 +263,24 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
* @cpus : Pointer to cpumask_t, which in case of a CPU device is
* obligatory. It can be taken from i.e. 'policy->cpus'. For other
* type of devices this should be set to NULL.
+ * @milliwatts : Flag indicating that the power values are in milliWatts or
+ * in some other scale. It must be set properly.
*
* Create Energy Model tables for a performance domain using the callbacks
* defined in cb.
*
+ * It is important to set @milliwatts to the correct value. Some kernel
+ * sub-systems might rely on this flag and check if all devices in the EM are
+ * using the same scale.
+ *
* If multiple clients register the same performance domain, all but the first
* registration will be ignored.
*
* Return 0 on success
*/
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *cpus)
+ struct em_data_callback *cb, cpumask_t *cpus,
+ bool milliwatts)
{
unsigned long cap, prev_cap = 0;
int cpu, ret;
@@ -313,6 +333,8 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
if (ret)
goto unlock;
+ dev->em_pd->milliwatts = milliwatts;
+
em_debug_create_pd(dev);
dev_info(dev, "EM: created perf domain\n");
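
A hedged registration sketch for the new @milliwatts argument, loosely modelled on a cpufreq driver. example_active_power(), the state count of 4 and the policy variable are assumptions for illustration; EM_DATA_CB() and the active_power() callback shape are taken from the existing energy-model API:

#include <linux/cpufreq.h>
#include <linux/energy_model.h>

static int example_active_power(unsigned long *power, unsigned long *freq,
				struct device *dev)
{
	/* Fill *power (in milliwatts here) and *freq (kHz) for the next state. */
	return 0;
}

static int example_register_em(struct device *cpu_dev, struct cpufreq_policy *policy)
{
	struct em_data_callback em_cb = EM_DATA_CB(example_active_power);

	/* true: example_active_power() reports real milliwatts, not bogoWatts. */
	return em_dev_register_perf_domain(cpu_dev, 4, &em_cb, policy->cpus, true);
}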
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 2fc7d509a34f..da0b41914177 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -326,7 +326,7 @@ static int create_image(int platform_mode)
if (!in_suspend) {
events_check_enabled = false;
- clear_free_pages();
+ clear_or_poison_free_pages();
}
platform_leave(platform_mode);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 0aefd6f57e0a..12c7e1bb442f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -387,7 +387,7 @@ static struct attribute *suspend_attrs[] = {
NULL,
};
-static struct attribute_group suspend_attr_group = {
+static const struct attribute_group suspend_attr_group = {
.name = "suspend_stats",
.attrs = suspend_attrs,
};
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 24f12d534515..778bf431ec02 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -106,7 +106,7 @@ extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);
-extern void clear_free_pages(void);
+extern void clear_or_poison_free_pages(void);
/**
* Auxiliary structure used for reading the snapshot image data and
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4b6a54da7e65..50cc63534486 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -146,7 +146,7 @@ int freeze_processes(void)
BUG_ON(in_atomic());
/*
- * Now that the whole userspace is frozen we need to disbale
+ * Now that the whole userspace is frozen we need to disable
* the OOM killer to disallow any further interference with
* killable tasks. There is no guarantee oom victims will
* ever reach a point they go away we have to wait with a timeout.
@@ -235,7 +235,7 @@ void thaw_kernel_threads(void)
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
- if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+ if (p->flags & PF_KTHREAD)
__thaw_task(p);
}
read_unlock(&tasklist_lock);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 46b1804c1ddf..d63560e1cf87 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -76,6 +76,40 @@ static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
+
+/*
+ * The calls to set_direct_map_*() should not fail because remapping a page
+ * here means that we only update protection bits in an existing PTE.
+ * It is still worth to have a warning here if something changes and this
+ * will no longer be the case.
+ */
+static inline void hibernate_map_page(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ int ret = set_direct_map_default_noflush(page);
+
+ if (ret)
+ pr_warn_once("Failed to remap page\n");
+ } else {
+ debug_pagealloc_map_pages(page, 1);
+ }
+}
+
+static inline void hibernate_unmap_page(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ unsigned long addr = (unsigned long)page_address(page);
+ int ret = set_direct_map_invalid_noflush(page);
+
+ if (ret)
+ pr_warn_once("Failed to remap page\n");
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ debug_pagealloc_unmap_pages(page, 1);
+ }
+}
+
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
@@ -1144,7 +1178,15 @@ void free_basic_memory_bitmaps(void)
pr_debug("Basic memory bitmaps freed\n");
}
-void clear_free_pages(void)
+static void clear_or_poison_free_page(struct page *page)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, 1);
+ else if (want_init_on_free())
+ clear_highpage(page);
+}
+
+void clear_or_poison_free_pages(void)
{
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
@@ -1152,12 +1194,12 @@ void clear_free_pages(void)
if (WARN_ON(!(free_pages_map)))
return;
- if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+ if (page_poisoning_enabled() || want_init_on_free()) {
memory_bm_position_reset(bm);
pfn = memory_bm_next_pfn(bm);
while (pfn != BM_END_OF_MAP) {
if (pfn_valid(pfn))
- clear_highpage(pfn_to_page(pfn));
+ clear_or_poison_free_page(pfn_to_page(pfn));
pfn = memory_bm_next_pfn(bm);
}
@@ -1355,9 +1397,9 @@ static void safe_copy_page(void *dst, struct page *s_page)
if (kernel_page_present(s_page)) {
do_copy_page(dst, page_address(s_page));
} else {
- kernel_map_pages(s_page, 1, 1);
+ hibernate_map_page(s_page);
do_copy_page(dst, page_address(s_page));
- kernel_map_pages(s_page, 1, 0);
+ hibernate_unmap_page(s_page);
}
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 32391acc806b..d8cae434f9eb 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -224,6 +224,7 @@ EXPORT_SYMBOL_GPL(suspend_set_ops);
/**
* suspend_valid_only_mem - Generic memory-only valid callback.
+ * @state: Target system sleep state.
*
* Platform drivers that implement mem suspend only and only need to check for
* that in their .valid() callback can use this instead of rolling their own
@@ -335,6 +336,7 @@ static int suspend_test(int level)
/**
* suspend_prepare - Prepare for entering system sleep state.
+ * @state: Target system sleep state.
*
* Common code run for every system sleep state that can be entered (except for
* hibernation). Run suspend notifiers, allocate the "suspend" console and
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index c73f2e295167..72e33054a2e1 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -497,10 +497,10 @@ static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
- flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
+ flush_swap_writer(handle);
}
if (error)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index fe64a49344bf..5a95c688621f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -491,52 +491,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
*trunc_msg_len = 0;
}
-/* insert record into the buffer, discard old ones, update heads */
-static int log_store(u32 caller_id, int facility, int level,
- enum log_flags flags, u64 ts_nsec,
- const struct dev_printk_info *dev_info,
- const char *text, u16 text_len)
-{
- struct prb_reserved_entry e;
- struct printk_record r;
- u16 trunc_msg_len = 0;
-
- prb_rec_init_wr(&r, text_len);
-
- if (!prb_reserve(&e, prb, &r)) {
- /* truncate the message if it is too long for empty buffer */
- truncate_msg(&text_len, &trunc_msg_len);
- prb_rec_init_wr(&r, text_len + trunc_msg_len);
- /* survive when the log buffer is too small for trunc_msg */
- if (!prb_reserve(&e, prb, &r))
- return 0;
- }
-
- /* fill message */
- memcpy(&r.text_buf[0], text, text_len);
- if (trunc_msg_len)
- memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
- r.info->text_len = text_len + trunc_msg_len;
- r.info->facility = facility;
- r.info->level = level & 7;
- r.info->flags = flags & 0x1f;
- if (ts_nsec > 0)
- r.info->ts_nsec = ts_nsec;
- else
- r.info->ts_nsec = local_clock();
- r.info->caller_id = caller_id;
- if (dev_info)
- memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
-
- /* insert message */
- if ((flags & LOG_CONT) || !(flags & LOG_NEWLINE))
- prb_commit(&e);
- else
- prb_final_commit(&e);
-
- return (text_len + trunc_msg_len);
-}
-
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
static int syslog_action_restricted(int type)
@@ -741,7 +695,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
if (LOG_FACILITY(u) != 0)
facility = LOG_FACILITY(u);
endp++;
- len -= endp - line;
line = endp;
}
}
@@ -1172,7 +1125,7 @@ void __init setup_log_buf(int early)
new_descs, ilog2(new_descs_count),
new_infos);
- logbuf_lock_irqsave(flags);
+ printk_safe_enter_irqsave(flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
@@ -1189,7 +1142,7 @@ void __init setup_log_buf(int early)
*/
prb = &printk_rb_dynamic;
- logbuf_unlock_irqrestore(flags);
+ printk_safe_exit_irqrestore(flags);
if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
@@ -1338,11 +1291,16 @@ static size_t info_print_prefix(const struct printk_info *info, bool syslog,
* done:
*
* - Add prefix for each line.
+ * - Drop truncated lines that no longer fit into the buffer.
* - Add the trailing newline that has been removed in vprintk_store().
- * - Drop truncated lines that do not longer fit into the buffer.
+ * - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1.
*
* Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
*/
static size_t record_print_text(struct printk_record *r, bool syslog,
bool time)
@@ -1385,26 +1343,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
/*
* Truncate the text if there is not enough space to add the
- * prefix and a trailing newline.
+ * prefix and a trailing newline and a terminator.
*/
- if (len + prefix_len + text_len + 1 > buf_size) {
+ if (len + prefix_len + text_len + 1 + 1 > buf_size) {
/* Drop even the current line if no space. */
- if (len + prefix_len + line_len + 1 > buf_size)
+ if (len + prefix_len + line_len + 1 + 1 > buf_size)
break;
- text_len = buf_size - len - prefix_len - 1;
+ text_len = buf_size - len - prefix_len - 1 - 1;
truncated = true;
}
memmove(text + prefix_len, text, text_len);
memcpy(text, prefix, prefix_len);
+ /*
+ * Increment the prepared length to include the text and
+ * prefix that were just moved+copied. Also increment for the
+ * newline at the end of this line. If this is the last line,
+ * there is no newline, but it will be added immediately below.
+ */
len += prefix_len + line_len + 1;
-
if (text_len == line_len) {
/*
- * Add the trailing newline removed in
- * vprintk_store().
+ * This is the last line. Add the trailing newline
+ * removed in vprintk_store().
*/
text[prefix_len + line_len] = '\n';
break;
@@ -1429,6 +1392,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
text_len -= line_len + 1;
}
+ /*
+ * If a buffer was provided, it will be terminated. Space for the
+ * string terminator is guaranteed to be available. The terminator is
+ * not counted in the return value.
+ */
+ if (buf_size > 0)
+ r->text_buf[len] = 0;
+
return len;
}
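
A minimal sketch of a reader that relies on the termination guarantee introduced above, assuming a record is already present at @seq. example_dump_one() and the 1 KiB buffer are illustrative; prb_rec_init_rd(), prb_read_valid() and the global prb come from the existing printk ringbuffer code:

/* Illustrative only; mirrors the pattern used by the syslog/console readers. */
static void example_dump_one(u64 seq)
{
	static char text[1024];
	struct printk_info info;
	struct printk_record r;
	size_t len;

	prb_rec_init_rd(&r, &info, text, sizeof(text));
	if (!prb_read_valid(prb, seq, &r))
		return;

	len = record_print_text(&r, true, true);
	/* Guaranteed: len <= sizeof(text) - 1 and text[len] == '\0'. */
	pr_info("%zu: %s", len, text);
}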
@@ -1907,83 +1878,177 @@ static inline u32 printk_caller_id(void)
0x80000000 + raw_smp_processor_id();
}
-static size_t log_output(int facility, int level, enum log_flags lflags,
- const struct dev_printk_info *dev_info,
- char *text, size_t text_len)
+/**
+ * parse_prefix - Parse level and control flags.
+ *
+ * @text: The terminated text message.
+ * @level: A pointer to the current level value, will be updated.
+ * @lflags: A pointer to the current log flags, will be updated.
+ *
+ * @level may be NULL if the caller is not interested in the parsed value.
+ * Otherwise the variable pointed to by @level must be set to
+ * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
+ *
+ * @lflags may be NULL if the caller is not interested in the parsed value.
+ * Otherwise the variable pointed to by @lflags will be OR'd with the parsed
+ * value.
+ *
+ * Return: The length of the parsed level and control flags.
+ */
+static u16 parse_prefix(char *text, int *level, enum log_flags *lflags)
{
- const u32 caller_id = printk_caller_id();
+ u16 prefix_len = 0;
+ int kern_level;
- if (lflags & LOG_CONT) {
- struct prb_reserved_entry e;
- struct printk_record r;
+ while (*text) {
+ kern_level = printk_get_level(text);
+ if (!kern_level)
+ break;
- prb_rec_init_wr(&r, text_len);
- if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
- memcpy(&r.text_buf[r.info->text_len], text, text_len);
- r.info->text_len += text_len;
- if (lflags & LOG_NEWLINE) {
- r.info->flags |= LOG_NEWLINE;
- prb_final_commit(&e);
- } else {
- prb_commit(&e);
- }
- return text_len;
+ switch (kern_level) {
+ case '0' ... '7':
+ if (level && *level == LOGLEVEL_DEFAULT)
+ *level = kern_level - '0';
+ break;
+ case 'c': /* KERN_CONT */
+ if (lflags)
+ *lflags |= LOG_CONT;
+ }
+
+ prefix_len += 2;
+ text += 2;
+ }
+
+ return prefix_len;
+}
+
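
For reference, a small sketch of what parse_prefix() extracts from a facility-0 message. KERN_WARNING expands to the two-byte SOH + '4' prefix, so the call below returns 2 and sets the level to 4; example_parse() itself is illustrative, not part of the patch:

static void example_parse(void)
{
	static char buf[] = KERN_WARNING "disk on fire";
	int level = LOGLEVEL_DEFAULT;
	enum log_flags lflags = 0;
	u16 skipped;

	skipped = parse_prefix(buf, &level, &lflags);
	/* skipped == 2, level == 4, lflags still 0 (no KERN_CONT marker). */
	pr_debug("skipped %u bytes, level %d\n", skipped, level);
}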
+static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lflags,
+ const char *fmt, va_list args)
+{
+ u16 text_len;
+
+ text_len = vscnprintf(text, size, fmt, args);
+
+ /* Mark and strip a trailing newline. */
+ if (text_len && text[text_len - 1] == '\n') {
+ text_len--;
+ *lflags |= LOG_NEWLINE;
+ }
+
+ /* Strip log level and control flags. */
+ if (facility == 0) {
+ u16 prefix_len;
+
+ prefix_len = parse_prefix(text, NULL, NULL);
+ if (prefix_len) {
+ text_len -= prefix_len;
+ memmove(text, text + prefix_len, text_len);
}
}
- /* Store it in the record log */
- return log_store(caller_id, facility, level, lflags, 0,
- dev_info, text, text_len);
+ return text_len;
}
-/* Must be called under logbuf_lock. */
+__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
- static char textbuf[LOG_LINE_MAX];
- char *text = textbuf;
- size_t text_len;
+ const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
enum log_flags lflags = 0;
+ struct printk_record r;
+ u16 trunc_msg_len = 0;
+ char prefix_buf[8];
+ u16 reserve_size;
+ va_list args2;
+ u16 text_len;
+ u64 ts_nsec;
/*
- * The printf needs to come first; we need the syslog
- * prefix which might be passed-in as a parameter.
+ * Since the duration of printk() can vary depending on the message
+ * and state of the ringbuffer, grab the timestamp now so that it is
+ * close to the call of printk(). This provides a more deterministic
+ * timestamp with respect to the caller.
*/
- text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
+ ts_nsec = local_clock();
- /* mark and strip a trailing newline */
- if (text_len && text[text_len-1] == '\n') {
- text_len--;
+ /*
+ * The sprintf needs to come first since the syslog prefix might be
+ * passed in as a parameter. An extra byte must be reserved so that
+ * later the vscnprintf() into the reserved buffer has room for the
+ * terminating '\0', which is not counted by vsnprintf().
+ */
+ va_copy(args2, args);
+ reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
+ va_end(args2);
+
+ if (reserve_size > LOG_LINE_MAX)
+ reserve_size = LOG_LINE_MAX;
+
+ /* Extract log level or control flags. */
+ if (facility == 0)
+ parse_prefix(&prefix_buf[0], &level, &lflags);
+
+ if (level == LOGLEVEL_DEFAULT)
+ level = default_message_loglevel;
+
+ if (dev_info)
lflags |= LOG_NEWLINE;
- }
- /* strip kernel syslog prefix and extract log level or control flags */
- if (facility == 0) {
- int kern_level;
+ if (lflags & LOG_CONT) {
+ prb_rec_init_wr(&r, reserve_size);
+ if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
+ text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
+ facility, &lflags, fmt, args);
+ r.info->text_len += text_len;
- while ((kern_level = printk_get_level(text)) != 0) {
- switch (kern_level) {
- case '0' ... '7':
- if (level == LOGLEVEL_DEFAULT)
- level = kern_level - '0';
- break;
- case 'c': /* KERN_CONT */
- lflags |= LOG_CONT;
+ if (lflags & LOG_NEWLINE) {
+ r.info->flags |= LOG_NEWLINE;
+ prb_final_commit(&e);
+ } else {
+ prb_commit(&e);
}
- text_len -= 2;
- text += 2;
+ return text_len;
}
}
- if (level == LOGLEVEL_DEFAULT)
- level = default_message_loglevel;
+ /*
+ * Explicitly initialize the record before every prb_reserve() call.
+ * prb_reserve_in_last() and prb_reserve() purposely invalidate the
+ * structure when they fail.
+ */
+ prb_rec_init_wr(&r, reserve_size);
+ if (!prb_reserve(&e, prb, &r)) {
+ /* truncate the message if it is too long for empty buffer */
+ truncate_msg(&reserve_size, &trunc_msg_len);
+ prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
+ if (!prb_reserve(&e, prb, &r))
+ return 0;
+ }
+
+ /* fill message */
+ text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &lflags, fmt, args);
+ if (trunc_msg_len)
+ memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
+ r.info->text_len = text_len + trunc_msg_len;
+ r.info->facility = facility;
+ r.info->level = level & 7;
+ r.info->flags = lflags & 0x1f;
+ r.info->ts_nsec = ts_nsec;
+ r.info->caller_id = caller_id;
if (dev_info)
- lflags |= LOG_NEWLINE;
+ memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
+
+ /* A message without a trailing newline can be continued. */
+ if (!(lflags & LOG_NEWLINE))
+ prb_commit(&e);
+ else
+ prb_final_commit(&e);
- return log_output(facility, level, lflags, dev_info, text, text_len);
+ return (text_len + trunc_msg_len);
}
asmlinkage int vprintk_emit(int facility, int level,
@@ -2006,10 +2071,9 @@ asmlinkage int vprintk_emit(int facility, int level,
boot_delay_msec(level);
printk_delay();
- /* This stops the holder of console_sem just where we want him */
- logbuf_lock_irqsave(flags);
+ printk_safe_enter_irqsave(flags);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
- logbuf_unlock_irqrestore(flags);
+ printk_safe_exit_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -2189,8 +2253,15 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
- if (str[0] == 0)
+ /*
+ * console="" or console=null have been suggested as a way to
+	 * disable console output. Use ttynull, which has been created
+	 * for exactly this purpose.
+ */
+ if (str[0] == 0 || strcmp(str, "null") == 0) {
+ __add_preferred_console("ttynull", 0, NULL, NULL, true);
return 1;
+ }
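
With this hunk, appending console=null (or console="") to the kernel command line now registers the ttynull console instead of being silently ignored, which is the intended way to suppress console output per the comment above.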
if (_braille_console_setup(&str, &brl_options))
return 1;
@@ -3025,10 +3096,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
wake_up_interruptible(&log_wait);
}
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
- .func = wake_up_klogd_work_func,
- .flags = ATOMIC_INIT(IRQ_WORK_LAZY),
-};
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
+ IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
void wake_up_klogd(void)
{
@@ -3376,7 +3445,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (prb_read_valid_info(prb, seq, &info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l += get_record_print_text_size(&info, line_count, true, time);
+ l += get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
@@ -3386,7 +3455,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
&info, &line_count)) {
if (r.info->seq >= dumper->next_seq)
break;
- l -= get_record_print_text_size(&info, line_count, true, time);
+ l -= get_record_print_text_size(&info, line_count, syslog, time);
seq = r.info->seq + 1;
}
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 24a960a89aa8..8a7b7362c0dd 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -345,7 +345,7 @@ DESC_ID((id) - DESCS_COUNT(desc_ring))
*/
struct prb_data_block {
unsigned long id;
- char data[0];
+ char data[];
};
/*
@@ -559,11 +559,12 @@ static void desc_make_reusable(struct prb_desc_ring *desc_ring,
* on error the caller can re-load the tail lpos to determine the situation.
*/
static bool data_make_reusable(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring,
unsigned long lpos_begin,
unsigned long lpos_end,
unsigned long *lpos_out)
{
+
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct prb_data_block *blk;
enum desc_state d_state;
@@ -625,10 +626,9 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
* descriptors into the reusable state if the tail is pushed beyond
* their associated data block.
*/
-static bool data_push_tail(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring,
- unsigned long lpos)
+static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
unsigned long tail_lpos_new;
unsigned long tail_lpos;
unsigned long next_lpos;
@@ -669,8 +669,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb,
* Make all descriptors reusable that are associated with
* data blocks before @lpos.
*/
- if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
- &next_lpos)) {
+ if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
/*
* 1. Guarantee the block ID loaded in
* data_make_reusable() is performed before
@@ -807,7 +806,7 @@ static bool desc_push_tail(struct printk_ringbuffer *rb,
* data blocks once their associated descriptor is gone.
*/
- if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
+ if (!data_push_tail(rb, desc.text_blk_lpos.next))
return false;
/*
@@ -882,8 +881,6 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
do {
- desc = to_desc(desc_ring, head_id);
-
id = DESC_ID(head_id + 1);
id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
@@ -1021,10 +1018,10 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
* if necessary. This function also associates the data block with
* a specified descriptor.
*/
-static char *data_alloc(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring, unsigned int size,
+static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long begin_lpos;
unsigned long next_lpos;
@@ -1043,7 +1040,7 @@ static char *data_alloc(struct printk_ringbuffer *rb,
do {
next_lpos = get_next_lpos(data_ring, begin_lpos, size);
- if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
+ if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
/* Failed to allocate, specify a data-less block. */
blk_lpos->begin = FAILED_LPOS;
blk_lpos->next = FAILED_LPOS;
@@ -1102,10 +1099,10 @@ static char *data_alloc(struct printk_ringbuffer *rb,
* Return a pointer to the beginning of the entire data buffer or NULL on
* failure.
*/
-static char *data_realloc(struct printk_ringbuffer *rb,
- struct prb_data_ring *data_ring, unsigned int size,
+static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
+ struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long head_lpos;
unsigned long next_lpos;
@@ -1132,7 +1129,7 @@ static char *data_realloc(struct printk_ringbuffer *rb,
return &blk->data[0];
}
- if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
+ if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
return NULL;
/* The memory barrier involvement is the same as data_alloc:A. */
@@ -1397,7 +1394,7 @@ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer
if (r->text_buf_size > max_size)
goto fail;
- r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+ r->text_buf = data_alloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
} else {
if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
@@ -1421,7 +1418,7 @@ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer
if (r->text_buf_size > max_size)
goto fail;
- r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
+ r->text_buf = data_realloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
}
if (r->text_buf_size && !r->text_buf)
@@ -1549,8 +1546,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
if (info->seq > 0)
desc_make_final(desc_ring, DESC_ID(id - 1));
- r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
- &d->text_blk_lpos, id);
+ r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
/* If text data allocation fails, a data-less record is committed. */
if (r->text_buf_size && !r->text_buf) {
prb_commit(e);
@@ -1722,7 +1718,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
/* Caller interested in the line count? */
if (line_count)
- *line_count = count_lines(data, data_size);
+ *line_count = count_lines(data, len);
/* Caller interested in the data content? */
if (!buf || !buf_size)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 43d6179508d6..61db50f7ca86 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -57,7 +57,7 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
return 0;
}
- ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+ ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
mmput(mm);
return ret;
@@ -117,9 +117,9 @@ void __ptrace_unlink(struct task_struct *child)
const struct cred *old_cred;
BUG_ON(!child->ptrace);
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-#ifdef TIF_SYSCALL_EMU
- clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+ clear_task_syscall_work(child, SYSCALL_TRACE);
+#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
+ clear_task_syscall_work(child, SYSCALL_EMU);
#endif
child->parent = child->real_parent;
@@ -264,17 +264,11 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
return ret;
}
-static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
- unsigned int mode)
+static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
- int ret;
-
if (mode & PTRACE_MODE_NOAUDIT)
- ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
- else
- ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
-
- return ret == 0;
+ return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
+ return ns_capable(ns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
@@ -326,7 +320,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
gid_eq(caller_gid, tcred->sgid) &&
gid_eq(caller_gid, tcred->gid))
goto ok;
- if (ptrace_has_cap(cred, tcred->user_ns, mode))
+ if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
@@ -345,7 +339,7 @@ ok:
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
- !ptrace_has_cap(cred, mm->user_ns, mode)))
+ !ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
return security_ptrace_access_check(task, mode);
@@ -812,15 +806,15 @@ static int ptrace_resume(struct task_struct *child, long request,
return -EIO;
if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ set_task_syscall_work(child, SYSCALL_TRACE);
else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ clear_task_syscall_work(child, SYSCALL_TRACE);
-#ifdef TIF_SYSCALL_EMU
+#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
- set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+ set_task_syscall_work(child, SYSCALL_EMU);
else
- clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+ clear_task_syscall_work(child, SYSCALL_EMU);
#endif
if (is_singleblock(request)) {
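
The ptrace hunks above replace the TIF_SYSCALL_TRACE/TIF_SYSCALL_EMU thread-info flags with the generic-entry SYSCALL_WORK accessors; the guard's new CONFIG_GENERIC_ENTRY leg covers architectures that handle syscall-emulation work through the generic entry code without ever defining TIF_SYSCALL_EMU. As a rough sketch only (the definitions live in the thread_info/entry headers and are not part of this diff), the accessors reduce to bit operations on a per-task syscall_work word:

	/* Approximate shape under CONFIG_GENERIC_ENTRY; treat as illustrative. */
	#define set_task_syscall_work(t, fl) \
		set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
	#define clear_task_syscall_work(t, fl) \
		clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

So set_task_syscall_work(child, SYSCALL_TRACE) in ptrace_resume() sets the bit that the common syscall entry/exit code checks for tracing work, replacing the old per-architecture TIF flag manipulation.
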
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index b71e21f73c40..3128b7cf8e1f 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -95,6 +95,7 @@ config TASKS_RUDE_RCU
config TASKS_TRACE_RCU
def_bool 0
+ select IRQ_WORK
help
This option enables a task-based RCU implementation that uses
explicit rcu_read_lock_trace() read-side markers, and allows
@@ -188,8 +189,8 @@ config RCU_FAST_NO_HZ
config RCU_BOOST
bool "Enable RCU priority boosting"
- depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
- default n
+ depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT
+ default y if PREEMPT_RT
help
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
@@ -221,19 +222,23 @@ config RCU_NOCB_CPU
Use this option to reduce OS jitter for aggressive HPC or
real-time workloads. It can also be used to offload RCU
callback invocation to energy-efficient CPUs in battery-powered
- asymmetric multiprocessors.
+ asymmetric multiprocessors. The price of this reduced jitter
+ is that the overhead of call_rcu() increases and that some
+ workloads will incur significant increases in context-switch
+ rates.
This option offloads callback invocation from the set of CPUs
specified at boot time by the rcu_nocbs parameter. For each
such CPU, a kthread ("rcuox/N") will be created to invoke
callbacks, where the "N" is the CPU being offloaded, and where
- the "p" for RCU-preempt (PREEMPTION kernels) and "s" for RCU-sched
- (!PREEMPTION kernels). Nothing prevents this kthread from running
- on the specified CPUs, but (1) the kthreads may be preempted
- between each callback, and (2) affinity or cgroups can be used
- to force the kthreads to run on whatever set of CPUs is desired.
-
- Say Y here if you want to help to debug reduced OS jitter.
+ the "x" is "p" for RCU-preempt (PREEMPTION kernels) and "s" for
+ RCU-sched (!PREEMPTION kernels). Nothing prevents this kthread
+ from running on the specified CPUs, but (1) the kthreads may be
+ preempted between each callback, and (2) affinity or cgroups can
+ be used to force the kthreads to run on whatever set of CPUs is
+ desired.
+
+ Say Y here if you need reduced OS jitter, despite added overhead.
Say N here if you are unsure.
config TASKS_TRACE_RCU_READ_MB
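
The reworked RCU_NOCB_CPU help text above is easier to follow with a concrete invocation; the CPU list below is purely illustrative and depends on the workload being isolated:

	# Boot parameter: offload RCU callback invocation for CPUs 2-7.
	rcu_nocbs=2-7

Each listed CPU then gets its rcuox/N kthread as described in the help text, and affinity or cgroups can be used afterwards to confine those kthreads to housekeeping CPUs, at the cost of the extra call_rcu() overhead the new paragraph warns about.
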
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index e01cba5e4b52..bf0827d4b659 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -378,7 +378,11 @@ do { \
smp_mb__after_unlock_lock(); \
} while (0)
-#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
+#define raw_spin_unlock_rcu_node(p) \
+do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
+} while (0)
#define raw_spin_lock_irq_rcu_node(p) \
do { \
@@ -387,7 +391,10 @@ do { \
} while (0)
#define raw_spin_unlock_irq_rcu_node(p) \
- raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
+do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
+} while (0)
#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
@@ -396,7 +403,10 @@ do { \
} while (0)
#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
- raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
+do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
+} while (0)
#define raw_spin_trylock_rcu_node(p) \
({ \
@@ -533,4 +543,20 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif
+#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
+void show_rcu_tasks_classic_gp_kthread(void);
+#else
+static inline void show_rcu_tasks_classic_gp_kthread(void) {}
+#endif
+#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
+void show_rcu_tasks_rude_gp_kthread(void);
+#else
+static inline void show_rcu_tasks_rude_gp_kthread(void) {}
+#endif
+#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
+void show_rcu_tasks_trace_gp_kthread(void);
+#else
+static inline void show_rcu_tasks_trace_gp_kthread(void) {}
+#endif
+
#endif /* __LINUX_RCU_H */
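
The new lockdep_assert_irqs_disabled() calls in the *_unlock_rcu_node() wrappers above assert that interrupts are still disabled at unlock time, catching callers that re-enable interrupts while holding an rcu_node lock. A minimal usage sketch (rnp is a hypothetical struct rcu_node pointer; the real callers live in tree.c and its plugins):

	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* disables irqs, acquires ->lock */
	/* ... update rcu_node state protected by ->lock ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);	/* lockdep checks irqs are
							 * still off, then releases */

	/* A buggy local_irq_enable() inside the critical section would now
	 * trip the assertion at unlock time instead of going unnoticed. */
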
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 2d2a6b6b9dfb..7f181c9675f7 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -7,10 +7,10 @@
* Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
+#include <linux/cpu.h>
#include <linux/interrupt.h>
-#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
#include "rcu_segcblist.h"
@@ -88,23 +88,135 @@ static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
#endif
}
+/* Get the length of a segment of the rcu_segcblist structure. */
+static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
+{
+ return READ_ONCE(rsclp->seglen[seg]);
+}
+
+/* Return number of callbacks in segmented callback list by summing seglen. */
+long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
+{
+ long len = 0;
+ int i;
+
+ for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
+ len += rcu_segcblist_get_seglen(rsclp, i);
+
+ return len;
+}
+
+/* Set the length of a segment of the rcu_segcblist structure. */
+static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
+{
+ WRITE_ONCE(rsclp->seglen[seg], v);
+}
+
+/* Increase the numeric length of a segment by a specified amount. */
+static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
+{
+ WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
+}
+
+/* Move the 'from' segment's length over to the 'to' segment. */
+static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
+{
+ long len;
+
+ if (from == to)
+ return;
+
+ len = rcu_segcblist_get_seglen(rsclp, from);
+ if (!len)
+ return;
+
+ rcu_segcblist_add_seglen(rsclp, to, len);
+ rcu_segcblist_set_seglen(rsclp, from, 0);
+}
+
+/* Increment segment's length. */
+static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
+{
+ rcu_segcblist_add_seglen(rsclp, seg, 1);
+}
+
/*
* Increase the numeric length of an rcu_segcblist structure by the
* specified amount, which can be negative. This can cause the ->len
* field to disagree with the actual number of callbacks on the structure.
* This increase is fully ordered with respect to the callers accesses
* both before and after.
+ *
+ * So why on earth is a memory barrier required both before and after
+ * the update to the ->len field???
+ *
+ * The reason is that rcu_barrier() locklessly samples each CPU's ->len
+ * field, and if a given CPU's field is zero, avoids IPIing that CPU.
+ * This can of course race with both queuing and invoking of callbacks.
+ * Failing to correctly handle either of these races could result in
+ * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
+ * which rcu_barrier() was obligated to wait on. And if rcu_barrier()
+ * failed to wait on such a callback, unloading certain kernel modules
+ * would result in calls to functions whose code was no longer present in
+ * the kernel, for but one example.
+ *
+ * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
+ * ordered with respect to both list modifications and the rcu_barrier().
+ *
+ * The queuing case is CASE 1 and the invoking case is CASE 2.
+ *
+ * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
+ * call_rcu() just as CPU 1 invokes rcu_barrier(). CPU 0's ->len field
+ * will transition from 0->1, which is one of the transitions that must
+ * be handled carefully. Without the full memory barriers after the ->len
+ * update and at the beginning of rcu_barrier(), the following could happen:
+ *
+ * CPU 0 CPU 1
+ *
+ * call_rcu().
+ * rcu_barrier() sees ->len as 0.
+ * set ->len = 1.
+ * rcu_barrier() does nothing.
+ * module is unloaded.
+ * callback invokes unloaded function!
+ *
+ * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
+ * have unambiguously preceded the return from the racing call_rcu(), which
+ * means that this call_rcu() invocation is OK to not wait on. After all,
+ * you are supposed to make sure that any problematic call_rcu() invocations
+ * happen before the rcu_barrier().
+ *
+ *
+ * CASE 2: Suppose that CPU 0 is invoking its last callback just as
+ * CPU 1 invokes rcu_barrier(). CPU 0's ->len field will transition from
+ * 1->0, which is one of the transitions that must be handled carefully.
+ * Without the full memory barriers before the ->len update and at the
+ * end of rcu_barrier(), the following could happen:
+ *
+ * CPU 0 CPU 1
+ *
+ * start invoking last callback
+ * set ->len = 0 (reordered)
+ * rcu_barrier() sees ->len as 0
+ * rcu_barrier() does nothing.
+ * module is unloaded
+ * callback executing after unloaded!
+ *
+ * With the full barriers, any case where rcu_barrier() sees ->len as 0
+ * will be fully ordered after the completion of the callback function,
+ * so that the module unloading operation is completely safe.
+ *
*/
-static void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
+void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
- smp_mb__before_atomic(); /* Up to the caller! */
+ smp_mb__before_atomic(); // Read header comment above.
atomic_long_add(v, &rsclp->len);
- smp_mb__after_atomic(); /* Up to the caller! */
+ smp_mb__after_atomic(); // Read header comment above.
#else
- smp_mb(); /* Up to the caller! */
+ smp_mb(); // Read header comment above.
WRITE_ONCE(rsclp->len, rsclp->len + v);
- smp_mb(); /* Up to the caller! */
+ smp_mb(); // Read header comment above.
#endif
}
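
The long comment above hinges on rcu_barrier() locklessly sampling each CPU's ->len. A simplified sketch of that consumer side may help; it is an approximation of the tree.c scan (rcu_data and the exact control flow are not part of this diff), not the verbatim implementation:

	/* Approximate shape of rcu_barrier()'s per-CPU scan. */
	for_each_possible_cpu(cpu) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!rcu_segcblist_n_cbs(&rdp->cblist))
			continue;	/* Sampled zero: no need to wait on this CPU. */
		/* Otherwise queue a barrier callback behind the existing ones. */
	}

	/* The smp_mb() pairs in rcu_segcblist_add_len() guarantee that any
	 * 0->1 or 1->0 transition racing with this scan is ordered such
	 * that a sampled zero really does mean "nothing to wait for". */
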
@@ -120,26 +232,6 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
}
/*
- * Exchange the numeric length of the specified rcu_segcblist structure
- * with the specified value. This can cause the ->len field to disagree
- * with the actual number of callbacks on the structure. This exchange is
- * fully ordered with respect to the callers accesses both before and after.
- */
-static long rcu_segcblist_xchg_len(struct rcu_segcblist *rsclp, long v)
-{
-#ifdef CONFIG_RCU_NOCB_CPU
- return atomic_long_xchg(&rsclp->len, v);
-#else
- long ret = rsclp->len;
-
- smp_mb(); /* Up to the caller! */
- WRITE_ONCE(rsclp->len, v);
- smp_mb(); /* Up to the caller! */
- return ret;
-#endif
-}
-
-/*
* Initialize an rcu_segcblist structure.
*/
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
@@ -149,10 +241,12 @@ void rcu_segcblist_init(struct rcu_segcblist *rsclp)
BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
rsclp->head = NULL;
- for (i = 0; i < RCU_CBLIST_NSEGS; i++)
+ for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
rsclp->tails[i] = &rsclp->head;
+ rcu_segcblist_set_seglen(rsclp, i, 0);
+ }
rcu_segcblist_set_len(rsclp, 0);
- rsclp->enabled = 1;
+ rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}
/*
@@ -163,16 +257,21 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
- rsclp->enabled = 0;
+ rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
}
/*
* Mark the specified rcu_segcblist structure as offloaded. This
* structure must be empty.
*/
-void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
- rsclp->offloaded = 1;
+ if (offload) {
+ rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY);
+ rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED);
+ } else {
+ rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
+ }
}
/*
@@ -245,7 +344,7 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
struct rcu_head *rhp)
{
rcu_segcblist_inc_len(rsclp);
- smp_mb(); /* Ensure counts are updated before callback is enqueued. */
+ rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
rhp->next = NULL;
WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
@@ -274,6 +373,7 @@ bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
if (rsclp->tails[i] != rsclp->tails[i - 1])
break;
+ rcu_segcblist_inc_seglen(rsclp, i);
WRITE_ONCE(*rsclp->tails[i], rhp);
for (; i <= RCU_NEXT_TAIL; i++)
WRITE_ONCE(rsclp->tails[i], &rhp->next);
@@ -281,21 +381,6 @@ bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
}
/*
- * Extract only the counts from the specified rcu_segcblist structure,
- * and place them in the specified rcu_cblist structure. This function
- * supports both callback orphaning and invocation, hence the separation
- * of counts and callbacks. (Callbacks ready for invocation must be
- * orphaned and adopted separately from pending callbacks, but counts
- * apply to all callbacks. Locking must be used to make sure that
- * both orphaned-callbacks lists are consistent.)
- */
-void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
- struct rcu_cblist *rclp)
-{
- rclp->len = rcu_segcblist_xchg_len(rsclp, 0);
-}
-
-/*
* Extract only those callbacks ready to be invoked from the specified
* rcu_segcblist structure and place them in the specified rcu_cblist
* structure.
@@ -307,6 +392,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
if (!rcu_segcblist_ready_cbs(rsclp))
return; /* Nothing to do. */
+ rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
*rclp->tail = rsclp->head;
WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
@@ -314,6 +400,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
WRITE_ONCE(rsclp->tails[i], &rsclp->head);
+ rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
}
/*
@@ -330,11 +417,15 @@ void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
if (!rcu_segcblist_pend_cbs(rsclp))
return; /* Nothing to do. */
+ rclp->len = 0;
*rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
- for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++)
+ for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
+ rclp->len += rcu_segcblist_get_seglen(rsclp, i);
WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
+ rcu_segcblist_set_seglen(rsclp, i, 0);
+ }
}
/*
@@ -345,7 +436,6 @@ void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
struct rcu_cblist *rclp)
{
rcu_segcblist_add_len(rsclp, rclp->len);
- rclp->len = 0;
}
/*
@@ -359,6 +449,7 @@ void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
if (!rclp->head)
return; /* No callbacks to move. */
+ rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
*rclp->tail = rsclp->head;
WRITE_ONCE(rsclp->head, rclp->head);
for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
@@ -379,6 +470,8 @@ void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
{
if (!rclp->head)
return; /* Nothing to do. */
+
+ rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
}
@@ -403,6 +496,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
break;
WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
+ rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
}
/* If no callbacks moved, nothing more need be done. */
@@ -423,6 +517,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
break; /* No more callbacks. */
WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
+ rcu_segcblist_move_seglen(rsclp, i, j);
rsclp->gp_seq[j] = rsclp->gp_seq[i];
}
}
@@ -444,7 +539,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
*/
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
{
- int i;
+ int i, j;
WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
@@ -487,6 +582,10 @@ bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
return false;
+ /* Accounting: everything below i is about to get merged into i. */
+ for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
+ rcu_segcblist_move_seglen(rsclp, j, i);
+
/*
* Merge all later callbacks, including newly arrived callbacks,
* into the segment located by the for-loop above. Assign "seq"
@@ -514,13 +613,24 @@ void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
struct rcu_cblist donecbs;
struct rcu_cblist pendcbs;
+ lockdep_assert_cpus_held();
+
rcu_cblist_init(&donecbs);
rcu_cblist_init(&pendcbs);
- rcu_segcblist_extract_count(src_rsclp, &donecbs);
+
rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);
+
+ /*
+ * No need for smp_mb() before setting the length to 0, because the
+ * CPU-hotplug lock excludes rcu_barrier().
+ */
+ rcu_segcblist_set_len(src_rsclp, 0);
+
rcu_segcblist_insert_count(dst_rsclp, &donecbs);
+ rcu_segcblist_insert_count(dst_rsclp, &pendcbs);
rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);
+
rcu_segcblist_init(src_rsclp);
}
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 5c293afc07b8..9a19328ff251 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -15,6 +15,9 @@ static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
return READ_ONCE(rclp->len);
}
+/* Return number of callbacks in segmented callback list by summing seglen. */
+long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp);
+
void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
@@ -50,19 +53,51 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
#endif
}
+static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
+ int flags)
+{
+ rsclp->flags |= flags;
+}
+
+static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
+ int flags)
+{
+ rsclp->flags &= ~flags;
+}
+
+static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
+ int flags)
+{
+ return READ_ONCE(rsclp->flags) & flags;
+}
+
/*
* Is the specified rcu_segcblist enabled, for example, not corresponding
* to an offline CPU?
*/
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
- return rsclp->enabled;
+ return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}
-/* Is the specified rcu_segcblist offloaded? */
+/* Is the specified rcu_segcblist offloaded (that is, is SEGCBLIST_SOFTIRQ_ONLY clear)? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
- return rsclp->offloaded;
+ if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
+ !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
+ return true;
+
+ return false;
+}
+
+static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
+{
+ int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
+
+ if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
+ return true;
+
+ return false;
}
/*
@@ -75,10 +110,22 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}
+/*
+ * Is the specified segment of the specified rcu_segcblist structure
+ * empty of callbacks?
+ */
+static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
+{
+ if (seg == RCU_DONE_TAIL)
+ return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
+ return rsclp->tails[seg - 1] == rsclp->tails[seg];
+}
+
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
+void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
-void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
@@ -88,8 +135,6 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
struct rcu_head *rhp);
-void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
- struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
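
The flag helpers and reworked offload predicates above replace the old ->enabled/->offloaded booleans with a small state machine. A rough illustration of how callers can be expected to distinguish the two offload states (rdp is a hypothetical per-CPU rcu_data pointer; the real call sites are in tree.c and tree_plugin.h and are not shown here):

	if (rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		/* CB and GP kthreads fully own the list: softirq-time
		 * callback processing can be skipped altogether. */
	} else if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
		/* Offloaded, or mid (de-)offload transition: nocb locking
		 * is still required when touching the list. */
	} else {
		/* SEGCBLIST_SOFTIRQ_ONLY: classic softirq-driven callbacks. */
	}
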
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index 2819b95479af..06491d5530db 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -38,6 +38,7 @@
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
+#include <linux/rcupdate_trace.h>
#include "rcu.h"
@@ -294,6 +295,35 @@ static struct rcu_scale_ops tasks_ops = {
.name = "tasks"
};
+/*
+ * Definitions for RCU-tasks-trace scalability testing.
+ */
+
+static int tasks_trace_scale_read_lock(void)
+{
+ rcu_read_lock_trace();
+ return 0;
+}
+
+static void tasks_trace_scale_read_unlock(int idx)
+{
+ rcu_read_unlock_trace();
+}
+
+static struct rcu_scale_ops tasks_tracing_ops = {
+ .ptype = RCU_TASKS_FLAVOR,
+ .init = rcu_sync_scale_init,
+ .readlock = tasks_trace_scale_read_lock,
+ .readunlock = tasks_trace_scale_read_unlock,
+ .get_gp_seq = rcu_no_completed,
+ .gp_diff = rcu_seq_diff,
+ .async = call_rcu_tasks_trace,
+ .gp_barrier = rcu_barrier_tasks_trace,
+ .sync = synchronize_rcu_tasks_trace,
+ .exp_sync = synchronize_rcu_tasks_trace,
+ .name = "tasks-tracing"
+};
+
static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
if (!cur_ops->gp_diff)
@@ -754,7 +784,7 @@ rcu_scale_init(void)
long i;
int firsterr = 0;
static struct rcu_scale_ops *scale_ops[] = {
- &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
+ &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops, &tasks_tracing_ops
};
if (!torture_init_begin(scale_type, verbose))
@@ -772,7 +802,6 @@ rcu_scale_init(void)
for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
pr_cont(" %s", scale_ops[i]->name);
pr_cont("\n");
- WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
firsterr = -EINVAL;
cur_ops = NULL;
goto unwind;
@@ -846,6 +875,10 @@ rcu_scale_init(void)
unwind:
torture_init_end();
rcu_scale_cleanup();
+ if (shutdown) {
+ WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
+ kernel_power_off();
+ }
return firsterr;
}
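
With tasks_tracing_ops registered in scale_ops[] above, the RCU Tasks Trace flavor becomes selectable through the existing scale_type parameter; the value comes from the .name field. A minimal illustrative invocation (other parameters left at their defaults):

	modprobe rcuscale scale_type=tasks-tracing

The relocated WARN_ON()/kernel_power_off() in the unwind path also means an init failure powers the system off only when a shutdown-based scripted run requested it, so automated runs are less likely to hang.
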
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 916ea4f66e4b..99657ffa6688 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -85,6 +85,7 @@ torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
"Use normal (non-expedited) GP wait primitives");
+torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
@@ -97,6 +98,8 @@ torture_param(int, object_debug, 0,
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (jiffies), 0=disable");
+torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
+torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
"Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
@@ -127,10 +130,12 @@ static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
+static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
+static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
@@ -142,11 +147,22 @@ static struct task_struct *read_exit_task;
#define RCU_TORTURE_PIPE_LEN 10
+// Mailbox-like structure to check RCU global memory ordering.
+struct rcu_torture_reader_check {
+ unsigned long rtc_myloops;
+ int rtc_chkrdr;
+ unsigned long rtc_chkloops;
+ int rtc_ready;
+ struct rcu_torture_reader_check *rtc_assigner;
+} ____cacheline_internodealigned_in_smp;
+
+// Update-side data structure used to check RCU readers.
struct rcu_torture {
struct rcu_head rtort_rcu;
int rtort_pipe_count;
struct list_head rtort_free;
int rtort_mbtest;
+ struct rcu_torture_reader_check *rtort_chkp;
};
static LIST_HEAD(rcu_torture_freelist);
@@ -157,10 +173,13 @@ static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
+static atomic_t n_rcu_torture_mbchk_fail;
+static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
@@ -174,6 +193,8 @@ static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
+static atomic_long_t n_nocb_offload;
+static atomic_long_t n_nocb_deoffload;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY 0
@@ -183,9 +204,11 @@ static int rcu_torture_writer_state;
#define RTWS_EXP_SYNC 4
#define RTWS_COND_GET 5
#define RTWS_COND_SYNC 6
-#define RTWS_SYNC 7
-#define RTWS_STUTTER 8
-#define RTWS_STOPPING 9
+#define RTWS_POLL_GET 7
+#define RTWS_POLL_WAIT 8
+#define RTWS_SYNC 9
+#define RTWS_STUTTER 10
+#define RTWS_STOPPING 11
static const char * const rcu_torture_writer_state_names[] = {
"RTWS_FIXED_DELAY",
"RTWS_DELAY",
@@ -194,6 +217,8 @@ static const char * const rcu_torture_writer_state_names[] = {
"RTWS_EXP_SYNC",
"RTWS_COND_GET",
"RTWS_COND_SYNC",
+ "RTWS_POLL_GET",
+ "RTWS_POLL_WAIT",
"RTWS_SYNC",
"RTWS_STUTTER",
"RTWS_STOPPING",
@@ -311,12 +336,15 @@ struct rcu_torture_ops {
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*exp_sync)(void);
- unsigned long (*get_state)(void);
+ unsigned long (*get_gp_state)(void);
+ unsigned long (*start_gp_poll)(void);
+ bool (*poll_gp_state)(unsigned long oldstate);
void (*cond_sync)(unsigned long oldstate);
call_rcu_func_t call;
void (*cb_barrier)(void);
void (*fqs)(void);
void (*stats)(void);
+ void (*gp_kthread_dbg)(void);
int (*stall_dur)(void);
int irq_capable;
int can_boost;
@@ -385,7 +413,12 @@ static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
int i;
+ struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
+ if (rtrcp) {
+ WRITE_ONCE(rp->rtort_chkp, NULL);
+ smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
+ }
i = READ_ONCE(rp->rtort_pipe_count);
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
@@ -460,12 +493,13 @@ static struct rcu_torture_ops rcu_ops = {
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
- .get_state = get_state_synchronize_rcu,
+ .get_gp_state = get_state_synchronize_rcu,
.cond_sync = cond_synchronize_rcu,
.call = call_rcu,
.cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
+ .gp_kthread_dbg = show_rcu_gp_kthreads,
.stall_dur = rcu_jiffies_till_stall_check,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
@@ -568,6 +602,21 @@ static void srcu_torture_synchronize(void)
synchronize_srcu(srcu_ctlp);
}
+static unsigned long srcu_torture_get_gp_state(void)
+{
+ return get_state_synchronize_srcu(srcu_ctlp);
+}
+
+static unsigned long srcu_torture_start_gp_poll(void)
+{
+ return start_poll_synchronize_srcu(srcu_ctlp);
+}
+
+static bool srcu_torture_poll_gp_state(unsigned long oldstate)
+{
+ return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
+}
+
static void srcu_torture_call(struct rcu_head *head,
rcu_callback_t func)
{
@@ -599,6 +648,9 @@ static struct rcu_torture_ops srcu_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .get_gp_state = srcu_torture_get_gp_state,
+ .start_gp_poll = srcu_torture_start_gp_poll,
+ .poll_gp_state = srcu_torture_poll_gp_state,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
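
The srcu_torture_start_gp_poll()/srcu_torture_poll_gp_state() wrappers above exercise the new polled grace-period interface. A minimal sketch of the pattern the writer kthread drives (ssp stands for a hypothetical initialized srcu_struct, analogous to srcu_ctlp here):

	unsigned long cookie;

	cookie = start_poll_synchronize_srcu(ssp);	/* start a GP, do not block */
	/* ... do other useful work instead of sleeping in synchronize_srcu() ... */
	if (poll_state_synchronize_srcu(ssp, cookie)) {
		/* A full SRCU grace period has elapsed since the cookie was
		 * taken, so objects retired before that point can be freed. */
	}

get_state_synchronize_srcu() is the passive variant: it returns a cookie without starting a grace period, which is what the new "Cookie check" assertions in rcu_torture_writer() rely on.
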
@@ -693,6 +745,7 @@ static struct rcu_torture_ops tasks_ops = {
.exp_sync = synchronize_rcu_mult_test,
.call = call_rcu_tasks,
.cb_barrier = rcu_barrier_tasks,
+ .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
@@ -762,6 +815,7 @@ static struct rcu_torture_ops tasks_rude_ops = {
.exp_sync = synchronize_rcu_tasks_rude,
.call = call_rcu_tasks_rude,
.cb_barrier = rcu_barrier_tasks_rude,
+ .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
@@ -800,6 +854,7 @@ static struct rcu_torture_ops tasks_tracing_ops = {
.exp_sync = synchronize_rcu_tasks_trace,
.call = call_rcu_tasks_trace,
.cb_barrier = rcu_barrier_tasks_trace,
+ .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
@@ -912,7 +967,8 @@ static int rcu_torture_boost(void *arg)
oldstarttime = boost_starttime;
while (time_before(jiffies, oldstarttime)) {
schedule_timeout_interruptible(oldstarttime - jiffies);
- stutter_wait("rcu_torture_boost");
+ if (stutter_wait("rcu_torture_boost"))
+ sched_set_fifo_low(current);
if (torture_must_stop())
goto checkwait;
}
@@ -932,7 +988,8 @@ static int rcu_torture_boost(void *arg)
jiffies);
call_rcu_time = jiffies;
}
- stutter_wait("rcu_torture_boost");
+ if (stutter_wait("rcu_torture_boost"))
+ sched_set_fifo_low(current);
if (torture_must_stop())
goto checkwait;
}
@@ -964,7 +1021,8 @@ static int rcu_torture_boost(void *arg)
}
/* Go do the stutter. */
-checkwait: stutter_wait("rcu_torture_boost");
+checkwait: if (stutter_wait("rcu_torture_boost"))
+ sched_set_fifo_low(current);
} while (!torture_must_stop());
/* Clean up and exit. */
@@ -987,6 +1045,7 @@ rcu_torture_fqs(void *arg)
{
unsigned long fqs_resume_time;
int fqs_burst_remaining;
+ int oldnice = task_nice(current);
VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
do {
@@ -1002,46 +1061,33 @@ rcu_torture_fqs(void *arg)
udelay(fqs_holdoff);
fqs_burst_remaining -= fqs_holdoff;
}
- stutter_wait("rcu_torture_fqs");
+ if (stutter_wait("rcu_torture_fqs"))
+ sched_set_normal(current, oldnice);
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_fqs");
return 0;
}
+// Used by writers to randomly choose from the available grace-period
+// primitives. The only purpose of the initialization is to size the array.
+static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
+static int nsynctypes;
+
/*
- * RCU torture writer kthread. Repeatedly substitutes a new structure
- * for that pointed to by rcu_torture_current, freeing the old structure
- * after a series of grace periods (the "pipeline").
+ * Determine which grace-period primitives are available.
*/
-static int
-rcu_torture_writer(void *arg)
+static void rcu_torture_write_types(void)
{
- bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
- int expediting = 0;
- unsigned long gp_snap;
bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
- bool gp_sync1 = gp_sync;
- int i;
- struct rcu_torture *rp;
- struct rcu_torture *old_rp;
- static DEFINE_TORTURE_RANDOM(rand);
- int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
- RTWS_COND_GET, RTWS_SYNC };
- int nsynctypes = 0;
-
- VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
- if (!can_expedite)
- pr_alert("%s" TORTURE_FLAG
- " GP expediting controlled from boot/sysfs for %s.\n",
- torture_type, cur_ops->name);
+ bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;
/* Initialize synctype[] array. If none set, take default. */
- if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
- gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
- if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
+ if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
+ gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
+ if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
synctype[nsynctypes++] = RTWS_COND_GET;
pr_info("%s: Testing conditional GPs.\n", __func__);
- } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
+ } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
pr_alert("%s: gp_cond without primitives.\n", __func__);
}
if (gp_exp1 && cur_ops->exp_sync) {
@@ -1056,12 +1102,46 @@ rcu_torture_writer(void *arg)
} else if (gp_normal && !cur_ops->deferred_free) {
pr_alert("%s: gp_normal without primitives.\n", __func__);
}
+ if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
+ synctype[nsynctypes++] = RTWS_POLL_GET;
+ pr_info("%s: Testing polling GPs.\n", __func__);
+ } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
+ pr_alert("%s: gp_poll without primitives.\n", __func__);
+ }
if (gp_sync1 && cur_ops->sync) {
synctype[nsynctypes++] = RTWS_SYNC;
pr_info("%s: Testing normal GPs.\n", __func__);
} else if (gp_sync && !cur_ops->sync) {
pr_alert("%s: gp_sync without primitives.\n", __func__);
}
+}
+
+/*
+ * RCU torture writer kthread. Repeatedly substitutes a new structure
+ * for that pointed to by rcu_torture_current, freeing the old structure
+ * after a series of grace periods (the "pipeline").
+ */
+static int
+rcu_torture_writer(void *arg)
+{
+ bool boot_ended;
+ bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
+ unsigned long cookie;
+ int expediting = 0;
+ unsigned long gp_snap;
+ int i;
+ int idx;
+ int oldnice = task_nice(current);
+ struct rcu_torture *rp;
+ struct rcu_torture *old_rp;
+ static DEFINE_TORTURE_RANDOM(rand);
+ bool stutter_waited;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
+ if (!can_expedite)
+ pr_alert("%s" TORTURE_FLAG
+ " GP expediting controlled from boot/sysfs for %s.\n",
+ torture_type, cur_ops->name);
if (WARN_ONCE(nsynctypes == 0,
"rcu_torture_writer: No update-side primitives.\n")) {
/*
@@ -1075,7 +1155,7 @@ rcu_torture_writer(void *arg)
do {
rcu_torture_writer_state = RTWS_FIXED_DELAY;
- schedule_timeout_uninterruptible(1);
+ torture_hrtimeout_us(500, 1000, &rand);
rp = rcu_torture_alloc();
if (rp == NULL)
continue;
@@ -1095,6 +1175,18 @@ rcu_torture_writer(void *arg)
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(old_rp->rtort_pipe_count,
old_rp->rtort_pipe_count + 1);
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
+ idx = cur_ops->readlock();
+ cookie = cur_ops->get_gp_state();
+ WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
+ cur_ops->poll_gp_state(cookie),
+ "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cookie, cur_ops->get_gp_state());
+ cur_ops->readunlock(idx);
+ }
switch (synctype[torture_random(&rand) % nsynctypes]) {
case RTWS_DEF_FREE:
rcu_torture_writer_state = RTWS_DEF_FREE;
@@ -1107,15 +1199,21 @@ rcu_torture_writer(void *arg)
break;
case RTWS_COND_GET:
rcu_torture_writer_state = RTWS_COND_GET;
- gp_snap = cur_ops->get_state();
- i = torture_random(&rand) % 16;
- if (i != 0)
- schedule_timeout_interruptible(i);
- udelay(torture_random(&rand) % 1000);
+ gp_snap = cur_ops->get_gp_state();
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC;
cur_ops->cond_sync(gp_snap);
rcu_torture_pipe_update(old_rp);
break;
+ case RTWS_POLL_GET:
+ rcu_torture_writer_state = RTWS_POLL_GET;
+ gp_snap = cur_ops->start_gp_poll();
+ rcu_torture_writer_state = RTWS_POLL_WAIT;
+ while (!cur_ops->poll_gp_state(gp_snap))
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ rcu_torture_pipe_update(old_rp);
+ break;
case RTWS_SYNC:
rcu_torture_writer_state = RTWS_SYNC;
cur_ops->sync();
@@ -1125,6 +1223,14 @@ rcu_torture_writer(void *arg)
WARN_ON_ONCE(1);
break;
}
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
+ WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
+ !cur_ops->poll_gp_state(cookie),
+ "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cookie, cur_ops->get_gp_state());
}
WRITE_ONCE(rcu_torture_current_version,
rcu_torture_current_version + 1);
@@ -1143,11 +1249,13 @@ rcu_torture_writer(void *arg)
!rcu_gp_is_normal();
}
rcu_torture_writer_state = RTWS_STUTTER;
- if (stutter_wait("rcu_torture_writer") &&
+ boot_ended = rcu_inkernel_boot_has_ended();
+ stutter_waited = stutter_wait("rcu_torture_writer");
+ if (stutter_waited &&
!READ_ONCE(rcu_fwd_cb_nodelay) &&
!cur_ops->slow_gps &&
!torture_must_stop() &&
- rcu_inkernel_boot_has_ended())
+ boot_ended)
for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
if (list_empty(&rcu_tortures[i].rtort_free) &&
rcu_access_pointer(rcu_torture_current) !=
@@ -1155,6 +1263,8 @@ rcu_torture_writer(void *arg)
rcu_ftrace_dump(DUMP_ALL);
WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
}
+ if (stutter_waited)
+ sched_set_normal(current, oldnice);
} while (!torture_must_stop());
rcu_torture_current = NULL; // Let stats task know that we are done.
/* Reset expediting back to unexpedited. */
@@ -1179,26 +1289,43 @@ rcu_torture_writer(void *arg)
static int
rcu_torture_fakewriter(void *arg)
{
+ unsigned long gp_snap;
DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
set_user_nice(current, MAX_NICE);
do {
- schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
- udelay(torture_random(&rand) & 0x3ff);
+ torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
if (cur_ops->cb_barrier != NULL &&
torture_random(&rand) % (nfakewriters * 8) == 0) {
cur_ops->cb_barrier();
- } else if (gp_normal == gp_exp) {
- if (cur_ops->sync && torture_random(&rand) & 0x80)
- cur_ops->sync();
- else if (cur_ops->exp_sync)
+ } else {
+ switch (synctype[torture_random(&rand) % nsynctypes]) {
+ case RTWS_DEF_FREE:
+ break;
+ case RTWS_EXP_SYNC:
cur_ops->exp_sync();
- } else if (gp_normal && cur_ops->sync) {
- cur_ops->sync();
- } else if (cur_ops->exp_sync) {
- cur_ops->exp_sync();
+ break;
+ case RTWS_COND_GET:
+ gp_snap = cur_ops->get_gp_state();
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ cur_ops->cond_sync(gp_snap);
+ break;
+ case RTWS_POLL_GET:
+ gp_snap = cur_ops->start_gp_poll();
+ while (!cur_ops->poll_gp_state(gp_snap)) {
+ torture_hrtimeout_jiffies(torture_random(&rand) % 16,
+ &rand);
+ }
+ break;
+ case RTWS_SYNC:
+ cur_ops->sync();
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
}
stutter_wait("rcu_torture_fakewriter");
} while (!torture_must_stop());
@@ -1212,6 +1339,62 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
kfree(rhp);
}
+// Set up and carry out testing of RCU's global memory ordering
+static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
+ struct torture_random_state *trsp)
+{
+ unsigned long loops;
+ int noc = torture_num_online_cpus();
+ int rdrchked;
+ int rdrchker;
+ struct rcu_torture_reader_check *rtrcp; // Me.
+ struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
+ struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
+ struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
+
+ if (myid < 0)
+ return; // Don't try this from timer handlers.
+
+ // Increment my counter.
+ rtrcp = &rcu_torture_reader_mbchk[myid];
+ WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
+
+ // Attempt to assign someone else some checking work.
+ rdrchked = torture_random(trsp) % nrealreaders;
+ rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
+ rdrchker = torture_random(trsp) % nrealreaders;
+ rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
+ if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
+ smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
+ !READ_ONCE(rtp->rtort_chkp) &&
+ !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
+ rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
+ WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
+ rtrcp->rtc_chkrdr = rdrchked;
+ WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
+ if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
+ cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
+ (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
+ }
+
+ // If assigned some completed work, do it!
+ rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
+ if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
+ return; // No work or work not yet ready.
+ rdrchked = rtrcp_assigner->rtc_chkrdr;
+ if (WARN_ON_ONCE(rdrchked < 0))
+ return;
+ rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
+ loops = READ_ONCE(rtrcp_chked->rtc_myloops);
+ atomic_inc(&n_rcu_torture_mbchk_tries);
+ if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
+ atomic_inc(&n_rcu_torture_mbchk_fail);
+ rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
+ rtrcp_assigner->rtc_ready = 0;
+ smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
+ smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
+}
+
/*
* Do one extension of an RCU read-side critical section using the
* current reader state in readstate (set to zero for initial entry
@@ -1347,8 +1530,9 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
* no data to read. Can be invoked both from process context and
* from a timer handler.
*/
-static bool rcu_torture_one_read(struct torture_random_state *trsp)
+static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
+ unsigned long cookie;
int i;
unsigned long started;
unsigned long completed;
@@ -1364,6 +1548,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
WARN_ON_ONCE(!rcu_is_watching());
newstate = rcutorture_extend_mask(readstate, trsp);
rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
+ cookie = cur_ops->get_gp_state();
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
@@ -1379,6 +1565,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
+ rcu_torture_reader_do_mbchk(myid, p, trsp);
rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
preempt_disable();
pipe_count = READ_ONCE(p->rtort_pipe_count);
@@ -1400,6 +1587,13 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
+ if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
+ WARN_ONCE(cur_ops->poll_gp_state(cookie),
+ "%s: Cookie check 3 failed %s(%d) %lu->%lu\n",
+ __func__,
+ rcu_torture_writer_state_getname(),
+ rcu_torture_writer_state,
+ cookie, cur_ops->get_gp_state());
rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
// This next splat is expected behavior if leakpointer, especially
@@ -1428,7 +1622,7 @@ static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
static void rcu_torture_timer(struct timer_list *unused)
{
atomic_long_inc(&n_rcu_torture_timers);
- (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
+ (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
/* Test call_rcu() invocation from interrupt handler. */
if (cur_ops->call) {
@@ -1464,13 +1658,13 @@ rcu_torture_reader(void *arg)
if (!timer_pending(&t))
mod_timer(&t, jiffies + 1);
}
- if (!rcu_torture_one_read(&rand) && !torture_must_stop())
+ if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
schedule_timeout_interruptible(HZ);
if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
- schedule_timeout_interruptible(1);
+ torture_hrtimeout_us(500, 1000, &rand);
lastsleep = jiffies + 10;
}
- while (num_online_cpus() < mynumonline && !torture_must_stop())
+ while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
schedule_timeout_interruptible(HZ / 5);
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
@@ -1484,6 +1678,53 @@ rcu_torture_reader(void *arg)
}
/*
+ * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
+ * increase race probabilities and to fuzz the interval between toggles.
+ */
+static int rcu_nocb_toggle(void *arg)
+{
+ int cpu;
+ int maxcpu = -1;
+ int oldnice = task_nice(current);
+ long r;
+ DEFINE_TORTURE_RANDOM(rand);
+ ktime_t toggle_delay;
+ unsigned long toggle_fuzz;
+ ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
+
+ VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
+ while (!rcu_inkernel_boot_has_ended())
+ schedule_timeout_interruptible(HZ / 10);
+ for_each_online_cpu(cpu)
+ maxcpu = cpu;
+ WARN_ON(maxcpu < 0);
+ if (toggle_interval > ULONG_MAX)
+ toggle_fuzz = ULONG_MAX >> 3;
+ else
+ toggle_fuzz = toggle_interval >> 3;
+ if (toggle_fuzz <= 0)
+ toggle_fuzz = NSEC_PER_USEC;
+ do {
+ r = torture_random(&rand);
+ cpu = (r >> 4) % (maxcpu + 1);
+ if (r & 0x1) {
+ rcu_nocb_cpu_offload(cpu);
+ atomic_long_inc(&n_nocb_offload);
+ } else {
+ rcu_nocb_cpu_deoffload(cpu);
+ atomic_long_inc(&n_nocb_deoffload);
+ }
+ toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
+ if (stutter_wait("rcu_nocb_toggle"))
+ sched_set_normal(current, oldnice);
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_nocb_toggle");
+ return 0;
+}
+
+/*
* Print torture statistics. Caller must ensure that there is only
* one call to this function at a given time!!! This is normally
* accomplished by relying on the module system to only have one copy
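
The rcu_nocb_toggle() kthreads above are sized and paced by the new nocbs_nthreads and nocbs_toggle module parameters. An illustrative boot command line for a built-in rcutorture (the values and the CPU list are hypothetical; rcu_nocbs= is needed so there are offloadable CPUs to toggle in the first place):

	rcutorture.nocbs_nthreads=2 rcutorture.nocbs_toggle=1000 rcu_nocbs=0-7

The same parameters can be passed to modprobe when rcutorture is built as a module, and the per-run counts appear in the stats line as "nocb-toggles: <offloads>:<deoffloads>", as added to rcu_torture_stats_print() further below.
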
@@ -1524,8 +1765,9 @@ rcu_torture_stats_print(void)
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
atomic_read(&n_rcu_torture_free));
- pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
+ pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
atomic_read(&n_rcu_torture_mberror),
+ atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
n_rcu_torture_barrier_error,
n_rcu_torture_boost_ktrerror,
n_rcu_torture_boost_rterror);
@@ -1538,16 +1780,20 @@ rcu_torture_stats_print(void)
data_race(n_barrier_successes),
data_race(n_barrier_attempts),
data_race(n_rcu_torture_barrier_error));
- pr_cont("read-exits: %ld\n", data_race(n_read_exits));
+ pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
+ pr_cont("nocb-toggles: %ld:%ld\n",
+ atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
pr_alert("%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) ||
+ atomic_read(&n_rcu_torture_mbchk_fail) ||
n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
i > 1) {
pr_cont("%s", "!!! ");
atomic_inc(&n_rcu_torture_error);
WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
+ WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
@@ -1594,7 +1840,8 @@ rcu_torture_stats_print(void)
sched_show_task(wtp);
splatted = true;
}
- show_rcu_gp_kthreads();
+ if (cur_ops->gp_kthread_dbg)
+ cur_ops->gp_kthread_dbg();
rcu_ftrace_dump(DUMP_ALL);
}
rtcv_snap = rcu_torture_current_version;
@@ -1631,7 +1878,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
"stall_cpu_block=%d "
"n_barrier_cbs=%d "
"onoff_interval=%d onoff_holdoff=%d "
- "read_exit_delay=%d read_exit_burst=%d\n",
+ "read_exit_delay=%d read_exit_burst=%d "
+ "nocbs_nthreads=%d nocbs_toggle=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
@@ -1641,7 +1889,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
stall_cpu_block,
n_barrier_cbs,
onoff_interval, onoff_holdoff,
- read_exit_delay, read_exit_burst);
+ read_exit_delay, read_exit_burst,
+ nocbs_nthreads, nocbs_toggle);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
@@ -1913,7 +2162,9 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
unsigned long stopat;
static DEFINE_TORTURE_RANDOM(trs);
- if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
+ if (!cur_ops->sync)
+ return; // Cannot do need_resched() forward progress testing without ->sync.
+ if (cur_ops->call && cur_ops->cb_barrier) {
init_rcu_head_on_stack(&fcs.rh);
selfpropcb = true;
}
@@ -2103,6 +2354,7 @@ static struct notifier_block rcutorture_oom_nb = {
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
+ int oldnice = task_nice(current);
struct rcu_fwd *rfp = args;
int tested = 0;
int tested_tries = 0;
@@ -2121,7 +2373,8 @@ static int rcu_torture_fwd_prog(void *args)
rcu_torture_fwd_prog_cr(rfp);
/* Avoid slow periods, better to test when busy. */
- stutter_wait("rcu_torture_fwd_prog");
+ if (stutter_wait("rcu_torture_fwd_prog"))
+ sched_set_normal(current, oldnice);
} while (!torture_must_stop());
/* Short runs might not contain a valid forward-progress attempt. */
WARN_ON(!tested && tested_tries >= 5);
@@ -2137,8 +2390,8 @@ static int __init rcu_torture_fwd_prog_init(void)
if (!fwd_progress)
return 0; /* Not requested, so don't do it. */
- if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
- cur_ops == &rcu_busted_ops) {
+ if ((!cur_ops->sync && !cur_ops->call) ||
+ !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
return 0;
}
@@ -2372,7 +2625,7 @@ static int rcu_torture_read_exit_child(void *trsp_in)
// Minimize time between reading and exiting.
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
- (void)rcu_torture_one_read(trsp);
+ (void)rcu_torture_one_read(trsp, -1);
return 0;
}
@@ -2472,25 +2725,35 @@ rcu_torture_cleanup(void)
return;
}
- show_rcu_gp_kthreads();
+ if (cur_ops->gp_kthread_dbg)
+ cur_ops->gp_kthread_dbg();
rcu_torture_read_exit_cleanup();
rcu_torture_barrier_cleanup();
rcu_torture_fwd_prog_cleanup();
torture_stop_kthread(rcu_torture_stall, stall_task);
torture_stop_kthread(rcu_torture_writer, writer_task);
+ if (nocb_tasks) {
+ for (i = 0; i < nrealnocbers; i++)
+ torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
+ kfree(nocb_tasks);
+ nocb_tasks = NULL;
+ }
+
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
torture_stop_kthread(rcu_torture_reader,
reader_tasks[i]);
kfree(reader_tasks);
+ reader_tasks = NULL;
}
+ kfree(rcu_torture_reader_mbchk);
+ rcu_torture_reader_mbchk = NULL;
if (fakewriter_tasks) {
- for (i = 0; i < nfakewriters; i++) {
+ for (i = 0; i < nfakewriters; i++)
torture_stop_kthread(rcu_torture_fakewriter,
fakewriter_tasks[i]);
- }
kfree(fakewriter_tasks);
fakewriter_tasks = NULL;
}
@@ -2583,6 +2846,7 @@ static void rcu_test_debug_objects(void)
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
struct rcu_head rh1;
struct rcu_head rh2;
+ struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
init_rcu_head_on_stack(&rh1);
init_rcu_head_on_stack(&rh2);
@@ -2595,6 +2859,10 @@ static void rcu_test_debug_objects(void)
local_irq_disable(); /* Make it harder to start a new grace period. */
call_rcu(&rh2, rcu_torture_leak_cb);
call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ if (rhp) {
+ call_rcu(rhp, rcu_torture_leak_cb);
+ call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
+ }
local_irq_enable();
rcu_read_unlock();
preempt_enable();
@@ -2647,7 +2915,6 @@ rcu_torture_init(void)
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_cont(" %s", torture_ops[i]->name);
pr_cont("\n");
- WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
firsterr = -EINVAL;
cur_ops = NULL;
goto unwind;
@@ -2690,6 +2957,8 @@ rcu_torture_init(void)
atomic_set(&n_rcu_torture_alloc_fail, 0);
atomic_set(&n_rcu_torture_free, 0);
atomic_set(&n_rcu_torture_mberror, 0);
+ atomic_set(&n_rcu_torture_mbchk_fail, 0);
+ atomic_set(&n_rcu_torture_mbchk_tries, 0);
atomic_set(&n_rcu_torture_error, 0);
n_rcu_torture_barrier_error = 0;
n_rcu_torture_boost_ktrerror = 0;
@@ -2709,6 +2978,7 @@ rcu_torture_init(void)
/* Start up the kthreads. */
+ rcu_torture_write_types();
firsterr = torture_create_kthread(rcu_torture_writer, NULL,
writer_task);
if (firsterr)
@@ -2731,17 +3001,40 @@ rcu_torture_init(void)
}
reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
GFP_KERNEL);
- if (reader_tasks == NULL) {
+ rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
+ GFP_KERNEL);
+ if (!reader_tasks || !rcu_torture_reader_mbchk) {
VERBOSE_TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nrealreaders; i++) {
+ rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
reader_tasks[i]);
if (firsterr)
goto unwind;
}
+ nrealnocbers = nocbs_nthreads;
+ if (WARN_ON(nrealnocbers < 0))
+ nrealnocbers = 1;
+ if (WARN_ON(nocbs_toggle < 0))
+ nocbs_toggle = HZ;
+ if (nrealnocbers > 0) {
+ nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
+ if (nocb_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ } else {
+ nocb_tasks = NULL;
+ }
+ for (i = 0; i < nrealnocbers; i++) {
+ firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
+ if (firsterr)
+ goto unwind;
+ }
if (stat_interval > 0) {
firsterr = torture_create_kthread(rcu_torture_stats, NULL,
stats_task);
@@ -2815,6 +3108,10 @@ rcu_torture_init(void)
unwind:
torture_init_end();
rcu_torture_cleanup();
+ if (shutdown_secs) {
+ WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
+ kernel_power_off();
+ }
return firsterr;
}
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 952595c678b3..02dd9767b559 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -46,6 +46,18 @@
#define VERBOSE_SCALEOUT(s, x...) \
do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)
+static atomic_t verbose_batch_ctr;
+
+#define VERBOSE_SCALEOUT_BATCH(s, x...) \
+do { \
+ if (verbose && \
+ (verbose_batched <= 0 || \
+ !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \
+ schedule_timeout_uninterruptible(1); \
+ pr_alert("%s" SCALE_FLAG s, scale_type, ## x); \
+ } \
+} while (0)
+
#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)
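For readers skimming the refscale changes, here is a standalone user-space sketch (my own illustration, not part of the patch; all names are local to the sketch) of the every-Nth filter that VERBOSE_SCALEOUT_BATCH() implements: with verbose_batched set to N, only every Nth invocation actually prints, so a large reader count no longer floods the console.

#include <stdio.h>

/* Stand-ins for the kernel's verbose_batched parameter and atomic counter. */
static int verbose_batched = 4;		/* print one message out of every four */
static int verbose_batch_ctr;

static void verbose_scaleout_batch(const char *msg)
{
	/* Print when batching is disabled, or on every Nth call. */
	if (verbose_batched <= 0 || !(++verbose_batch_ctr % verbose_batched))
		printf("%s\n", msg);
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		verbose_scaleout_batch("ref_scale_reader: task started");
	return 0;	/* only the 4th and 8th calls produce output */
}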
@@ -57,6 +69,7 @@ module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
+torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");
// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
@@ -368,14 +381,14 @@ ref_scale_reader(void *arg)
u64 start;
s64 duration;
- VERBOSE_SCALEOUT("ref_scale_reader %ld: task started", me);
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
set_user_nice(current, MAX_NICE);
atomic_inc(&n_init);
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
repeat:
- VERBOSE_SCALEOUT("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
// Wait for signal that this reader can start.
wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
@@ -392,7 +405,7 @@ repeat:
while (atomic_read_acquire(&n_started))
cpu_relax();
- VERBOSE_SCALEOUT("ref_scale_reader %ld: experiment %d started", me, exp_idx);
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);
// To reduce noise, do an initial cache-warming invocation, check
@@ -421,8 +434,8 @@ repeat:
if (atomic_dec_and_test(&nreaders_exp))
wake_up(&main_wq);
- VERBOSE_SCALEOUT("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
- me, exp_idx, atomic_read(&nreaders_exp));
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
+ me, exp_idx, atomic_read(&nreaders_exp));
if (!torture_must_stop())
goto repeat;
@@ -658,7 +671,6 @@ ref_scale_init(void)
for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
pr_cont(" %s", scale_ops[i]->name);
pr_cont("\n");
- WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
firsterr = -EINVAL;
cur_ops = NULL;
goto unwind;
@@ -681,6 +693,12 @@ ref_scale_init(void)
// Reader tasks (default to ~75% of online CPUs).
if (nreaders < 0)
nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
+ if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
+ loops = 1;
+ if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
+ nreaders = 1;
+ if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
+ nruns = 1;
reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
GFP_KERNEL);
if (!reader_tasks) {
@@ -712,6 +730,10 @@ ref_scale_init(void)
unwind:
torture_init_end();
ref_scale_cleanup();
+ if (shutdown) {
+ WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
+ kernel_power_off();
+ }
return firsterr;
}
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 6208c1dae5c9..26344dc6483b 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -34,6 +34,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp)
ssp->srcu_gp_running = false;
ssp->srcu_gp_waiting = false;
ssp->srcu_idx = 0;
+ ssp->srcu_idx_max = 0;
INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0;
@@ -84,6 +85,8 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
WARN_ON(ssp->srcu_gp_waiting);
WARN_ON(ssp->srcu_cb_head);
WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
+ WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
+ WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
@@ -114,7 +117,7 @@ void srcu_drive_gp(struct work_struct *wp)
struct srcu_struct *ssp;
ssp = container_of(wp, struct srcu_struct, srcu_work);
- if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
+ if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
@@ -124,11 +127,12 @@ void srcu_drive_gp(struct work_struct *wp)
ssp->srcu_cb_head = NULL;
ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable();
- idx = ssp->srcu_idx;
- WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
+ idx = (ssp->srcu_idx & 0x2) / 2;
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
/* Invoke the callbacks we removed above. */
while (lh) {
@@ -146,11 +150,27 @@ void srcu_drive_gp(struct work_struct *wp)
* straighten that out.
*/
WRITE_ONCE(ssp->srcu_gp_running, false);
- if (READ_ONCE(ssp->srcu_cb_head))
+ if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
+static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
+{
+ unsigned short cookie;
+
+ cookie = get_state_synchronize_srcu(ssp);
+ if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
+ return;
+ WRITE_ONCE(ssp->srcu_idx_max, cookie);
+ if (!READ_ONCE(ssp->srcu_gp_running)) {
+ if (likely(srcu_init_done))
+ schedule_work(&ssp->srcu_work);
+ else if (list_empty(&ssp->srcu_work.entry))
+ list_add(&ssp->srcu_work.entry, &srcu_boot_list);
+ }
+}
+
/*
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
@@ -166,12 +186,7 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
*ssp->srcu_cb_tail = rhp;
ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
- if (!READ_ONCE(ssp->srcu_gp_running)) {
- if (likely(srcu_init_done))
- schedule_work(&ssp->srcu_work);
- else if (list_empty(&ssp->srcu_work.entry))
- list_add(&ssp->srcu_work.entry, &srcu_boot_list);
- }
+ srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
@@ -190,6 +205,48 @@ void synchronize_srcu(struct srcu_struct *ssp)
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
+/*
+ * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
+ */
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
+{
+ unsigned long ret;
+
+ barrier();
+ ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
+ barrier();
+ return ret & USHRT_MAX;
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
+
+/*
+ * start_poll_synchronize_srcu - Provide cookie and start grace period
+ *
+ * The difference between this and get_state_synchronize_srcu() is that
+ * this function ensures that poll_state_synchronize_srcu() will
+ * eventually return true.
+ */
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
+{
+ unsigned long ret = get_state_synchronize_srcu(ssp);
+
+ srcu_gp_start_if_needed(ssp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
+
+/*
+ * poll_state_synchronize_srcu - Has cookie's grace period ended?
+ */
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
+{
+ bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);
+
+ barrier();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
+
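To make the Tiny SRCU counter arithmetic above easier to follow, here is a small user-space model (an illustration only, not patch code): ->srcu_idx is even while idle and odd while srcu_drive_gp() is waiting out readers, and get_state_synchronize_srcu()'s (idx + 3) & ~0x1 appears to round up to the first even value whose attainment implies a full grace period after the snapshot.

#include <stdio.h>

/* User-space model of Tiny SRCU's 16-bit grace-period counter;
 * all names below are local to this sketch. */
static unsigned short srcu_idx;		/* even: idle, odd: GP in flight */

/* Models get_state_synchronize_srcu(). */
static unsigned short get_cookie(void)
{
	return (srcu_idx + 3) & ~0x1;
}

/* Models one pass of srcu_drive_gp(). */
static void drive_one_gp(void)
{
	srcu_idx++;	/* grace period starts (counter goes odd) */
	srcu_idx++;	/* grace period ends (counter even again)  */
}

/* Models USHORT_CMP_GE(srcu_idx, cookie). */
static int cookie_done(unsigned short cookie)
{
	return (unsigned short)(srcu_idx - cookie) < 32768;
}

int main(void)
{
	unsigned short cookie = get_cookie();

	printf("idx=%hu cookie=%hu\n", srcu_idx, cookie);	/* idx=0 cookie=2 */
	for (int gp = 1; gp <= 2; gp++) {
		drive_one_gp();
		printf("after %d GP(s): idx=%hu done=%d\n",
		       gp, srcu_idx, cookie_done(cookie));
	}
	return 0;	/* done becomes 1 after the first full grace period */
}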
/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index c13348ee80a5..e26547b34ad3 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -177,11 +177,13 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
INIT_DELAYED_WORK(&ssp->work, process_srcu);
if (!is_static)
ssp->sda = alloc_percpu(struct srcu_data);
+ if (!ssp->sda)
+ return -ENOMEM;
init_srcu_struct_nodes(ssp, is_static);
ssp->srcu_gp_seq_needed_exp = 0;
ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
- return ssp->sda ? 0 : -ENOMEM;
+ return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -806,6 +808,46 @@ static void srcu_leak_callback(struct rcu_head *rhp)
}
/*
+ * Start an SRCU grace period, and also queue the callback if non-NULL.
+ */
+static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ struct rcu_head *rhp, bool do_norm)
+{
+ unsigned long flags;
+ int idx;
+ bool needexp = false;
+ bool needgp = false;
+ unsigned long s;
+ struct srcu_data *sdp;
+
+ check_init_srcu_struct(ssp);
+ idx = srcu_read_lock(ssp);
+ sdp = raw_cpu_ptr(ssp->sda);
+ spin_lock_irqsave_rcu_node(sdp, flags);
+ if (rhp)
+ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_gp_seq));
+ s = rcu_seq_snap(&ssp->srcu_gp_seq);
+ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ sdp->srcu_gp_seq_needed = s;
+ needgp = true;
+ }
+ if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+ sdp->srcu_gp_seq_needed_exp = s;
+ needexp = true;
+ }
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
+ if (needgp)
+ srcu_funnel_gp_start(ssp, sdp, s, do_norm);
+ else if (needexp)
+ srcu_funnel_exp_start(ssp, sdp->mynode, s);
+ srcu_read_unlock(ssp, idx);
+ return s;
+}
+
+/*
* Enqueue an SRCU callback on the srcu_data structure associated with
* the current CPU and the specified srcu_struct structure, initiating
* grace-period processing if it is not already running.
@@ -836,14 +878,6 @@ static void srcu_leak_callback(struct rcu_head *rhp)
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
- unsigned long flags;
- int idx;
- bool needexp = false;
- bool needgp = false;
- unsigned long s;
- struct srcu_data *sdp;
-
- check_init_srcu_struct(ssp);
if (debug_rcu_head_queue(rhp)) {
/* Probable double call_srcu(), so leak the callback. */
WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -851,28 +885,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
return;
}
rhp->func = func;
- idx = srcu_read_lock(ssp);
- sdp = raw_cpu_ptr(ssp->sda);
- spin_lock_irqsave_rcu_node(sdp, flags);
- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_gp_seq));
- s = rcu_seq_snap(&ssp->srcu_gp_seq);
- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
- if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
- sdp->srcu_gp_seq_needed = s;
- needgp = true;
- }
- if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
- sdp->srcu_gp_seq_needed_exp = s;
- needexp = true;
- }
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- if (needgp)
- srcu_funnel_gp_start(ssp, sdp, s, do_norm);
- else if (needexp)
- srcu_funnel_exp_start(ssp, sdp->mynode, s);
- srcu_read_unlock(ssp, idx);
+ (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}
/**
@@ -906,7 +919,7 @@ static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
struct rcu_synchronize rcu;
- RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
+ RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -1001,6 +1014,77 @@ void synchronize_srcu(struct srcu_struct *ssp)
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
+/**
+ * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
+ * @ssp: srcu_struct to provide cookie for.
+ *
+ * This function returns a cookie that can be passed to
+ * poll_state_synchronize_srcu(), which will return true if a full grace
+ * period has elapsed in the meantime. It is the caller's responsibility
+ * to make sure that grace period happens, for example, by invoking
+ * call_srcu() after return from get_state_synchronize_srcu().
+ */
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
+{
+ // Any prior manipulation of SRCU-protected data must happen
+ // before the load from ->srcu_gp_seq.
+ smp_mb();
+ return rcu_seq_snap(&ssp->srcu_gp_seq);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
+
+/**
+ * start_poll_synchronize_srcu - Provide cookie and start grace period
+ * @ssp: srcu_struct to provide cookie for.
+ *
+ * This function returns a cookie that can be passed to
+ * poll_state_synchronize_srcu(), which will return true if a full grace
+ * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
+ * this function also ensures that any needed SRCU grace period will be
+ * started. This convenience does come at a cost in terms of CPU overhead.
+ */
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
+{
+ return srcu_gp_start_if_needed(ssp, NULL, true);
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
+
+/**
+ * poll_state_synchronize_srcu - Has cookie's grace period ended?
+ * @ssp: srcu_struct to provide cookie for.
+ * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
+ *
+ * This function takes the cookie that was returned from either
+ * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
+ * returns @true if an SRCU grace period elapsed since the time that the
+ * cookie was created.
+ *
+ * Because cookies are finite in size, wrapping/overflow is possible.
+ * This is more pronounced on 32-bit systems where cookies are 32 bits,
+ * where in theory wrapping could happen in about 14 hours assuming
+ * 25-microsecond expedited SRCU grace periods. However, a more likely
+ * overflow lower bound is on the order of 24 days in the case of
+ * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
+ * system requires geologic timespans, as in more than seven million years
+ * even for expedited SRCU grace periods.
+ *
+ * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
+ * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses
+ * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
+ * few minutes. If this proves to be a problem, this counter will be
+ * expanded to the same size as for Tree SRCU.
+ */
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
+{
+ if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
+ return false;
+ // Ensure that the end of the SRCU grace period happens before
+ // any subsequent code that the caller might execute.
+ smp_mb(); // ^^^
+ return true;
+}
+EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
+
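One plausible way for client code to use the polled grace-period API added above (a hedged sketch of my own; my_srcu, my_obj and the helper names are hypothetical, not part of the patch): record a cookie when an object is retired, then free it immediately if poll_state_synchronize_srcu() says a grace period has already elapsed, and fall back to call_srcu() otherwise.

#include <linux/slab.h>
#include <linux/srcu.h>

/* Hypothetical object whose readers run under srcu_read_lock(&my_srcu). */
struct my_obj {
	struct rcu_head rh;
	unsigned long retire_cookie;
	/* payload ... */
};

DEFINE_SRCU(my_srcu);

static void my_obj_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_obj, rh));
}

/* Called once the object is unlinked from all reader-visible paths. */
static void my_obj_retire(struct my_obj *obj)
{
	/* Snapshot "now" and make sure a grace period gets started. */
	obj->retire_cookie = start_poll_synchronize_srcu(&my_srcu);
}

/* Called later, for example from a periodic reaper. */
static void my_obj_try_free(struct my_obj *obj)
{
	if (poll_state_synchronize_srcu(&my_srcu, obj->retire_cookie))
		kfree(obj);	/* a full grace period has already elapsed */
	else
		call_srcu(&my_srcu, &obj->rh, my_obj_free_cb);
}

Whether to poll or to fall back like this is up to the caller; the patch itself only guarantees that start_poll_synchronize_srcu() gets any needed grace period going, via the srcu_gp_start_if_needed() refactoring above.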
/*
* Callback function for srcu_barrier() use.
*/
@@ -1158,6 +1242,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
*/
static void srcu_invoke_callbacks(struct work_struct *work)
{
+ long len;
bool more;
struct rcu_cblist ready_cbs;
struct rcu_head *rhp;
@@ -1180,6 +1265,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
/* We are on the job! Extract and invoke ready callbacks. */
sdp->srcu_cblist_invoking = true;
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
+ len = ready_cbs.len;
spin_unlock_irq_rcu_node(sdp);
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
@@ -1188,13 +1274,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
rhp->func(rhp);
local_bh_enable();
}
+ WARN_ON_ONCE(ready_cbs.len);
/*
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
spin_lock_irq_rcu_node(sdp);
- rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
+ rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&ssp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index d5d9f2d03e8a..af7c19439f4e 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
}
}
-/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
struct task_struct *t;
@@ -290,7 +290,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
".C"[!!data_race(rtp->cbs_head)],
s);
}
-#endif /* #ifndef CONFIG_TINY_RCU */
+#endif // #ifndef CONFIG_TINY_RCU
static void exit_tasks_rcu_finish_trace(struct task_struct *t);
@@ -335,23 +335,18 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
// Start off with initial wait and slowly back off to 1 HZ wait.
fract = rtp->init_fract;
- if (fract > HZ)
- fract = HZ;
- for (;;) {
+ while (!list_empty(&holdouts)) {
bool firstreport;
bool needreport;
int rtst;
- if (list_empty(&holdouts))
- break;
-
/* Slowly back off waiting for holdouts */
set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
- schedule_timeout_idle(HZ/fract);
+ schedule_timeout_idle(fract);
- if (fract > 1)
- fract--;
+ if (fract < HZ)
+ fract++;
rtst = READ_ONCE(rcu_task_stall_timeout);
needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
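The rewritten loop above changes the holdout-wait backoff from harmonic steps of HZ/fract jiffies (fract counting down from 10) to linear steps of fract jiffies (fract counting up from init_fract toward HZ). A tiny user-space sketch of the two progressions, assuming HZ=1000 purely for illustration and sampling the new scheme every 100 passes for brevity:

#include <stdio.h>

#define HZ 1000		/* assumed tick rate, for illustration only */

int main(void)
{
	int fract;

	/* Old scheme: wait HZ/fract jiffies, fract counts 10 down to 1. */
	printf("old:");
	for (fract = 10; fract >= 1; fract--)
		printf(" %d", HZ / fract);	/* 100 111 125 ... 500 1000 */
	printf("\n");

	/* New scheme: wait fract jiffies, fract counts HZ/10 up to HZ,
	 * one jiffy per pass; sampled here every 100 passes. */
	printf("new:");
	for (fract = HZ / 10; fract <= HZ; fract += 100)
		printf(" %d", fract);		/* 100 200 300 ... 1000 */
	printf("\n");
	return 0;
}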
@@ -560,7 +555,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
static int __init rcu_spawn_tasks_kthread(void)
{
rcu_tasks.gp_sleep = HZ / 10;
- rcu_tasks.init_fract = 10;
+ rcu_tasks.init_fract = HZ / 10;
rcu_tasks.pregp_func = rcu_tasks_pregp_step;
rcu_tasks.pertask_func = rcu_tasks_pertask;
rcu_tasks.postscan_func = rcu_tasks_postscan;
@@ -569,14 +564,14 @@ static int __init rcu_spawn_tasks_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks);
return 0;
}
-core_initcall(rcu_spawn_tasks_kthread);
-#ifndef CONFIG_TINY_RCU
-static void show_rcu_tasks_classic_gp_kthread(void)
+#if !defined(CONFIG_TINY_RCU)
+void show_rcu_tasks_classic_gp_kthread(void)
{
show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
-#endif /* #ifndef CONFIG_TINY_RCU */
+EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
+#endif // !defined(CONFIG_TINY_RCU)
/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
@@ -598,7 +593,6 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
}
#else /* #ifdef CONFIG_TASKS_RCU */
-static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -697,18 +691,15 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
-core_initcall(rcu_spawn_tasks_rude_kthread);
-#ifndef CONFIG_TINY_RCU
-static void show_rcu_tasks_rude_gp_kthread(void)
+#if !defined(CONFIG_TINY_RCU)
+void show_rcu_tasks_rude_gp_kthread(void)
{
show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
-static void show_rcu_tasks_rude_gp_kthread(void) {}
-#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
+EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
+#endif // !defined(CONFIG_TINY_RCU)
+#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
////////////////////////////////////////////////////////////////////////
//
@@ -975,6 +966,11 @@ static void rcu_tasks_trace_pregp_step(void)
static void rcu_tasks_trace_pertask(struct task_struct *t,
struct list_head *hop)
{
+ // During early boot when there is only the one boot CPU, there
+ // is no idle task for the other CPUs. Just return.
+ if (unlikely(t == NULL))
+ return;
+
WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
WRITE_ONCE(t->trc_reader_checked, false);
t->trc_ipi_to_cpu = -1;
@@ -1183,12 +1179,12 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
{
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
rcu_tasks_trace.gp_sleep = HZ / 10;
- rcu_tasks_trace.init_fract = 10;
+ rcu_tasks_trace.init_fract = HZ / 10;
} else {
rcu_tasks_trace.gp_sleep = HZ / 200;
if (rcu_tasks_trace.gp_sleep <= 0)
rcu_tasks_trace.gp_sleep = 1;
- rcu_tasks_trace.init_fract = HZ / 5;
+ rcu_tasks_trace.init_fract = HZ / 200;
if (rcu_tasks_trace.init_fract <= 0)
rcu_tasks_trace.init_fract = 1;
}
@@ -1200,10 +1196,9 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
return 0;
}
-core_initcall(rcu_spawn_tasks_trace_kthread);
-#ifndef CONFIG_TINY_RCU
-static void show_rcu_tasks_trace_gp_kthread(void)
+#if !defined(CONFIG_TINY_RCU)
+void show_rcu_tasks_trace_gp_kthread(void)
{
char buf[64];
@@ -1213,11 +1208,11 @@ static void show_rcu_tasks_trace_gp_kthread(void)
data_race(n_heavy_reader_attempts));
show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
-#endif /* #ifndef CONFIG_TINY_RCU */
+EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
+#endif // !defined(CONFIG_TINY_RCU)
#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
-static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
#ifndef CONFIG_TINY_RCU
@@ -1229,6 +1224,100 @@ void show_rcu_tasks_gp_kthreads(void)
}
#endif /* #ifndef CONFIG_TINY_RCU */
+#ifdef CONFIG_PROVE_RCU
+struct rcu_tasks_test_desc {
+ struct rcu_head rh;
+ const char *name;
+ bool notrun;
+};
+
+static struct rcu_tasks_test_desc tests[] = {
+ {
+ .name = "call_rcu_tasks()",
+ /* If not defined, the test is skipped. */
+ .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
+ },
+ {
+ .name = "call_rcu_tasks_rude()",
+ /* If not defined, the test is skipped. */
+ .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
+ },
+ {
+ .name = "call_rcu_tasks_trace()",
+ /* If not defined, the test is skipped. */
+ .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
+ }
+};
+
+static void test_rcu_tasks_callback(struct rcu_head *rhp)
+{
+ struct rcu_tasks_test_desc *rttd =
+ container_of(rhp, struct rcu_tasks_test_desc, rh);
+
+ pr_info("Callback from %s invoked.\n", rttd->name);
+
+ rttd->notrun = true;
+}
+
+static void rcu_tasks_initiate_self_tests(void)
+{
+ pr_info("Running RCU-tasks wait API self tests\n");
+#ifdef CONFIG_TASKS_RCU
+ synchronize_rcu_tasks();
+ call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+ synchronize_rcu_tasks_rude();
+ call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+ synchronize_rcu_tasks_trace();
+ call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
+#endif
+}
+
+static int rcu_tasks_verify_self_tests(void)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!tests[i].notrun) { // still hanging.
+ pr_err("%s has failed.\n", tests[i].name);
+ ret = -1;
+ }
+ }
+
+ if (ret)
+ WARN_ON(1);
+
+ return ret;
+}
+late_initcall(rcu_tasks_verify_self_tests);
+#else /* #ifdef CONFIG_PROVE_RCU */
+static void rcu_tasks_initiate_self_tests(void) { }
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+void __init rcu_init_tasks_generic(void)
+{
+#ifdef CONFIG_TASKS_RCU
+ rcu_spawn_tasks_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+ rcu_spawn_tasks_rude_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+ rcu_spawn_tasks_trace_kthread();
+#endif
+
+ // Run the self-tests.
+ rcu_tasks_initiate_self_tests();
+}
+
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
void show_rcu_tasks_gp_kthreads(void) {}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 06895ef85d69..da6f5213fb74 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -83,6 +83,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.dynticks_nesting = 1,
.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+#ifdef CONFIG_RCU_NOCB_CPU
+ .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
+#endif
};
static struct rcu_state rcu_state = {
.level = { &rcu_state.node[0] },
@@ -100,8 +103,10 @@ static struct rcu_state rcu_state = {
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
-static bool use_softirq = true;
+static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
+#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
+#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
@@ -177,7 +182,7 @@ module_param(rcu_unlock_delay, int, 0444);
* per-CPU. Object size is equal to one page. This value
* can be changed at boot time.
*/
-static int rcu_min_cached_objs = 2;
+static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);
/* Retrieve RCU kthreads priority for rcutorture */
@@ -341,6 +346,14 @@ static bool rcu_dynticks_in_eqs(int snap)
return !(snap & RCU_DYNTICK_CTRL_CTR);
}
+/* Return true if the specified CPU is currently idle from an RCU viewpoint. */
+bool rcu_is_idle_cpu(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+ return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
+}
+
/*
* Return true if the CPU corresponding to the specified rcu_data
* structure has spent some time in an extended quiescent state since
@@ -409,7 +422,7 @@ bool rcu_eqs_special_set(int cpu)
*
* The caller must have disabled interrupts and must not be idle.
*/
-void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_dyntick_idle(void)
{
int special;
@@ -546,12 +559,12 @@ static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param
return ret;
}
-static struct kernel_param_ops first_fqs_jiffies_ops = {
+static const struct kernel_param_ops first_fqs_jiffies_ops = {
.set = param_set_first_fqs_jiffies,
.get = param_get_ulong,
};
-static struct kernel_param_ops next_fqs_jiffies_ops = {
+static const struct kernel_param_ops next_fqs_jiffies_ops = {
.set = param_set_next_fqs_jiffies,
.get = param_get_ulong,
};
@@ -636,7 +649,6 @@ static noinstr void rcu_eqs_enter(bool user)
trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rdp = this_cpu_ptr(&rcu_data);
- do_nocb_deferred_wakeup(rdp);
rcu_prepare_for_idle();
rcu_preempt_deferred_qs(current);
@@ -670,6 +682,50 @@ void rcu_idle_enter(void)
EXPORT_SYMBOL_GPL(rcu_idle_enter);
#ifdef CONFIG_NO_HZ_FULL
+
+#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
+/*
+ * An empty function that will trigger a reschedule on
+ * IRQ tail once IRQs get re-enabled on userspace/guest resume.
+ */
+static void late_wakeup_func(struct irq_work *work)
+{
+}
+
+static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
+ IRQ_WORK_INIT(late_wakeup_func);
+
+/*
+ * If either:
+ *
+ * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
+ * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
+ *
+ * In these cases the late RCU wake ups aren't supported in the resched loops and our
+ * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
+ * get re-enabled again.
+ */
+noinstr static void rcu_irq_work_resched(void)
+{
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
+ if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
+ return;
+
+ if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
+ return;
+
+ instrumentation_begin();
+ if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
+ irq_work_queue(this_cpu_ptr(&late_wakeup_work));
+ }
+ instrumentation_end();
+}
+
+#else
+static inline void rcu_irq_work_resched(void) { }
+#endif
+
/**
* rcu_user_enter - inform RCU that we are resuming userspace.
*
@@ -684,8 +740,16 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
noinstr void rcu_user_enter(void)
{
lockdep_assert_irqs_disabled();
+
+ /*
+ * Unless the generic entry code is in use, we may be past the last
+ * rescheduling opportunity in the entry code. Trigger a self IPI
+ * that will fire and reschedule once we resume in user/guest mode.
+ */
+ rcu_irq_work_resched();
rcu_eqs_enter(true);
}
+
#endif /* CONFIG_NO_HZ_FULL */
/**
@@ -928,8 +992,8 @@ void __rcu_irq_enter_check_tick(void)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- // Enabling the tick is unsafe in NMI handlers.
- if (WARN_ON_ONCE(in_nmi()))
+ // If we're here from NMI there's nothing to do.
+ if (in_nmi())
return;
RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
@@ -1093,8 +1157,11 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
* CPU can safely enter RCU read-side critical sections. In other words,
* if the current CPU is not in its idle loop or is in an interrupt or
* NMI handler, return true.
+ *
+ * Make notrace because it can be called by the internal functions of
+ * ftrace, and making this notrace removes unnecessary recursion calls.
*/
-bool rcu_is_watching(void)
+notrace bool rcu_is_watching(void)
{
bool ret;
@@ -1149,7 +1216,7 @@ bool rcu_lockdep_current_cpu_online(void)
preempt_disable_notrace();
rdp = this_cpu_ptr(&rcu_data);
rnp = rdp->mynode;
- if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
+ if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
ret = true;
preempt_enable_notrace();
return ret;
@@ -1311,8 +1378,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
- init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
- atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
rdp->rcu_iw_pending = true;
rdp->rcu_iw_gp_seq = rnp->gp_seq;
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
@@ -1486,6 +1551,8 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
if (!rcu_segcblist_pend_cbs(&rdp->cblist))
return false;
+ trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
+
/*
* Callbacks are often registered with incomplete grace-period
* information. Something about the fact that getting exact
@@ -1506,6 +1573,8 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
else
trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
+ trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
+
return ret;
}
@@ -1603,8 +1672,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
bool ret = false;
bool need_qs;
- const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_is_offloaded(&rdp->cblist);
+ const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
raw_lockdep_assert_held_rcu_node(rnp);
@@ -1715,6 +1783,7 @@ static void rcu_strict_gp_boundary(void *unused)
*/
static bool rcu_gp_init(void)
{
+ unsigned long firstseq;
unsigned long flags;
unsigned long oldmask;
unsigned long mask;
@@ -1756,8 +1825,14 @@ static bool rcu_gp_init(void)
* go offline later. Please also refer to "Hotplug CPU" section
* of RCU's Requirements documentation.
*/
- rcu_state.gp_state = RCU_GP_ONOFF;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
rcu_for_each_leaf_node(rnp) {
+ smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
+ firstseq = READ_ONCE(rnp->ofl_seq);
+ if (firstseq & 0x1)
+ while (firstseq == READ_ONCE(rnp->ofl_seq))
+ schedule_timeout_idle(1); // Can't wake unless RCU is watching.
+ smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
raw_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_irq_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
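The ->ofl_seq wait added here pairs with the increments in rcu_cpu_starting() and rcu_report_dead() further down: the hotplug path keeps the counter odd for the duration of its update, and grace-period initialization spins (sleeping a jiffy at a time) until the counter is even again. A compressed user-space sketch of that wait-while-odd handshake, with hypothetical names and C11 seq_cst atomics standing in for the kernel's explicit smp_mb() pairing:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for rnp->ofl_seq and the state it brackets. */
static atomic_ulong ofl_seq;		/* odd while a hotplug update is in flight */
static atomic_ulong qsmask_model;

static void *hotplug_side(void *arg)
{
	(void)arg;
	atomic_fetch_add(&ofl_seq, 1);		/* counter goes odd */
	atomic_store_explicit(&qsmask_model, 42, memory_order_relaxed);
	atomic_fetch_add(&ofl_seq, 1);		/* counter even again */
	return NULL;
}

static void *gp_init_side(void *arg)
{
	(void)arg;
	/* Mirrors rcu_gp_init(): wait out any in-flight (odd) update. */
	while (atomic_load(&ofl_seq) & 0x1)
		;	/* the kernel sleeps here via schedule_timeout_idle(1) */
	printf("mask observed: %lu\n",
	       atomic_load_explicit(&qsmask_model, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t hp, gp;

	pthread_create(&hp, NULL, hotplug_side, NULL);
	pthread_create(&gp, NULL, gp_init_side, NULL);
	pthread_join(hp, NULL);
	pthread_join(gp, NULL);
	return 0;	/* prints 0 or 42 depending on scheduling */
}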
@@ -1816,7 +1891,7 @@ static bool rcu_gp_init(void)
* The grace period cannot complete until the initialization
* process finishes, because this kthread handles both.
*/
- rcu_state.gp_state = RCU_GP_INIT;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
rcu_for_each_node_breadth_first(rnp) {
rcu_gp_slow(gp_init_delay);
raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1915,17 +1990,22 @@ static void rcu_gp_fqs_loop(void)
ret = 0;
for (;;) {
if (!ret) {
- rcu_state.jiffies_force_qs = jiffies + j;
+ WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
+ /*
+ * jiffies_force_qs before RCU_GP_WAIT_FQS state
+ * update; required for stall checks.
+ */
+ smp_wmb();
WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
jiffies + (j ? 3 * j : 2));
}
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
TPS("fqswait"));
- rcu_state.gp_state = RCU_GP_WAIT_FQS;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
ret = swait_event_idle_timeout_exclusive(
rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
rcu_gp_torture_wait();
- rcu_state.gp_state = RCU_GP_DOING_FQS;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
/* Locking provides needed memory barriers. */
/* If grace period done, leave loop. */
if (!READ_ONCE(rnp->qsmask) &&
@@ -2039,7 +2119,7 @@ static void rcu_gp_cleanup(void)
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
rcu_seq_end(&rcu_state.gp_seq);
ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
- rcu_state.gp_state = RCU_GP_IDLE;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data);
if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
@@ -2048,8 +2128,7 @@ static void rcu_gp_cleanup(void)
needgp = true;
}
/* Advance CBs to reduce false positives below. */
- offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_is_offloaded(&rdp->cblist);
+ offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
@@ -2079,12 +2158,12 @@ static int __noreturn rcu_gp_kthread(void *unused)
for (;;) {
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
TPS("reqwait"));
- rcu_state.gp_state = RCU_GP_WAIT_GPS;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
swait_event_idle_exclusive(rcu_state.gp_wq,
READ_ONCE(rcu_state.gp_flags) &
RCU_GP_FLAG_INIT);
rcu_gp_torture_wait();
- rcu_state.gp_state = RCU_GP_DONE_GPS;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
/* Locking provides needed memory barrier. */
if (rcu_gp_init())
break;
@@ -2099,9 +2178,9 @@ static int __noreturn rcu_gp_kthread(void *unused)
rcu_gp_fqs_loop();
/* Handle grace-period end. */
- rcu_state.gp_state = RCU_GP_CLEANUP;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
rcu_gp_cleanup();
- rcu_state.gp_state = RCU_GP_CLEANED;
+ WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
}
}
@@ -2248,8 +2327,7 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
unsigned long flags;
unsigned long mask;
bool needwake = false;
- const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_is_offloaded(&rdp->cblist);
+ const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
struct rcu_node *rnp;
WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2399,6 +2477,7 @@ int rcutree_dead_cpu(unsigned int cpu)
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return 0;
+ WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
/* Adjust any no-longer-needed kthreads. */
rcu_boost_kthread_setaffinity(rnp, -1);
/* Do any needed no-CB deferred wakeups from this CPU. */
@@ -2416,12 +2495,12 @@ int rcutree_dead_cpu(unsigned int cpu)
static void rcu_do_batch(struct rcu_data *rdp)
{
int div;
+ bool __maybe_unused empty;
unsigned long flags;
- const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_is_offloaded(&rdp->cblist);
+ const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
struct rcu_head *rhp;
struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
- long bl, count;
+ long bl, count = 0;
long pending, tlimit = 0;
/* If no callbacks are ready, just return. */
@@ -2458,14 +2537,18 @@ static void rcu_do_batch(struct rcu_data *rdp)
rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
if (offloaded)
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
+
+ trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
rcu_nocb_unlock_irqrestore(rdp, flags);
/* Invoke callbacks. */
tick_dep_set_task(current, TICK_DEP_BIT_RCU);
rhp = rcu_cblist_dequeue(&rcl);
+
for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
rcu_callback_t f;
+ count++;
debug_rcu_head_unqueue(rhp);
rcu_lock_acquire(&rcu_callback_map);
@@ -2479,21 +2562,19 @@ static void rcu_do_batch(struct rcu_data *rdp)
/*
* Stop only if limit reached and CPU has something to do.
- * Note: The rcl structure counts down from zero.
*/
- if (-rcl.len >= bl && !offloaded &&
+ if (count >= bl && !offloaded &&
(need_resched() ||
(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
if (unlikely(tlimit)) {
/* only call local_clock() every 32 callbacks */
- if (likely((-rcl.len & 31) || local_clock() < tlimit))
+ if (likely((count & 31) || local_clock() < tlimit))
continue;
/* Exceeded the time limit, so leave. */
break;
}
- if (offloaded) {
- WARN_ON_ONCE(in_serving_softirq());
+ if (!in_serving_softirq()) {
local_bh_enable();
lockdep_assert_irqs_enabled();
cond_resched_tasks_rcu_qs();
@@ -2504,15 +2585,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
local_irq_save(flags);
rcu_nocb_lock(rdp);
- count = -rcl.len;
rdp->n_cbs_invoked += count;
trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
is_idle_task(current), rcu_is_callbacks_kthread());
/* Update counts and requeue any remaining callbacks. */
rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
- smp_mb(); /* List handling before counting for rcu_barrier(). */
- rcu_segcblist_insert_count(&rdp->cblist, &rcl);
+ rcu_segcblist_add_len(&rdp->cblist, -count);
/* Reinstate batch limit if we have worked down the excess. */
count = rcu_segcblist_n_cbs(&rdp->cblist);
@@ -2530,9 +2609,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
* The following usually indicates a double call_rcu(). To track
* this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
*/
- WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
+ empty = rcu_segcblist_empty(&rdp->cblist);
+ WARN_ON_ONCE(count == 0 && !empty);
WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- count != 0 && rcu_segcblist_empty(&rdp->cblist));
+ count != 0 && empty);
+ WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
+ WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
rcu_nocb_unlock_irqrestore(rdp, flags);
@@ -2553,6 +2635,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
void rcu_sched_clock_irq(int user)
{
trace_rcu_utilization(TPS("Start scheduler-tick"));
+ lockdep_assert_irqs_disabled();
raw_cpu_inc(rcu_data.ticks_this_gp);
/* The load-acquire pairs with the store-release setting to true. */
if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
@@ -2566,6 +2649,7 @@ void rcu_sched_clock_irq(int user)
rcu_flavor_sched_clock_irq(user);
if (rcu_pending(user))
invoke_rcu_core();
+ lockdep_assert_irqs_disabled();
trace_rcu_utilization(TPS("End scheduler-tick"));
}
@@ -2675,8 +2759,7 @@ static __latent_entropy void rcu_core(void)
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
- const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_is_offloaded(&rdp->cblist);
+ const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
if (cpu_is_offline(smp_processor_id()))
return;
@@ -2696,17 +2779,17 @@ static __latent_entropy void rcu_core(void)
/* No grace period and unregistered callbacks? */
if (!rcu_gp_in_progress() &&
- rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
- local_irq_save(flags);
+ rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
+ rcu_nocb_lock_irqsave(rdp, flags);
if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
rcu_accelerate_cbs_unlocked(rnp, rdp);
- local_irq_restore(flags);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
}
rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
- if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+ if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
likely(READ_ONCE(rcu_scheduler_fully_active)))
rcu_do_batch(rdp);
@@ -2929,6 +3012,7 @@ static void check_cb_ovld(struct rcu_data *rdp)
static void
__call_rcu(struct rcu_head *head, rcu_callback_t func)
{
+ static atomic_t doublefrees;
unsigned long flags;
struct rcu_data *rdp;
bool was_alldone;
@@ -2942,8 +3026,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
* Use rcu:rcu_callback trace event to find the previous
* time callback was passed to __call_rcu().
*/
- WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
- head, head->func);
+ if (atomic_inc_return(&doublefrees) < 4) {
+ pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
+ mem_dump_obj(head);
+ }
WRITE_ONCE(head->func, rcu_leak_callback);
return;
}
@@ -2977,9 +3063,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_cbs(&rdp->cblist));
+ trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
+
/* Go handle any RCU core processing required. */
- if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
+ if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
} else {
__call_rcu_core(rdp, head, flags);
@@ -3084,6 +3171,9 @@ struct kfree_rcu_cpu_work {
* In order to save some per-cpu space the list is singular.
* Even though it is lockless an access has to be protected by the
* per-cpu lock.
+ * @page_cache_work: A work to refill the cache when it is empty
+ * @work_in_progress: Indicates that page_cache_work is running
+ * @hrtimer: A hrtimer for scheduling a page_cache_work
* @nr_bkv_objs: number of allocated objects at @bkvcache.
*
* This is a per-CPU structure. The reason that it is not included in
@@ -3100,6 +3190,11 @@ struct kfree_rcu_cpu {
bool monitor_todo;
bool initialized;
int count;
+
+ struct work_struct page_cache_work;
+ atomic_t work_in_progress;
+ struct hrtimer hrtimer;
+
struct llist_head bkvcache;
int nr_bkv_objs;
};
@@ -3217,10 +3312,10 @@ static void kfree_rcu_work(struct work_struct *work)
}
rcu_lock_release(&rcu_callback_map);
- krcp = krc_this_cpu_lock(&flags);
+ raw_spin_lock_irqsave(&krcp->lock, flags);
if (put_cached_bnode(krcp, bkvhead[i]))
bkvhead[i] = NULL;
- krc_this_cpu_unlock(krcp, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
if (bkvhead[i])
free_page((unsigned long) bkvhead[i]);
@@ -3347,6 +3442,57 @@ static void kfree_rcu_monitor(struct work_struct *work)
raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
+static enum hrtimer_restart
+schedule_page_work_fn(struct hrtimer *t)
+{
+ struct kfree_rcu_cpu *krcp =
+ container_of(t, struct kfree_rcu_cpu, hrtimer);
+
+ queue_work(system_highpri_wq, &krcp->page_cache_work);
+ return HRTIMER_NORESTART;
+}
+
+static void fill_page_cache_func(struct work_struct *work)
+{
+ struct kvfree_rcu_bulk_data *bnode;
+ struct kfree_rcu_cpu *krcp =
+ container_of(work, struct kfree_rcu_cpu,
+ page_cache_work);
+ unsigned long flags;
+ bool pushed;
+ int i;
+
+ for (i = 0; i < rcu_min_cached_objs; i++) {
+ bnode = (struct kvfree_rcu_bulk_data *)
+ __get_free_page(GFP_KERNEL | __GFP_NOWARN);
+
+ if (bnode) {
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ pushed = put_cached_bnode(krcp, bnode);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+ if (!pushed) {
+ free_page((unsigned long) bnode);
+ break;
+ }
+ }
+ }
+
+ atomic_set(&krcp->work_in_progress, 0);
+}
+
+static void
+run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+{
+ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+ !atomic_xchg(&krcp->work_in_progress, 1)) {
+ hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ krcp->hrtimer.function = schedule_page_work_fn;
+ hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+ }
+}
+
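One way to read run_page_cache_worker() above: the atomic_xchg() makes cache refills single-flight, and the zero-delay hrtimer is just a trampoline that defers the actual queue_work() so that fill_page_cache_func() can allocate with GFP_KERNEL from workqueue (process) context rather than from whatever atomic context kvfree_call_rcu() ran in. A stripped-down user-space sketch of the single-flight guard alone (illustrative names, no kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for krcp->work_in_progress: 1 while a refill is queued or running. */
static atomic_int work_in_progress;

/* Models run_page_cache_worker(): only the first caller kicks the refill. */
static int try_kick_refill(void)
{
	if (atomic_exchange(&work_in_progress, 1) == 0) {
		/* Here the kernel arms the zero-delay hrtimer that queues
		 * page_cache_work; this sketch just reports the kick. */
		return 1;
	}
	return 0;	/* a refill is already in flight, coalesce */
}

/* Models the tail of fill_page_cache_func(). */
static void refill_done(void)
{
	atomic_store(&work_in_progress, 0);
}

int main(void)
{
	printf("first kick:  %d\n", try_kick_refill());	/* 1 */
	printf("second kick: %d\n", try_kick_refill());	/* 0 */
	refill_done();
	printf("after done:  %d\n", try_kick_refill());	/* 1 */
	return 0;
}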
static inline bool
kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
{
@@ -3363,32 +3509,8 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
if (!krcp->bkvhead[idx] ||
krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
bnode = get_cached_bnode(krcp);
- if (!bnode) {
- /*
- * To keep this path working on raw non-preemptible
- * sections, prevent the optional entry into the
- * allocator as it uses sleeping locks. In fact, even
- * if the caller of kfree_rcu() is preemptible, this
- * path still is not, as krcp->lock is a raw spinlock.
- * With additional page pre-allocation in the works,
- * hitting this return is going to be much less likely.
- */
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- return false;
-
- /*
- * NOTE: For one argument of kvfree_rcu() we can
- * drop the lock and get the page in sleepable
- * context. That would allow to maintain an array
- * for the CONFIG_PREEMPT_RT as well if no cached
- * pages are available.
- */
- bnode = (struct kvfree_rcu_bulk_data *)
- __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- }
-
/* Switch to emergency path. */
- if (unlikely(!bnode))
+ if (!bnode)
return false;
/* Initialize the new block. */
@@ -3452,12 +3574,11 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
goto unlock_return;
}
- /*
- * Under high memory pressure GFP_NOWAIT can fail,
- * in that case the emergency path is maintained.
- */
+ kasan_record_aux_stack(ptr);
success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
if (!success) {
+ run_page_cache_worker(krcp);
+
if (head == NULL)
// Inline if kvfree_rcu(one_arg) call.
goto unlock_return;
@@ -3567,7 +3688,7 @@ void __init kfree_rcu_scheduler_running(void)
* During early boot, any blocking grace-period wait automatically
* implies a grace period. Later on, this is never the case for PREEMPTION.
*
- * Howevr, because a context switch is a grace period for !PREEMPTION, any
+ * However, because a context switch is a grace period for !PREEMPTION, any
* blocking grace-period wait automatically implies a grace period if
* there is only one CPU online at any point time during execution of
* either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
@@ -3583,7 +3704,20 @@ static int rcu_blocking_is_gp(void)
return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
might_sleep(); /* Check for RCU read-side critical section. */
preempt_disable();
- ret = num_online_cpus() <= 1;
+ /*
+ * If the rcu_state.n_online_cpus counter is equal to one,
+ * there is only one CPU, and that CPU sees all prior accesses
+ * made by any CPU that was online at the time of its access.
+ * Furthermore, if this counter is equal to one, its value cannot
+ * change until after the preempt_enable() below.
+ *
+ * Furthermore, if rcu_state.n_online_cpus is equal to one here,
+ * all later CPUs (both this one and any that come online later
+ * on) are guaranteed to see all accesses prior to this point
+ * in the code, without the need for additional memory barriers.
+ * Those memory barriers are provided by CPU-hotplug code.
+ */
+ ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
preempt_enable();
return ret;
}
@@ -3628,7 +3762,7 @@ void synchronize_rcu(void)
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu() in RCU read-side critical section");
if (rcu_blocking_is_gp())
- return;
+ return; // Context allows vacuous grace periods.
if (rcu_gp_is_expedited())
synchronize_rcu_expedited();
else
@@ -3690,6 +3824,8 @@ static int rcu_pending(int user)
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
+ lockdep_assert_irqs_disabled();
+
/* Check for CPU stalls, if enabled. */
check_cpu_stall(rdp);
@@ -3707,13 +3843,13 @@ static int rcu_pending(int user)
return 1;
/* Does this CPU have callbacks ready to invoke? */
- if (rcu_segcblist_ready_cbs(&rdp->cblist))
+ if (!rcu_segcblist_is_offloaded(&rdp->cblist) &&
+ rcu_segcblist_ready_cbs(&rdp->cblist))
return 1;
/* Has RCU gone idle with this CPU needing another grace period? */
if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
- (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
- !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
+ !rcu_segcblist_is_offloaded(&rdp->cblist) &&
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
return 1;
@@ -3944,12 +4080,18 @@ int rcutree_prepare_cpu(unsigned int cpu)
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rcu_state.n_force_qs;
rdp->blimit = blimit;
- if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
- !rcu_segcblist_is_offloaded(&rdp->cblist))
- rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
rcu_dynticks_eqs_online();
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+ /*
+ * Lock in case the CB/GP kthreads are still around handling
+ * old callbacks (longer term we should flush all callbacks
+ * before completing CPU offline)
+ */
+ rcu_nocb_lock(rdp);
+ if (rcu_segcblist_empty(&rdp->cblist)) /* No early-boot CBs? */
+ rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
+ rcu_nocb_unlock(rdp);
/*
* Add CPU to leaf rcu_node pending-online bitmask. Any needed
@@ -3964,11 +4106,13 @@ int rcutree_prepare_cpu(unsigned int cpu)
rdp->cpu_no_qs.b.norm = true;
rdp->core_needs_qs = false;
rdp->rcu_iw_pending = false;
+ rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcu_prepare_kthreads(cpu);
rcu_spawn_cpu_nocb_kthread(cpu);
+ WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
return 0;
}
@@ -4057,6 +4201,9 @@ void rcu_cpu_starting(unsigned int cpu)
rnp = rdp->mynode;
mask = rdp->grpmask;
+ WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
+ WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
+ smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
raw_spin_lock_irqsave_rcu_node(rnp, flags);
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
newcpu = !(rnp->expmaskinitnext & mask);
@@ -4067,17 +4214,21 @@ void rcu_cpu_starting(unsigned int cpu)
rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
- if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+
+ /* An incoming CPU should never be blocking a grace period. */
+ if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
rcu_disable_urgency_upon_qs(rdp);
/* Report QS -after- changing ->qsmaskinitnext! */
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
} else {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
+ smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
+ WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
+ WARN_ON_ONCE(rnp->ofl_seq & 0x1);
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
-#ifdef CONFIG_HOTPLUG_CPU
/*
* The outgoing function has no further need of RCU, so remove it from
* the rcu_node tree's ->qsmaskinitnext bit masks.
@@ -4093,6 +4244,9 @@ void rcu_report_dead(unsigned int cpu)
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+ // Do any dangling deferred wakeups.
+ do_nocb_deferred_wakeup(rdp);
+
/* QS for any half-done expedited grace period. */
preempt_disable();
rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
@@ -4101,6 +4255,9 @@ void rcu_report_dead(unsigned int cpu)
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
+ WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
+ WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
+ smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
raw_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
@@ -4113,10 +4270,14 @@ void rcu_report_dead(unsigned int cpu)
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
raw_spin_unlock(&rcu_state.ofl_lock);
+ smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
+ WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
+ WARN_ON_ONCE(rnp->ofl_seq & 0x1);
rdp->cpu_started = false;
}
+#ifdef CONFIG_HOTPLUG_CPU
/*
* The outgoing CPU has just passed through the dying-idle state, and we
* are being invoked from the CPU that was IPIed to continue the offline
@@ -4449,24 +4610,14 @@ static void __init kfree_rcu_batch_init(void)
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
- struct kvfree_rcu_bulk_data *bnode;
for (i = 0; i < KFREE_N_BATCHES; i++) {
INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
krcp->krw_arr[i].krcp = krcp;
}
- for (i = 0; i < rcu_min_cached_objs; i++) {
- bnode = (struct kvfree_rcu_bulk_data *)
- __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-
- if (bnode)
- put_cached_bnode(krcp, bnode);
- else
- pr_err("Failed to preallocate for %d CPU!\n", cpu);
- }
-
INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
+ INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
krcp->initialized = true;
}
if (register_shrinker(&kfree_rcu_shrinker))
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index e4f66b8f7c47..71821d59d95c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -56,6 +56,7 @@ struct rcu_node {
/* Initialized from ->qsmaskinitnext at the */
/* beginning of each grace period. */
unsigned long qsmaskinitnext;
+ unsigned long ofl_seq; /* CPU-hotplug operation sequence count. */
/* Online CPUs for next grace period. */
unsigned long expmask; /* CPUs or groups that need to check in */
/* to allow the current expedited GP */
@@ -200,6 +201,7 @@ struct rcu_data {
/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
+ struct swait_queue_head nocb_state_wq; /* For offloading state changes */
struct task_struct *nocb_gp_kthread;
raw_spinlock_t nocb_lock; /* Guard following pair of fields. */
atomic_t nocb_lock_contended; /* Contention experienced. */
@@ -255,6 +257,7 @@ struct rcu_data {
};
/* Values for nocb_defer_wakeup field in struct rcu_data. */
+#define RCU_NOCB_WAKE_OFF -1
#define RCU_NOCB_WAKE_NOT 0
#define RCU_NOCB_WAKE 1
#define RCU_NOCB_WAKE_FORCE 2
@@ -298,6 +301,7 @@ struct rcu_state {
/* Hierarchy levels (+1 to */
/* shut bogus gcc warning) */
int ncpus; /* # CPUs seen so far. */
+ int n_online_cpus; /* # CPUs online for RCU. */
/* The following fields are guarded by the root rcu_node's lock. */
@@ -431,7 +435,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
-static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8760b6ead770..6c6ff06d4ae6 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -545,7 +545,7 @@ static void synchronize_rcu_expedited_wait(void)
data_race(rnp_root->expmask),
".T"[!!data_race(rnp_root->exp_tasks)]);
if (ndetected) {
- pr_err("blocking rcu_node structures:");
+ pr_err("blocking rcu_node structures (internal RCU debug):");
rcu_for_each_node_breadth_first(rnp) {
if (rnp == rnp_root)
continue; /* printed unconditionally */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index fd8a52e9a887..2d603771c7dc 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -628,7 +628,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
set_tsk_need_resched(current);
set_preempt_need_resched();
if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
- !rdp->defer_qs_iw_pending && exp) {
+ !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) {
// Get scheduler to re-evaluate and call hooks.
// If !IRQ_WORK, FQS scan will eventually IPI.
init_irq_work(&rdp->defer_qs_iw,
@@ -682,6 +682,7 @@ static void rcu_flavor_sched_clock_irq(int user)
{
struct task_struct *t = current;
+ lockdep_assert_irqs_disabled();
if (user || rcu_is_cpu_rrupt_from_idle()) {
rcu_note_voluntary_context_switch(current);
}
@@ -1631,8 +1632,8 @@ bool rcu_is_nocb_cpu(int cpu)
* Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock
* and this function releases it.
*/
-static void wake_nocb_gp(struct rcu_data *rdp, bool force,
- unsigned long flags)
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
+ unsigned long flags)
__releases(rdp->nocb_lock)
{
bool needwake = false;
@@ -1643,7 +1644,7 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("AlreadyAwake"));
rcu_nocb_unlock_irqrestore(rdp, flags);
- return;
+ return false;
}
del_timer(&rdp->nocb_timer);
rcu_nocb_unlock_irqrestore(rdp, flags);
@@ -1656,6 +1657,8 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
if (needwake)
wake_up_process(rdp_gp->nocb_gp_kthread);
+
+ return needwake;
}
/*
@@ -1665,6 +1668,8 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
const char *reason)
{
+ if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_OFF)
+ return;
if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
mod_timer(&rdp->nocb_timer, jiffies + 1);
if (rdp->nocb_defer_wakeup < waketype)
@@ -1929,6 +1934,52 @@ static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
}
/*
+ * Check whether we ignore this rdp.
+ *
+ * We check this without holding the nocb lock, but
+ * we make sure not to miss a freshly offloaded rdp
+ * thanks to the following ordering:
+ *
+ * rdp_offload_toggle() nocb_gp_enabled_cb()
+ * ------------------------- ----------------------------
+ * WRITE flags LOCK nocb_gp_lock
+ * LOCK nocb_gp_lock READ/WRITE nocb_gp_sleep
+ * READ/WRITE nocb_gp_sleep UNLOCK nocb_gp_lock
+ * UNLOCK nocb_gp_lock READ flags
+ */
+static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
+{
+ u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
+
+ return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state)
+{
+ struct rcu_segcblist *cblist = &rdp->cblist;
+
+ if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+ rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
+ if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+ *needwake_state = true;
+ }
+ return true;
+ }
+
+ /*
+ * De-offloading. Clear our flag and notify the de-offload worker.
+ * We will ignore this rdp until it gets re-offloaded again.
+ */
+ WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+ rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+ *needwake_state = true;
+ return false;
+}
+
+
+/*
* No-CBs GP kthreads come here to wait for additional callbacks to show up
* or for grace periods to end.
*/
@@ -1956,8 +2007,18 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
*/
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
+ bool needwake_state = false;
+
+ if (!nocb_gp_enabled_cb(rdp))
+ continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rcu_nocb_lock_irqsave(rdp, flags);
+ if (!nocb_gp_update_state(rdp, &needwake_state)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ if (needwake_state)
+ swake_up_one(&rdp->nocb_state_wq);
+ continue;
+ }
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
if (bypass_ncbs &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
@@ -1967,6 +2028,8 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
rcu_nocb_unlock_irqrestore(rdp, flags);
+ if (needwake_state)
+ swake_up_one(&rdp->nocb_state_wq);
continue; /* No callbacks here, try next. */
}
if (bypass_ncbs) {
@@ -2018,6 +2081,8 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
}
if (needwake_gp)
rcu_gp_kthread_wake();
+ if (needwake_state)
+ swake_up_one(&rdp->nocb_state_wq);
}
my_rdp->nocb_gp_bypass = bypass;
@@ -2081,14 +2146,27 @@ static int rcu_nocb_gp_kthread(void *arg)
return 0;
}
+static inline bool nocb_cb_can_run(struct rcu_data *rdp)
+{
+ u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
+ return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
+{
+ return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+}
+
/*
* Invoke any ready callbacks from the corresponding no-CBs CPU,
* then, if there are no more, wait for more to appear.
*/
static void nocb_cb_wait(struct rcu_data *rdp)
{
+ struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long cur_gp_seq;
unsigned long flags;
+ bool needwake_state = false;
bool needwake_gp = false;
struct rcu_node *rnp = rdp->mynode;
@@ -2100,32 +2178,55 @@ static void nocb_cb_wait(struct rcu_data *rdp)
local_bh_enable();
lockdep_assert_irqs_enabled();
rcu_nocb_lock_irqsave(rdp, flags);
- if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+ if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
}
- if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
- if (needwake_gp)
- rcu_gp_kthread_wake();
- return;
- }
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
WRITE_ONCE(rdp->nocb_cb_sleep, true);
+
+ if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
+ rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
+ if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+ needwake_state = true;
+ }
+ if (rcu_segcblist_ready_cbs(cblist))
+ WRITE_ONCE(rdp->nocb_cb_sleep, false);
+ } else {
+ /*
+ * De-offloading. Clear our flag and notify the de-offload worker.
+ * We won't touch the callbacks and will keep sleeping until we
+ * get re-offloaded again.
+ */
+ WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
+ rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+ needwake_state = true;
+ }
+
+ if (rdp->nocb_cb_sleep)
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
+
rcu_nocb_unlock_irqrestore(rdp, flags);
if (needwake_gp)
rcu_gp_kthread_wake();
- swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
- !READ_ONCE(rdp->nocb_cb_sleep));
- if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
- /* ^^^ Ensure CB invocation follows _sleep test. */
- return;
- }
- WARN_ON(signal_pending(current));
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+
+ if (needwake_state)
+ swake_up_one(&rdp->nocb_state_wq);
+
+ do {
+ swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+ nocb_cb_wait_cond(rdp));
+
+ // VVV Ensure CB invocation follows _sleep test.
+ if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
+ WARN_ON(signal_pending(current));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+ }
+ } while (!nocb_cb_can_run(rdp));
}
/*
@@ -2148,24 +2249,27 @@ static int rcu_nocb_cb_kthread(void *arg)
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
- return READ_ONCE(rdp->nocb_defer_wakeup);
+ return READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT;
}
/* Do a deferred wakeup of rcu_nocb_kthread(). */
-static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
{
unsigned long flags;
int ndw;
+ int ret;
rcu_nocb_lock_irqsave(rdp, flags);
if (!rcu_nocb_need_deferred_wakeup(rdp)) {
rcu_nocb_unlock_irqrestore(rdp, flags);
- return;
+ return false;
}
ndw = READ_ONCE(rdp->nocb_defer_wakeup);
WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
- wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+ ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+
+ return ret;
}
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
@@ -2181,12 +2285,208 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
* This means we do an inexact common-case check. Note that if
* we miss, ->nocb_timer will eventually clean things up.
*/
-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
if (rcu_nocb_need_deferred_wakeup(rdp))
- do_nocb_deferred_wakeup_common(rdp);
+ return do_nocb_deferred_wakeup_common(rdp);
+ return false;
}
+void rcu_nocb_flush_deferred_wakeup(void)
+{
+ do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
+
+static int rdp_offload_toggle(struct rcu_data *rdp,
+ bool offload, unsigned long flags)
+ __releases(rdp->nocb_lock)
+{
+ struct rcu_segcblist *cblist = &rdp->cblist;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+ bool wake_gp = false;
+
+ rcu_segcblist_offload(cblist, offload);
+
+ if (rdp->nocb_cb_sleep)
+ rdp->nocb_cb_sleep = false;
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+
+ /*
+ * Ignore former value of nocb_cb_sleep and force wake up as it could
+ * have been spuriously set to false already.
+ */
+ swake_up_one(&rdp->nocb_cb_wq);
+
+ raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ if (rdp_gp->nocb_gp_sleep) {
+ rdp_gp->nocb_gp_sleep = false;
+ wake_gp = true;
+ }
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+ if (wake_gp)
+ wake_up_process(rdp_gp->nocb_gp_kthread);
+
+ return 0;
+}
+
+static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
+{
+ struct rcu_segcblist *cblist = &rdp->cblist;
+ unsigned long flags;
+ int ret;
+
+ pr_info("De-offloading %d\n", rdp->cpu);
+
+ rcu_nocb_lock_irqsave(rdp, flags);
+ /*
+ * If there is still pending offloaded work, the offline
+ * CPU won't help much in handling it.
+ */
+ if (cpu_is_offline(rdp->cpu) && !rcu_segcblist_empty(&rdp->cblist)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ return -EBUSY;
+ }
+
+ ret = rdp_offload_toggle(rdp, false, flags);
+ swait_event_exclusive(rdp->nocb_state_wq,
+ !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
+ SEGCBLIST_KTHREAD_GP));
+ rcu_nocb_lock_irqsave(rdp, flags);
+ /* Make sure nocb timer won't stay around */
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ del_timer_sync(&rdp->nocb_timer);
+
+ /*
+ * Flush bypass. While IRQs are disabled and once we set
+ * SEGCBLIST_SOFTIRQ_ONLY, no callback is supposed to be
+ * enqueued on bypass.
+ */
+ rcu_nocb_lock_irqsave(rdp, flags);
+ rcu_nocb_flush_bypass(rdp, NULL, jiffies);
+ rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
+ /*
+ * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
+ * rcu_nocb_unlock_irqrestore() anymore. Theoretically we
+ * could set SEGCBLIST_SOFTIRQ_ONLY with cb unlocked and IRQs
+ * disabled now, but let's be paranoid.
+ */
+ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+ return ret;
+}
+
+static long rcu_nocb_rdp_deoffload(void *arg)
+{
+ struct rcu_data *rdp = arg;
+
+ WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+ return __rcu_nocb_rdp_deoffload(rdp);
+}
+
+int rcu_nocb_cpu_deoffload(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ int ret = 0;
+
+ if (rdp == rdp->nocb_gp_rdp) {
+ pr_info("Can't deoffload an rdp GP leader (yet)\n");
+ return -EINVAL;
+ }
+ mutex_lock(&rcu_state.barrier_mutex);
+ cpus_read_lock();
+ if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
+ if (cpu_online(cpu))
+ ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+ else
+ ret = __rcu_nocb_rdp_deoffload(rdp);
+ if (!ret)
+ cpumask_clear_cpu(cpu, rcu_nocb_mask);
+ }
+ cpus_read_unlock();
+ mutex_unlock(&rcu_state.barrier_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
+
+static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
+{
+ struct rcu_segcblist *cblist = &rdp->cblist;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * For now we only support re-offload, i.e., the rdp must have been
+ * offloaded at boot first.
+ */
+ if (!rdp->nocb_gp_rdp)
+ return -EINVAL;
+
+ pr_info("Offloading %d\n", rdp->cpu);
+ /*
+ * Can't use rcu_nocb_lock_irqsave() while we are in
+ * SEGCBLIST_SOFTIRQ_ONLY mode.
+ */
+ raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ /* Re-enable nocb timer */
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ /*
+ * We didn't take the nocb lock while working on the
+ * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
+ * Every modification that has been done previously on
+ * rdp->cblist must be visible remotely to the nocb kthreads
+ * upon wake-up, after they read the cblist flags.
+ *
+ * The layout against nocb_lock enforces that ordering:
+ *
+ * __rcu_nocb_rdp_offload() nocb_cb_wait()/nocb_gp_wait()
+ * ------------------------- ----------------------------
+ * WRITE callbacks rcu_nocb_lock()
+ * rcu_nocb_lock() READ flags
+ * WRITE flags READ callbacks
+ * rcu_nocb_unlock() rcu_nocb_unlock()
+ */
+ ret = rdp_offload_toggle(rdp, true, flags);
+ swait_event_exclusive(rdp->nocb_state_wq,
+ rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
+ rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+
+ return ret;
+}
+
+static long rcu_nocb_rdp_offload(void *arg)
+{
+ struct rcu_data *rdp = arg;
+
+ WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+ return __rcu_nocb_rdp_offload(rdp);
+}
+
+int rcu_nocb_cpu_offload(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ int ret = 0;
+
+ mutex_lock(&rcu_state.barrier_mutex);
+ cpus_read_lock();
+ if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
+ if (cpu_online(cpu))
+ ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+ else
+ ret = __rcu_nocb_rdp_offload(rdp);
+ if (!ret)
+ cpumask_set_cpu(cpu, rcu_nocb_mask);
+ }
+ cpus_read_unlock();
+ mutex_unlock(&rcu_state.barrier_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
+
void __init rcu_init_nohz(void)
{
int cpu;
@@ -2229,7 +2529,9 @@ void __init rcu_init_nohz(void)
rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
- rcu_segcblist_offload(&rdp->cblist);
+ rcu_segcblist_offload(&rdp->cblist, true);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
}
rcu_organize_nocb_kthreads();
}
@@ -2239,6 +2541,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
init_swait_queue_head(&rdp->nocb_cb_wq);
init_swait_queue_head(&rdp->nocb_gp_wq);
+ init_swait_queue_head(&rdp->nocb_state_wq);
raw_spin_lock_init(&rdp->nocb_lock);
raw_spin_lock_init(&rdp->nocb_bypass_lock);
raw_spin_lock_init(&rdp->nocb_gp_lock);
@@ -2381,6 +2684,19 @@ void rcu_bind_current_to_nocb(void)
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+// The ->on_cpu field is available only in CONFIG_SMP=y, so...
+#ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+ return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+}
+#else // #ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+ return "";
+}
+#endif // #else #ifdef CONFIG_SMP
+
/*
* Dump out nocb grace-period kthread state for the specified rcu_data
* structure.
@@ -2389,7 +2705,7 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
struct rcu_node *rnp = rdp->mynode;
- pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
+ pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
rdp->cpu,
"kK"[!!rdp->nocb_gp_kthread],
"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
@@ -2403,12 +2719,17 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
".B"[!!rdp->nocb_gp_bypass],
".G"[!!rdp->nocb_gp_gp],
(long)rdp->nocb_gp_seq,
- rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
+ rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
+ rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
+ rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+ show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
}
/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
+ char bufw[20];
+ char bufr[20];
struct rcu_segcblist *rsclp = &rdp->cblist;
bool waslocked;
bool wastimer;
@@ -2417,8 +2738,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
if (rdp->nocb_gp_rdp == rdp)
show_rcu_nocb_gp_state(rdp);
- pr_info(" CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
+ sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+ sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+ pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
rdp->cpu, rdp->nocb_gp_rdp->cpu,
+ rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
"kK"[!!rdp->nocb_cb_kthread],
"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
@@ -2429,11 +2753,16 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
jiffies - rdp->nocb_nobypass_last,
rdp->nocb_nobypass_count,
".D"[rcu_segcblist_ready_cbs(rsclp)],
- ".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
- ".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
- ".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
+ ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
+ rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
+ ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
+ rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
+ ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
- rcu_segcblist_n_cbs(&rdp->cblist));
+ rcu_segcblist_n_cbs(&rdp->cblist),
+ rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
+ rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+ show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
/* It is OK for GP kthreads to have GP state. */
if (rdp->nocb_gp_rdp == rdp)
@@ -2518,8 +2847,9 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
return false;
}
-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
+ return false;
}
static void rcu_spawn_cpu_nocb_kthread(int cpu)
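
The de-offload path above clears SEGCBLIST_OFFLOADED, wakes both nocb kthreads, and then waits on nocb_state_wq until each kthread has dropped its own KTHREAD_CB or KTHREAD_GP flag. A small single-threaded model of that flag bookkeeping (no real kthreads, locks, or waitqueues; flag names mirror the patch, everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define SEGCBLIST_OFFLOADED  0x1
#define SEGCBLIST_KTHREAD_CB 0x2
#define SEGCBLIST_KTHREAD_GP 0x4

static unsigned flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

/* What the GP kthread does when it notices OFFLOADED was cleared. */
static void gp_kthread_sees_deoffload(void)
{
	if (!(flags & SEGCBLIST_OFFLOADED))
		flags &= ~SEGCBLIST_KTHREAD_GP;
}

/* What the CB kthread does when it notices OFFLOADED was cleared. */
static void cb_kthread_sees_deoffload(void)
{
	if (!(flags & SEGCBLIST_OFFLOADED))
		flags &= ~SEGCBLIST_KTHREAD_CB;
}

/* The de-offload worker's wait condition on nocb_state_wq. */
static bool deoffload_done(void)
{
	return !(flags & (SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
}

int main(void)
{
	flags &= ~SEGCBLIST_OFFLOADED;   /* rdp_offload_toggle(rdp, false) */
	printf("done early? %d\n", deoffload_done());  /* 0: kthreads not done yet */
	gp_kthread_sees_deoffload();
	cb_kthread_sees_deoffload();
	printf("done now?   %d\n", deoffload_done());  /* 1 */
	return 0;
}
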
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 0fde39b8daab..475b26171b20 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -13,6 +13,7 @@
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
+int sysctl_max_rcu_stall_to_panic __read_mostly;
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
@@ -106,6 +107,11 @@ early_initcall(check_cpu_stall_init);
/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
+ static int cpu_stall;
+
+ if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
+ return;
+
if (sysctl_panic_on_rcu_stall)
panic("RCU Stall\n");
}
@@ -249,14 +255,18 @@ static bool check_slow_task(struct task_struct *t, void *arg)
/*
* Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
+ * sections, printing out the tid of each of the first few of them.
*/
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+ __releases(rnp->lock)
{
+ int i = 0;
int ndetected = 0;
struct rcu_stall_chk_rdr rscr;
struct task_struct *t;
+ struct task_struct *ts[8];
+ lockdep_assert_irqs_disabled();
if (!rcu_preempt_blocked_readers_cgp(rnp))
return 0;
pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
@@ -264,6 +274,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
t = list_entry(rnp->gp_tasks->prev,
struct task_struct, rcu_node_entry);
list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+ get_task_struct(t);
+ ts[i++] = t;
+ if (i >= ARRAY_SIZE(ts))
+ break;
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ for (i--; i; i--) {
+ t = ts[i];
if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
pr_cont(" P%d", t->pid);
else
@@ -273,6 +291,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
".q"[rscr.rs.b.need_qs],
".e"[rscr.rs.b.exp_hint],
".l"[rscr.on_blkd_list]);
+ lockdep_assert_irqs_disabled();
+ put_task_struct(t);
ndetected++;
}
pr_cont("\n");
@@ -293,8 +313,9 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections.
*/
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
{
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -314,9 +335,12 @@ static void rcu_dump_cpu_stacks(void)
rcu_for_each_leaf_node(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
for_each_leaf_node_possible_cpu(rnp, cpu)
- if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
- if (!trigger_single_cpu_backtrace(cpu))
+ if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+ if (cpu_is_offline(cpu))
+ pr_err("Offline CPU %d blocking current GP.\n", cpu);
+ else if (!trigger_single_cpu_backtrace(cpu))
dump_cpu_task(cpu);
+ }
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
}
@@ -430,25 +454,66 @@ static void print_cpu_stall_info(int cpu)
/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
+ int cpu;
struct task_struct *gpk = rcu_state.gp_kthread;
unsigned long j;
if (rcu_is_gp_kthread_starving(&j)) {
+ cpu = gpk ? task_cpu(gpk) : -1;
pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
rcu_state.name, j,
(long)rcu_seq_current(&rcu_state.gp_seq),
data_race(rcu_state.gp_flags),
gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
- gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+ gpk ? gpk->state : ~0, cpu);
if (gpk) {
pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
pr_err("RCU grace-period kthread stack dump:\n");
sched_show_task(gpk);
+ if (cpu >= 0) {
+ if (cpu_is_offline(cpu)) {
+ pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
+ } else {
+ pr_err("Stack dump where RCU GP kthread last ran:\n");
+ if (!trigger_single_cpu_backtrace(cpu))
+ dump_cpu_task(cpu);
+ }
+ }
wake_up_process(gpk);
}
}
}
+/* Complain about missing wakeups from expired fqs wait timer */
+static void rcu_check_gp_kthread_expired_fqs_timer(void)
+{
+ struct task_struct *gpk = rcu_state.gp_kthread;
+ short gp_state;
+ unsigned long jiffies_fqs;
+ int cpu;
+
+ /*
+ * Order reads of .gp_state and .jiffies_force_qs.
+ * Matching smp_wmb() is present in rcu_gp_fqs_loop().
+ */
+ gp_state = smp_load_acquire(&rcu_state.gp_state);
+ jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
+
+ if (gp_state == RCU_GP_WAIT_FQS &&
+ time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
+ gpk && !READ_ONCE(gpk->on_rq)) {
+ cpu = task_cpu(gpk);
+ pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx\n",
+ rcu_state.name, (jiffies - jiffies_fqs),
+ (long)rcu_seq_current(&rcu_state.gp_seq),
+ data_race(rcu_state.gp_flags),
+ gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
+ gpk->state);
+ pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
+ cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
+ }
+}
+
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
int cpu;
@@ -459,6 +524,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
struct rcu_node *rnp;
long totqlen = 0;
+ lockdep_assert_irqs_disabled();
+
/* Kick and suppress, if so configured. */
rcu_stall_kick_kthreads();
if (rcu_stall_is_suppressed())
@@ -472,7 +539,6 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
rcu_for_each_leaf_node(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- ndetected += rcu_print_task_stall(rnp);
if (rnp->qsmask != 0) {
for_each_leaf_node_possible_cpu(rnp, cpu)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
@@ -480,7 +546,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
ndetected++;
}
}
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
+ lockdep_assert_irqs_disabled();
}
for_each_possible_cpu(cpu)
@@ -511,6 +578,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
WRITE_ONCE(rcu_state.jiffies_stall,
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+ rcu_check_gp_kthread_expired_fqs_timer();
rcu_check_gp_kthread_starvation();
panic_on_rcu_stall();
@@ -526,6 +594,8 @@ static void print_cpu_stall(unsigned long gps)
struct rcu_node *rnp = rcu_get_root();
long totqlen = 0;
+ lockdep_assert_irqs_disabled();
+
/* Kick and suppress, if so configured. */
rcu_stall_kick_kthreads();
if (rcu_stall_is_suppressed())
@@ -546,6 +616,7 @@ static void print_cpu_stall(unsigned long gps)
jiffies - gps,
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
+ rcu_check_gp_kthread_expired_fqs_timer();
rcu_check_gp_kthread_starvation();
rcu_dump_cpu_stacks();
@@ -580,6 +651,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
unsigned long js;
struct rcu_node *rnp;
+ lockdep_assert_irqs_disabled();
if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
!rcu_gp_in_progress())
return;
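
The new sysctl_max_rcu_stall_to_panic makes panic_on_rcu_stall() skip the first N-1 stalls before honoring sysctl_panic_on_rcu_stall. A tiny userspace sketch of that counter logic, with panic() replaced by a print and the sysctls modeled as plain globals (an illustration of the threshold, not the kernel function):

#include <stdio.h>

static int panic_on_stall = 1;       /* models sysctl_panic_on_rcu_stall */
static int max_stall_to_panic = 3;   /* models sysctl_max_rcu_stall_to_panic */

static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	/* Ignore the first max_stall_to_panic - 1 stalls. */
	if (++cpu_stall < max_stall_to_panic)
		return;
	if (panic_on_stall)
		printf("panic: RCU Stall (stall #%d)\n", cpu_stall);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		panic_on_rcu_stall();   /* only the 3rd and 4th calls reach the panic check */
	return 0;
}
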
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 39334d2d2b37..b95ae86c40a7 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -56,8 +56,10 @@
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
-static int rcu_normal_after_boot;
+static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
+#ifndef CONFIG_PREEMPT_RT
module_param(rcu_normal_after_boot, int, 0);
+#endif
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/reboot.c b/kernel/reboot.c
index e7b78d5ae1ab..eb1b15850761 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -244,6 +244,8 @@ void migrate_to_reboot_cpu(void)
void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
+ if (pm_power_off_prepare)
+ pm_power_off_prepare();
migrate_to_reboot_cpu();
syscore_shutdown();
if (!cmd)
@@ -551,22 +553,26 @@ static int __init reboot_setup(char *str)
break;
case 's':
- {
- int rc;
-
- if (isdigit(*(str+1))) {
- rc = kstrtoint(str+1, 0, &reboot_cpu);
- if (rc)
- return rc;
- } else if (str[1] == 'm' && str[2] == 'p' &&
- isdigit(*(str+3))) {
- rc = kstrtoint(str+3, 0, &reboot_cpu);
- if (rc)
- return rc;
+ /*
+ * reboot_cpu is s[mp]#### with #### being the processor
+ * to be used for rebooting. Skip 's' or 'smp' prefix.
+ */
+ str += str[1] == 'm' && str[2] == 'p' ? 3 : 1;
+
+ if (isdigit(str[0])) {
+ int cpu = simple_strtoul(str, NULL, 0);
+
+ if (cpu >= num_possible_cpus()) {
+ pr_err("Ignoring the CPU number in reboot= option. "
+ "CPU %d exceeds possible cpu number %d\n",
+ cpu, num_possible_cpus());
+ break;
+ }
+ reboot_cpu = cpu;
} else
*mode = REBOOT_SOFT;
break;
- }
+
case 'g':
*mode = REBOOT_GPIO;
break;
@@ -594,3 +600,217 @@ static int __init reboot_setup(char *str)
return 1;
}
__setup("reboot=", reboot_setup);
+
+#ifdef CONFIG_SYSFS
+
+#define REBOOT_COLD_STR "cold"
+#define REBOOT_WARM_STR "warm"
+#define REBOOT_HARD_STR "hard"
+#define REBOOT_SOFT_STR "soft"
+#define REBOOT_GPIO_STR "gpio"
+#define REBOOT_UNDEFINED_STR "undefined"
+
+#define BOOT_TRIPLE_STR "triple"
+#define BOOT_KBD_STR "kbd"
+#define BOOT_BIOS_STR "bios"
+#define BOOT_ACPI_STR "acpi"
+#define BOOT_EFI_STR "efi"
+#define BOOT_PCI_STR "pci"
+
+static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ const char *val;
+
+ switch (reboot_mode) {
+ case REBOOT_COLD:
+ val = REBOOT_COLD_STR;
+ break;
+ case REBOOT_WARM:
+ val = REBOOT_WARM_STR;
+ break;
+ case REBOOT_HARD:
+ val = REBOOT_HARD_STR;
+ break;
+ case REBOOT_SOFT:
+ val = REBOOT_SOFT_STR;
+ break;
+ case REBOOT_GPIO:
+ val = REBOOT_GPIO_STR;
+ break;
+ default:
+ val = REBOOT_UNDEFINED_STR;
+ }
+
+ return sprintf(buf, "%s\n", val);
+}
+static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR)))
+ reboot_mode = REBOOT_COLD;
+ else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR)))
+ reboot_mode = REBOOT_WARM;
+ else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR)))
+ reboot_mode = REBOOT_HARD;
+ else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR)))
+ reboot_mode = REBOOT_SOFT;
+ else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR)))
+ reboot_mode = REBOOT_GPIO;
+ else
+ return -EINVAL;
+
+ reboot_default = 0;
+
+ return count;
+}
+static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
+
+#ifdef CONFIG_X86
+static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", reboot_force);
+}
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool res;
+
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (kstrtobool(buf, &res))
+ return -EINVAL;
+
+ reboot_default = 0;
+ reboot_force = res;
+
+ return count;
+}
+static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ const char *val;
+
+ switch (reboot_type) {
+ case BOOT_TRIPLE:
+ val = BOOT_TRIPLE_STR;
+ break;
+ case BOOT_KBD:
+ val = BOOT_KBD_STR;
+ break;
+ case BOOT_BIOS:
+ val = BOOT_BIOS_STR;
+ break;
+ case BOOT_ACPI:
+ val = BOOT_ACPI_STR;
+ break;
+ case BOOT_EFI:
+ val = BOOT_EFI_STR;
+ break;
+ case BOOT_CF9_FORCE:
+ val = BOOT_PCI_STR;
+ break;
+ default:
+ val = REBOOT_UNDEFINED_STR;
+ }
+
+ return sprintf(buf, "%s\n", val);
+}
+static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
+ reboot_type = BOOT_TRIPLE;
+ else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
+ reboot_type = BOOT_KBD;
+ else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
+ reboot_type = BOOT_BIOS;
+ else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
+ reboot_type = BOOT_ACPI;
+ else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
+ reboot_type = BOOT_EFI;
+ else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR)))
+ reboot_type = BOOT_CF9_FORCE;
+ else
+ return -EINVAL;
+
+ reboot_default = 0;
+
+ return count;
+}
+static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
+#endif
+
+#ifdef CONFIG_SMP
+static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", reboot_cpu);
+}
+static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int cpunum;
+ int rc;
+
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ rc = kstrtouint(buf, 0, &cpunum);
+
+ if (rc)
+ return rc;
+
+ if (cpunum >= num_possible_cpus())
+ return -ERANGE;
+
+ reboot_default = 0;
+ reboot_cpu = cpunum;
+
+ return count;
+}
+static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
+#endif
+
+static struct attribute *reboot_attrs[] = {
+ &reboot_mode_attr.attr,
+#ifdef CONFIG_X86
+ &reboot_force_attr.attr,
+ &reboot_type_attr.attr,
+#endif
+#ifdef CONFIG_SMP
+ &reboot_cpu_attr.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group reboot_attr_group = {
+ .attrs = reboot_attrs,
+};
+
+static int __init reboot_ksysfs_init(void)
+{
+ struct kobject *reboot_kobj;
+ int ret;
+
+ reboot_kobj = kobject_create_and_add("reboot", kernel_kobj);
+ if (!reboot_kobj)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(reboot_kobj, &reboot_attr_group);
+ if (ret) {
+ kobject_put(reboot_kobj);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(reboot_ksysfs_init);
+
+#endif
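
The new /sys/kernel/reboot attributes all follow the same store pattern: prefix-match the written string against a fixed table, update the global, and clear reboot_default. A userspace sketch of the string-to-enum step in mode_store(), with the capability check and error codes reduced to plain returns (names and the -1 error value are illustrative):

#include <stdio.h>
#include <string.h>

enum reboot_mode { REBOOT_UNDEFINED = -1, REBOOT_COLD, REBOOT_WARM, REBOOT_HARD, REBOOT_SOFT, REBOOT_GPIO };

static enum reboot_mode reboot_mode = REBOOT_UNDEFINED;
static int reboot_default = 1;

/* Returns 0 on success, -1 (think -EINVAL) on an unrecognized string. */
static int mode_store(const char *buf)
{
	static const struct { const char *name; enum reboot_mode mode; } table[] = {
		{ "cold", REBOOT_COLD }, { "warm", REBOOT_WARM },
		{ "hard", REBOOT_HARD }, { "soft", REBOOT_SOFT },
		{ "gpio", REBOOT_GPIO },
	};

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		/* Prefix match, so a trailing newline from sysfs is tolerated. */
		if (!strncmp(buf, table[i].name, strlen(table[i].name))) {
			reboot_mode = table[i].mode;
			reboot_default = 0;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	printf("store \"warm\\n\" -> %d, mode=%d\n", mode_store("warm\n"), reboot_mode);
	printf("store \"bogus\"  -> %d\n", mode_store("bogus"));
	return 0;
}
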
diff --git a/kernel/relay.c b/kernel/relay.c
index b08d936d5fa7..d1a67fbb819d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -28,15 +28,6 @@ static DEFINE_MUTEX(relay_channels_mutex);
static LIST_HEAD(relay_channels);
/*
- * close() vm_op implementation for relay file mapping.
- */
-static void relay_file_mmap_close(struct vm_area_struct *vma)
-{
- struct rchan_buf *buf = vma->vm_private_data;
- buf->chan->cb->buf_unmapped(buf, vma->vm_file);
-}
-
-/*
* fault() vm_op implementation for relay file mapping.
*/
static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
@@ -62,7 +53,6 @@ static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
*/
static const struct vm_operations_struct relay_file_mmap_ops = {
.fault = relay_buf_fault,
- .close = relay_file_mmap_close,
};
/*
@@ -96,7 +86,6 @@ static void relay_free_page_array(struct page **array)
static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
- struct file *filp = vma->vm_file;
if (!buf)
return -EBADF;
@@ -107,7 +96,6 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
vma->vm_ops = &relay_file_mmap_ops;
vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = buf;
- buf->chan->cb->buf_mapped(buf, filp);
return 0;
}
@@ -264,70 +252,16 @@ EXPORT_SYMBOL_GPL(relay_buf_full);
* High-level relay kernel API and associated functions.
*/
-/*
- * rchan_callback implementations defining default channel behavior. Used
- * in place of corresponding NULL values in client callback struct.
- */
-
-/*
- * subbuf_start() default callback. Does nothing.
- */
-static int subbuf_start_default_callback (struct rchan_buf *buf,
- void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
-{
- if (relay_buf_full(buf))
- return 0;
-
- return 1;
-}
-
-/*
- * buf_mapped() default callback. Does nothing.
- */
-static void buf_mapped_default_callback(struct rchan_buf *buf,
- struct file *filp)
-{
-}
-
-/*
- * buf_unmapped() default callback. Does nothing.
- */
-static void buf_unmapped_default_callback(struct rchan_buf *buf,
- struct file *filp)
-{
-}
-
-/*
- * create_buf_file_create() default callback. Does nothing.
- */
-static struct dentry *create_buf_file_default_callback(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
+static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
{
- return NULL;
-}
+ if (!buf->chan->cb->subbuf_start)
+ return !relay_buf_full(buf);
-/*
- * remove_buf_file() default callback. Does nothing.
- */
-static int remove_buf_file_default_callback(struct dentry *dentry)
-{
- return -EINVAL;
+ return buf->chan->cb->subbuf_start(buf, subbuf,
+ prev_subbuf, prev_padding);
}
-/* relay channel default callbacks */
-static struct rchan_callbacks default_channel_callbacks = {
- .subbuf_start = subbuf_start_default_callback,
- .buf_mapped = buf_mapped_default_callback,
- .buf_unmapped = buf_unmapped_default_callback,
- .create_buf_file = create_buf_file_default_callback,
- .remove_buf_file = remove_buf_file_default_callback,
-};
-
/**
* wakeup_readers - wake up readers waiting on a channel
* @work: contains the channel buffer
@@ -371,7 +305,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
for (i = 0; i < buf->chan->n_subbufs; i++)
buf->padding[i] = 0;
- buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0);
+ relay_subbuf_start(buf, buf->data, NULL, 0);
}
/**
@@ -499,27 +433,6 @@ static void relay_close_buf(struct rchan_buf *buf)
kref_put(&buf->kref, relay_remove_buf);
}
-static void setup_callbacks(struct rchan *chan,
- struct rchan_callbacks *cb)
-{
- if (!cb) {
- chan->cb = &default_channel_callbacks;
- return;
- }
-
- if (!cb->subbuf_start)
- cb->subbuf_start = subbuf_start_default_callback;
- if (!cb->buf_mapped)
- cb->buf_mapped = buf_mapped_default_callback;
- if (!cb->buf_unmapped)
- cb->buf_unmapped = buf_unmapped_default_callback;
- if (!cb->create_buf_file)
- cb->create_buf_file = create_buf_file_default_callback;
- if (!cb->remove_buf_file)
- cb->remove_buf_file = remove_buf_file_default_callback;
- chan->cb = cb;
-}
-
int relay_prepare_cpu(unsigned int cpu)
{
struct rchan *chan;
@@ -565,7 +478,7 @@ struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
- struct rchan_callbacks *cb,
+ const struct rchan_callbacks *cb,
void *private_data)
{
unsigned int i;
@@ -576,6 +489,8 @@ struct rchan *relay_open(const char *base_filename,
return NULL;
if (subbuf_size > UINT_MAX / n_subbufs)
return NULL;
+ if (!cb || !cb->create_buf_file || !cb->remove_buf_file)
+ return NULL;
chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
if (!chan)
@@ -597,7 +512,7 @@ struct rchan *relay_open(const char *base_filename,
chan->has_base_filename = 1;
strlcpy(chan->base_filename, base_filename, NAME_MAX);
}
- setup_callbacks(chan, cb);
+ chan->cb = cb;
kref_init(&chan->kref);
mutex_lock(&relay_channels_mutex);
@@ -780,7 +695,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
new = buf->start + new_subbuf * buf->chan->subbuf_size;
buf->offset = 0;
- if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
+ if (!relay_subbuf_start(buf, new, old, buf->prev_padding)) {
buf->offset = buf->chan->subbuf_size + 1;
return 0;
}
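
The relay change drops the per-callback default table in favor of one wrapper that falls back inline when subbuf_start is NULL, while relay_open() now rejects clients that omit the mandatory create_buf_file/remove_buf_file hooks. A minimal sketch of that "optional callback, mandatory callback" split, with a heavily trimmed channel struct and illustrative names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct chan;

struct chan_callbacks {
	/* Optional: NULL means "use the default policy". */
	int (*subbuf_start)(struct chan *chan);
	/* Mandatory: chan_open() refuses a NULL here. */
	int (*create_buf_file)(struct chan *chan);
};

struct chan {
	const struct chan_callbacks *cb;
	bool buf_full;
};

/* Wrapper with the inline default, as relay_subbuf_start() now does. */
static int chan_subbuf_start(struct chan *chan)
{
	if (!chan->cb->subbuf_start)
		return !chan->buf_full;      /* default: continue unless full */
	return chan->cb->subbuf_start(chan);
}

static int chan_open(struct chan *chan, const struct chan_callbacks *cb)
{
	if (!cb || !cb->create_buf_file)
		return -1;                   /* mandatory hook missing */
	chan->cb = cb;
	return 0;
}

static int my_create_buf_file(struct chan *chan) { (void)chan; return 0; }

int main(void)
{
	static const struct chan_callbacks cb = { .create_buf_file = my_create_buf_file };
	struct chan chan = { .buf_full = false };

	printf("open: %d\n", chan_open(&chan, &cb));
	printf("subbuf_start (default): %d\n", chan_subbuf_start(&chan));
	return 0;
}
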
diff --git a/kernel/resource.c b/kernel/resource.c
index 3ae2f56cc79d..833394f9c608 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -320,9 +320,8 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
/**
- * Finds the lowest iomem resource that covers part of [@start..@end]. The
- * caller must specify @start, @end, @flags, and @desc (which may be
- * IORES_DESC_NONE).
+ * find_next_iomem_res - Finds the lowest iomem resource that covers part of
+ * [@start..@end].
*
* If a resource is found, returns 0 and @*res is overwritten with the part
* of the resource that's within [@start..@end]; if none is found, returns
@@ -337,6 +336,9 @@ EXPORT_SYMBOL(release_resource);
* @desc: descriptor the resource must have
* @first_lvl: walk only the first level children, if set
* @res: return ptr, if resource found
+ *
+ * The caller must specify @start, @end, @flags, and @desc
+ * (which may be IORES_DESC_NONE).
*/
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
unsigned long flags, unsigned long desc,
@@ -416,11 +418,9 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
}
/**
- * Walks through iomem resources and calls func() with matching resource
- * ranges. This walks through whole tree and not just first level children.
- * All the memory ranges which overlap start,end and also match flags and
- * desc are valid candidates.
- *
+ * walk_iomem_res_desc - Walks through iomem resources and calls func()
+ * with matching resource ranges.
+ *
* @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
* @flags: I/O resource flags
* @start: start addr
@@ -428,6 +428,10 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
* @arg: function argument for the callback @func
* @func: callback function that is called for each qualifying resource area
*
+ * This walks through the whole tree and not just the first-level children.
+ * All the memory ranges which overlap [@start..@end] and also match @flags
+ * and @desc are valid candidates.
+ *
* NOTE: For a new descriptor search, define a new IORES_DESC in
* <linux/ioport.h> and set it in 'desc' of a target resource entry.
*/
@@ -557,13 +561,13 @@ int region_intersects(resource_size_t start, size_t size, unsigned long flags,
}
read_unlock(&resource_lock);
- if (other == 0)
- return type ? REGION_INTERSECTS : REGION_DISJOINT;
+ if (type == 0)
+ return REGION_DISJOINT;
- if (type)
- return REGION_MIXED;
+ if (other == 0)
+ return REGION_INTERSECTS;
- return REGION_DISJOINT;
+ return REGION_MIXED;
}
EXPORT_SYMBOL_GPL(region_intersects);
@@ -1372,9 +1376,9 @@ static bool system_ram_resources_mergeable(struct resource *r1,
!r1->child && !r2->child;
}
-/*
+/**
* merge_system_ram_resource - mark the System RAM resource mergeable and try to
- * merge it with adjacent, mergeable resources
+ * merge it with adjacent, mergeable resources
* @res: resource descriptor
*
* This interface is intended for memory hotplug, whereby lots of contiguous
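
The region_intersects() fix above reorders the final checks so that "no matching resource overlaps at all" is reported as disjoint before the mixed/intersects decision. Given the two counters the function computes, the classification reduces to the following sketch (REGION_* values here are illustrative, not the real ioport.h constants):

#include <stdio.h>

enum { REGION_INTERSECTS, REGION_DISJOINT, REGION_MIXED };

/*
 * type  = number of overlapping resources that match the requested flags/desc
 * other = number of overlapping resources that do not match
 */
static int classify(int type, int other)
{
	if (type == 0)
		return REGION_DISJOINT;     /* nothing matching overlaps */
	if (other == 0)
		return REGION_INTERSECTS;   /* only matching resources overlap */
	return REGION_MIXED;                /* both kinds overlap */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0, 2),   /* REGION_DISJOINT, even though something overlaps */
	       classify(3, 0),   /* REGION_INTERSECTS */
	       classify(1, 1));  /* REGION_MIXED */
	return 0;
}
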
diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c
new file mode 100644
index 000000000000..58ab9f914602
--- /dev/null
+++ b/kernel/resource_kunit.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for API provided by resource.c and ioport.h
+ */
+
+#include <kunit/test.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define R0_START 0x0000
+#define R0_END 0xffff
+#define R1_START 0x1234
+#define R1_END 0x2345
+#define R2_START 0x4567
+#define R2_END 0x5678
+#define R3_START 0x6789
+#define R3_END 0x789a
+#define R4_START 0x2000
+#define R4_END 0x7000
+
+static struct resource r0 = { .start = R0_START, .end = R0_END };
+static struct resource r1 = { .start = R1_START, .end = R1_END };
+static struct resource r2 = { .start = R2_START, .end = R2_END };
+static struct resource r3 = { .start = R3_START, .end = R3_END };
+static struct resource r4 = { .start = R4_START, .end = R4_END };
+
+struct result {
+ struct resource *r1;
+ struct resource *r2;
+ struct resource r;
+ bool ret;
+};
+
+static struct result results_for_union[] = {
+ {
+ .r1 = &r1, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
+ }, {
+ .r1 = &r3, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
+ }, {
+ .r1 = &r4, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r1, .ret = false,
+ }, {
+ .r1 = &r3, .r2 = &r1, .ret = false,
+ }, {
+ .r1 = &r4, .r2 = &r1, .r.start = R1_START, .r.end = R4_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r3, .ret = false,
+ }, {
+ .r1 = &r2, .r2 = &r4, .r.start = R4_START, .r.end = R4_END, .ret = true,
+ }, {
+ .r1 = &r3, .r2 = &r4, .r.start = R4_START, .r.end = R3_END, .ret = true,
+ },
+};
+
+static struct result results_for_intersection[] = {
+ {
+ .r1 = &r1, .r2 = &r0, .r.start = R1_START, .r.end = R1_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r0, .r.start = R2_START, .r.end = R2_END, .ret = true,
+ }, {
+ .r1 = &r3, .r2 = &r0, .r.start = R3_START, .r.end = R3_END, .ret = true,
+ }, {
+ .r1 = &r4, .r2 = &r0, .r.start = R4_START, .r.end = R4_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r1, .ret = false,
+ }, {
+ .r1 = &r3, .r2 = &r1, .ret = false,
+ }, {
+ .r1 = &r4, .r2 = &r1, .r.start = R4_START, .r.end = R1_END, .ret = true,
+ }, {
+ .r1 = &r2, .r2 = &r3, .ret = false,
+ }, {
+ .r1 = &r2, .r2 = &r4, .r.start = R2_START, .r.end = R2_END, .ret = true,
+ }, {
+ .r1 = &r3, .r2 = &r4, .r.start = R3_START, .r.end = R4_END, .ret = true,
+ },
+};
+
+static void resource_do_test(struct kunit *test, bool ret, struct resource *r,
+ bool exp_ret, struct resource *exp_r,
+ struct resource *r1, struct resource *r2)
+{
+ KUNIT_EXPECT_EQ_MSG(test, ret, exp_ret, "Resources %pR %pR", r1, r2);
+ KUNIT_EXPECT_EQ_MSG(test, r->start, exp_r->start, "Start elements are not equal");
+ KUNIT_EXPECT_EQ_MSG(test, r->end, exp_r->end, "End elements are not equal");
+}
+
+static void resource_do_union_test(struct kunit *test, struct result *r)
+{
+ struct resource result;
+ bool ret;
+
+ memset(&result, 0, sizeof(result));
+ ret = resource_union(r->r1, r->r2, &result);
+ resource_do_test(test, ret, &result, r->ret, &r->r, r->r1, r->r2);
+
+ memset(&result, 0, sizeof(result));
+ ret = resource_union(r->r2, r->r1, &result);
+ resource_do_test(test, ret, &result, r->ret, &r->r, r->r2, r->r1);
+}
+
+static void resource_test_union(struct kunit *test)
+{
+ struct result *r = results_for_union;
+ unsigned int i = 0;
+
+ do {
+ resource_do_union_test(test, &r[i]);
+ } while (++i < ARRAY_SIZE(results_for_union));
+}
+
+static void resource_do_intersection_test(struct kunit *test, struct result *r)
+{
+ struct resource result;
+ bool ret;
+
+ memset(&result, 0, sizeof(result));
+ ret = resource_intersection(r->r1, r->r2, &result);
+ resource_do_test(test, ret, &result, r->ret, &r->r, r->r1, r->r2);
+
+ memset(&result, 0, sizeof(result));
+ ret = resource_intersection(r->r2, r->r1, &result);
+ resource_do_test(test, ret, &result, r->ret, &r->r, r->r2, r->r1);
+}
+
+static void resource_test_intersection(struct kunit *test)
+{
+ struct result *r = results_for_intersection;
+ unsigned int i = 0;
+
+ do {
+ resource_do_intersection_test(test, &r[i]);
+ } while (++i < ARRAY_SIZE(results_for_intersection));
+}
+
+static struct kunit_case resource_test_cases[] = {
+ KUNIT_CASE(resource_test_union),
+ KUNIT_CASE(resource_test_intersection),
+ {}
+};
+
+static struct kunit_suite resource_test_suite = {
+ .name = "resource",
+ .test_cases = resource_test_cases,
+};
+kunit_test_suite(resource_test_suite);
+
+MODULE_LICENSE("GPL");
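
The KUnit cases above exercise resource_union() and resource_intersection() on inclusive [start, end] ranges. A standalone sketch of the arithmetic those helpers are expected to perform, written against the test table rather than the ioport.h implementation (so the exact semantics are an assumption):

#include <stdbool.h>
#include <stdio.h>

struct res { unsigned long start, end; };   /* inclusive range */

static bool overlaps(const struct res *a, const struct res *b)
{
	return a->start <= b->end && b->start <= a->end;
}

/* Union is only defined when the two ranges overlap. */
static bool res_union(const struct res *a, const struct res *b, struct res *out)
{
	if (!overlaps(a, b))
		return false;
	out->start = a->start < b->start ? a->start : b->start;
	out->end   = a->end   > b->end   ? a->end   : b->end;
	return true;
}

static bool res_intersection(const struct res *a, const struct res *b, struct res *out)
{
	if (!overlaps(a, b))
		return false;
	out->start = a->start > b->start ? a->start : b->start;
	out->end   = a->end   < b->end   ? a->end   : b->end;
	return true;
}

int main(void)
{
	struct res r1 = { 0x1234, 0x2345 }, r4 = { 0x2000, 0x7000 }, out;

	if (res_union(&r1, &r4, &out))          /* expect [0x1234, 0x7000], as in the R1/R4 table row */
		printf("union:        [%#lx, %#lx]\n", out.start, out.end);
	if (res_intersection(&r1, &r4, &out))   /* expect [0x2000, 0x2345] */
		printf("intersection: [%#lx, %#lx]\n", out.start, out.end);
	return 0;
}
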
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 554a521ee235..2377cbb32474 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -59,9 +59,10 @@ torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (ms), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
-torture_param(int, stutter_cpus, 5, "Number of jiffies to change CPUs under test, 0=disable");
+torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
+torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
@@ -82,6 +83,7 @@ torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test
struct scf_statistics {
struct task_struct *task;
int cpu;
+ long long n_resched;
long long n_single;
long long n_single_ofl;
long long n_single_wait;
@@ -97,12 +99,15 @@ static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);
// Data for random primitive selection
-#define SCF_PRIM_SINGLE 0
-#define SCF_PRIM_MANY 1
-#define SCF_PRIM_ALL 2
-#define SCF_NPRIMS (2 * 3) // Need wait and no-wait versions of each.
+#define SCF_PRIM_RESCHED 0
+#define SCF_PRIM_SINGLE 1
+#define SCF_PRIM_MANY 2
+#define SCF_PRIM_ALL 3
+#define SCF_NPRIMS 7 // Need wait and no-wait versions of each,
+ // except for SCF_PRIM_RESCHED.
static char *scf_prim_name[] = {
+ "resched_cpu",
"smp_call_function_single",
"smp_call_function_many",
"smp_call_function",
@@ -136,6 +141,8 @@ static char *bangstr = "";
static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
+extern void resched_cpu(int cpu); // An alternative IPI vector.
+
// Print torture statistics. Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
@@ -148,6 +155,7 @@ static void scf_torture_stats_print(void)
for_each_possible_cpu(cpu)
invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
for (i = 0; i < nthreads; i++) {
+ scfs.n_resched += scf_stats_p[i].n_resched;
scfs.n_single += scf_stats_p[i].n_single;
scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
scfs.n_single_wait += scf_stats_p[i].n_single_wait;
@@ -160,8 +168,8 @@ static void scf_torture_stats_print(void)
if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
bangstr = "!!! ";
- pr_alert("%s %sscf_invoked_count %s: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
- SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count,
+ pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
+ SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
torture_onoff_stats();
@@ -314,6 +322,13 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
}
}
switch (scfsp->scfs_prim) {
+ case SCF_PRIM_RESCHED:
+ if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
+ cpu = torture_random(trsp) % nr_cpu_ids;
+ scfp->n_resched++;
+ resched_cpu(cpu);
+ }
+ break;
case SCF_PRIM_SINGLE:
cpu = torture_random(trsp) % nr_cpu_ids;
if (scfsp->scfs_wait)
@@ -383,6 +398,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
static int scftorture_invoker(void *arg)
{
int cpu;
+ int curcpu;
DEFINE_TORTURE_RANDOM(rand);
struct scf_statistics *scfp = (struct scf_statistics *)arg;
bool was_offline = false;
@@ -397,7 +413,10 @@ static int scftorture_invoker(void *arg)
VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
// Make sure that the CPU is affinitized appropriately during testing.
- WARN_ON_ONCE(smp_processor_id() != scfp->cpu);
+ curcpu = smp_processor_id();
+ WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
+ "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
+ __func__, scfp->cpu, curcpu, nr_cpu_ids);
if (!atomic_dec_return(&n_started))
while (atomic_read_acquire(&n_started)) {
@@ -421,6 +440,7 @@ static int scftorture_invoker(void *arg)
was_offline = false;
}
cond_resched();
+ stutter_wait("scftorture_invoker");
} while (!torture_must_stop());
VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
@@ -433,8 +453,8 @@ static void
scftorture_print_module_parms(const char *tag)
{
pr_alert(SCFTORT_FLAG
- "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter_cpus=%d use_cpus_read_lock=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
- verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter_cpus, use_cpus_read_lock, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
+ "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
+ verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}
static void scf_cleanup_handler(void *unused)
@@ -475,6 +495,7 @@ static int __init scf_torture_init(void)
{
long i;
int firsterr = 0;
+ unsigned long weight_resched1 = weight_resched;
unsigned long weight_single1 = weight_single;
unsigned long weight_single_wait1 = weight_single_wait;
unsigned long weight_many1 = weight_many;
@@ -487,9 +508,10 @@ static int __init scf_torture_init(void)
scftorture_print_module_parms("Start of test");
- if (weight_single == -1 && weight_single_wait == -1 &&
+ if (weight_resched == -1 && weight_single == -1 && weight_single_wait == -1 &&
weight_many == -1 && weight_many_wait == -1 &&
weight_all == -1 && weight_all_wait == -1) {
+ weight_resched1 = 2 * nr_cpu_ids;
weight_single1 = 2 * nr_cpu_ids;
weight_single_wait1 = 2 * nr_cpu_ids;
weight_many1 = 2;
@@ -497,6 +519,8 @@ static int __init scf_torture_init(void)
weight_all1 = 1;
weight_all_wait1 = 1;
} else {
+ if (weight_resched == -1)
+ weight_resched1 = 0;
if (weight_single == -1)
weight_single1 = 0;
if (weight_single_wait == -1)
@@ -517,6 +541,10 @@ static int __init scf_torture_init(void)
firsterr = -EINVAL;
goto unwind;
}
+ if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
+ scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
+ else if (weight_resched1)
+ VERBOSE_SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
@@ -535,6 +563,11 @@ static int __init scf_torture_init(void)
if (firsterr)
goto unwind;
}
+ if (stutter > 0) {
+ firsterr = torture_stutter_init(stutter, stutter);
+ if (firsterr)
+ goto unwind;
+ }
// Worker tasks invoking smp_call_function().
if (nthreads < 0)
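
scftorture builds a selection table from the weight_* parameters via scf_sel_add() and then picks a primitive by drawing a random number against cumulative weights. A userspace sketch of that cumulative-weight draw, with the torture RNG replaced by rand() and the table shape only loosely following the module:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct sel { unsigned long cumweight; const char *name; };

static struct sel table[8];
static int nsel;
static unsigned long total_weight;

/* Append a primitive with the given weight; zero weight means "never picked". */
static void sel_add(unsigned long weight, const char *name)
{
	if (!weight)
		return;
	total_weight += weight;
	table[nsel].cumweight = total_weight;
	table[nsel].name = name;
	nsel++;
}

static const char *sel_pick(void)
{
	unsigned long r = (unsigned long)rand() % total_weight;

	for (int i = 0; i < nsel; i++)
		if (r < table[i].cumweight)
			return table[i].name;
	return table[nsel - 1].name;   /* not reached */
}

int main(void)
{
	srand((unsigned)time(NULL));
	sel_add(8, "resched_cpu");
	sel_add(8, "smp_call_function_single");
	sel_add(2, "smp_call_function_many");
	sel_add(1, "smp_call_function");
	for (int i = 0; i < 5; i++)
		printf("%s\n", sel_pick());
	return 0;
}
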
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2003a7d5ab5..7f5ffc878411 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -97,7 +97,7 @@ int sysctl_sched_rt_runtime = 950000;
*
* Normal scheduling state is serialized by rq->lock. __schedule() takes the
* local CPU's rq->lock, it optionally removes the task from the runqueue and
- * always looks at the local rq data structures to find the most elegible task
+ * always looks at the local rq data structures to find the most eligible task
* to run next.
*
* Task enqueue is also under rq->lock, possibly taken from another CPU.
@@ -320,14 +320,6 @@ void update_rq_clock(struct rq *rq)
update_rq_clock_task(rq, delta);
}
-static inline void
-rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func)
-{
- csd->flags = 0;
- csd->func = func;
- csd->info = rq;
-}
-
#ifdef CONFIG_SCHED_HRTICK
/*
* Use HR-timers to deliver accurate preemption points.
@@ -363,8 +355,9 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
static void __hrtick_restart(struct rq *rq)
{
struct hrtimer *timer = &rq->hrtick_timer;
+ ktime_t time = rq->hrtick_time;
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
+ hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}
/*
@@ -388,7 +381,6 @@ static void __hrtick_start(void *arg)
void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
- ktime_t time;
s64 delta;
/*
@@ -396,9 +388,7 @@ void hrtick_start(struct rq *rq, u64 delay)
* doesn't make sense and can cause timer DoS.
*/
delta = max_t(s64, delay, 10000LL);
- time = ktime_add_ns(timer->base->get_time(), delta);
-
- hrtimer_set_expires(timer, time);
+ rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
if (rq == this_rq())
__hrtick_restart(rq);
@@ -428,7 +418,7 @@ void hrtick_start(struct rq *rq, u64 delay)
static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
- rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start);
+ INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick;
@@ -518,7 +508,7 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
/*
* Atomically grab the task, if ->wake_q is !nil already it means
- * its already queued (either by us or someone else) and will get the
+ * it's already queued (either by us or someone else) and will get the
* wakeup due to that.
*
* In order to ensure that a pending wakeup will observe our pending
@@ -769,7 +759,7 @@ bool sched_can_stop_tick(struct rq *rq)
return false;
/*
- * If there are more than one RR tasks, we need the tick to effect the
+ * If there are more than one RR tasks, we need the tick to affect the
* actual RR behaviour.
*/
if (rq->rt.rr_nr_running) {
@@ -1187,14 +1177,14 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
* accounting was performed at enqueue time and we can just return
* here.
*
- * Need to be careful of the following enqeueue/dequeue ordering
+ * Need to be careful of the following enqueue/dequeue ordering
* problem too
*
* enqueue(taskA)
* // sched_uclamp_used gets enabled
* enqueue(taskB)
* dequeue(taskA)
- * // Must not decrement bukcet->tasks here
+ * // Must not decrement bucket->tasks here
* dequeue(taskB)
*
* where we could end up with stale data in uc_se and
@@ -1413,17 +1403,24 @@ done:
static int uclamp_validate(struct task_struct *p,
const struct sched_attr *attr)
{
- unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
- unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;
+ int util_min = p->uclamp_req[UCLAMP_MIN].value;
+ int util_max = p->uclamp_req[UCLAMP_MAX].value;
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
- lower_bound = attr->sched_util_min;
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
- upper_bound = attr->sched_util_max;
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
+ util_min = attr->sched_util_min;
- if (lower_bound > upper_bound)
- return -EINVAL;
- if (upper_bound > SCHED_CAPACITY_SCALE)
+ if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
+ return -EINVAL;
+ }
+
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
+ util_max = attr->sched_util_max;
+
+ if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
+ return -EINVAL;
+ }
+
+ if (util_min != -1 && util_max != -1 && util_min > util_max)
return -EINVAL;
/*
@@ -1438,20 +1435,41 @@ static int uclamp_validate(struct task_struct *p,
return 0;
}
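In the rewritten check above, -1 is a "reset to default" sentinel rather than a literal clamp value: the comparisons are shifted by one so that -1 is let through while anything above SCHED_CAPACITY_SCALE is still rejected, and the min/max ordering is only enforced when neither side is -1. A minimal standalone restatement of the intended rule (assuming SCHED_CAPACITY_SCALE == 1024; the helper name is invented for illustration):

/* Illustration only: -1 means "reset to the default clamp",
 * anything else must be a value in 0..SCHED_CAPACITY_SCALE. */
static bool clamp_value_ok(int v)
{
        return v == -1 || (v >= 0 && v <= 1024);
}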
+static bool uclamp_reset(const struct sched_attr *attr,
+ enum uclamp_id clamp_id,
+ struct uclamp_se *uc_se)
+{
+ /* Reset on sched class change for a non user-defined clamp value. */
+ if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
+ !uc_se->user_defined)
+ return true;
+
+ /* Reset on sched_util_{min,max} == -1. */
+ if (clamp_id == UCLAMP_MIN &&
+ attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
+ attr->sched_util_min == -1) {
+ return true;
+ }
+
+ if (clamp_id == UCLAMP_MAX &&
+ attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
+ attr->sched_util_max == -1) {
+ return true;
+ }
+
+ return false;
+}
+
static void __setscheduler_uclamp(struct task_struct *p,
const struct sched_attr *attr)
{
enum uclamp_id clamp_id;
- /*
- * On scheduling class change, reset to default clamps for tasks
- * without a task-specific value.
- */
for_each_clamp_id(clamp_id) {
struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
+ unsigned int value;
- /* Keep using defined clamps across class changes */
- if (uc_se->user_defined)
+ if (!uclamp_reset(attr, clamp_id, uc_se))
continue;
/*
@@ -1459,21 +1477,25 @@ static void __setscheduler_uclamp(struct task_struct *p,
* at runtime.
*/
if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
- __uclamp_update_util_min_rt_default(p);
+ value = sysctl_sched_uclamp_util_min_rt_default;
else
- uclamp_se_set(uc_se, uclamp_none(clamp_id), false);
+ value = uclamp_none(clamp_id);
+
+ uclamp_se_set(uc_se, value, false);
}
if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
return;
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
+ attr->sched_util_min != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
attr->sched_util_min, true);
}
- if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
+ attr->sched_util_max != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
attr->sched_util_max, true);
}
@@ -1696,19 +1718,104 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
#ifdef CONFIG_SMP
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask,
+ u32 flags);
+
+static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
+{
+ if (likely(!p->migration_disabled))
+ return;
+
+ if (p->cpus_ptr != &p->cpus_mask)
+ return;
+
+ /*
+ * Violates locking rules! see comment in __do_set_cpus_allowed().
+ */
+ __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (p->migration_disabled) {
+ p->migration_disabled++;
+ return;
+ }
+
+ preempt_disable();
+ this_rq()->nr_pinned++;
+ p->migration_disabled = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
+
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ preempt_disable();
+ if (p->cpus_ptr != &p->cpus_mask)
+ __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+	 * regular cpus_mask, otherwise things that race (e.g.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq()->nr_pinned--;
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_enable);
+
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+ return rq->nr_pinned;
+}
+
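migrate_disable()/migrate_enable() above pin the current task to its CPU without disabling preemption, so per-CPU accesses keep hitting the same CPU's copy even if the task is preempted in between. A hypothetical usage sketch (the per-CPU variable and the function are invented for illustration):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_stat);  /* invented per-CPU counter */

static unsigned long read_and_clear_my_stat(void)
{
        unsigned long val;

        migrate_disable();              /* still preemptible, but stays on this CPU */
        val = this_cpu_read(my_stat);   /* both accesses use the same CPU's copy;   */
        this_cpu_write(my_stat, 0);     /* this gives CPU stability, not exclusion  */
        migrate_enable();

        return val;
}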
/*
* Per-CPU kthreads are allowed to run on !active && online CPUs, see
* __set_cpus_allowed_ptr() and select_fallback_rq().
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
+ /* When not in the task's cpumask, no point in looking further. */
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
- if (is_per_cpu_kthread(p))
+ /* migrate_disabled() must be allowed to finish. */
+ if (is_migration_disabled(p))
return cpu_online(cpu);
- return cpu_active(cpu);
+ /* Non kernel threads are not allowed during either online or offline. */
+ if (!(p->flags & PF_KTHREAD))
+ return cpu_active(cpu);
+
+ /* KTHREAD_IS_PER_CPU is always allowed. */
+ if (kthread_is_per_cpu(p))
+ return cpu_online(cpu);
+
+ /* Regular kernel threads don't get to stay during offline. */
+ if (cpu_rq(cpu)->balance_push)
+ return false;
+
+ /* But are allowed during online. */
+ return cpu_online(cpu);
}
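The decision ladder above reduces to a small pure function over a handful of predicates; a user-space model with the kernel checks stubbed out as booleans (illustration only, all names invented):

#include <stdbool.h>

enum cpu_state { CPU_OFFLINE, CPU_ONLINE, CPU_ACTIVE };

static bool model_is_cpu_allowed(bool in_cpumask, bool migration_disabled,
                                 bool is_kthread, bool is_percpu_kthread,
                                 bool balance_push, enum cpu_state state)
{
        if (!in_cpumask)
                return false;                   /* not in p->cpus_ptr */
        if (migration_disabled)
                return state >= CPU_ONLINE;     /* must be allowed to finish */
        if (!is_kthread)
                return state == CPU_ACTIVE;     /* user tasks need an active CPU */
        if (is_percpu_kthread)
                return state >= CPU_ONLINE;     /* KTHREAD_IS_PER_CPU always allowed */
        if (balance_push)
                return false;                   /* regular kthreads are pushed away */
        return state >= CPU_ONLINE;
}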
/*
@@ -1750,8 +1857,16 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
}
struct migration_arg {
- struct task_struct *task;
- int dest_cpu;
+ struct task_struct *task;
+ int dest_cpu;
+ struct set_affinity_pending *pending;
+};
+
+struct set_affinity_pending {
+ refcount_t refs;
+ struct completion done;
+ struct cpu_stop_work stop_work;
+ struct migration_arg arg;
};
/*
@@ -1783,16 +1898,19 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
*/
static int migration_cpu_stop(void *data)
{
+ struct set_affinity_pending *pending;
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
+ int dest_cpu = arg->dest_cpu;
struct rq *rq = this_rq();
+ bool complete = false;
struct rq_flags rf;
/*
* The original target CPU might have gone down and we might
* be on another CPU but it doesn't matter.
*/
- local_irq_disable();
+ local_irq_save(rf.flags);
/*
* We need to explicitly wake pending tasks before running
* __migrate_task() such that we will not miss enforcing cpus_ptr
@@ -1802,21 +1920,137 @@ static int migration_cpu_stop(void *data)
raw_spin_lock(&p->pi_lock);
rq_lock(rq, &rf);
+
+ pending = p->migration_pending;
/*
* If task_rq(p) != rq, it cannot be migrated here, because we're
* holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
* we're holding p->pi_lock.
*/
if (task_rq(p) == rq) {
+ if (is_migration_disabled(p))
+ goto out;
+
+ if (pending) {
+ p->migration_pending = NULL;
+ complete = true;
+ }
+
+ /* migrate_enable() -- we must not race against SCA */
+ if (dest_cpu < 0) {
+ /*
+ * When this was migrate_enable() but we no longer
+ * have a @pending, a concurrent SCA 'fixed' things
+ * and we should be valid again. Nothing to do.
+ */
+ if (!pending) {
+ WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
+ goto out;
+ }
+
+ dest_cpu = cpumask_any_distribute(&p->cpus_mask);
+ }
+
if (task_on_rq_queued(p))
- rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+ rq = __migrate_task(rq, &rf, p, dest_cpu);
else
- p->wake_cpu = arg->dest_cpu;
+ p->wake_cpu = dest_cpu;
+
+ } else if (dest_cpu < 0 || pending) {
+ /*
+ * This happens when we get migrated between migrate_enable()'s
+ * preempt_enable() and scheduling the stopper task. At that
+ * point we're a regular task again and not current anymore.
+ *
+ * A !PREEMPT kernel has a giant hole here, which makes it far
+ * more likely.
+ */
+
+ /*
+ * The task moved before the stopper got to run. We're holding
+ * ->pi_lock, so the allowed mask is stable - if it got
+ * somewhere allowed, we're done.
+ */
+ if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
+ p->migration_pending = NULL;
+ complete = true;
+ goto out;
+ }
+
+ /*
+ * When this was migrate_enable() but we no longer have an
+ * @pending, a concurrent SCA 'fixed' things and we should be
+ * valid again. Nothing to do.
+ */
+ if (!pending) {
+ WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
+ goto out;
+ }
+
+ /*
+ * When migrate_enable() hits a rq mis-match we can't reliably
+ * determine is_migration_disabled() and so have to chase after
+ * it.
+ */
+ task_rq_unlock(rq, p, &rf);
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ return 0;
}
- rq_unlock(rq, &rf);
- raw_spin_unlock(&p->pi_lock);
+out:
+ task_rq_unlock(rq, p, &rf);
+
+ if (complete)
+ complete_all(&pending->done);
+
+ /* For pending->{arg,stop_work} */
+ pending = arg->pending;
+ if (pending && refcount_dec_and_test(&pending->refs))
+ wake_up_var(&pending->refs);
- local_irq_enable();
+ return 0;
+}
+
+int push_cpu_stop(void *arg)
+{
+ struct rq *lowest_rq = NULL, *rq = this_rq();
+ struct task_struct *p = arg;
+
+ raw_spin_lock_irq(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
+
+ if (task_rq(p) != rq)
+ goto out_unlock;
+
+ if (is_migration_disabled(p)) {
+ p->migration_flags |= MDF_PUSH;
+ goto out_unlock;
+ }
+
+ p->migration_flags &= ~MDF_PUSH;
+
+ if (p->sched_class->find_lock_rq)
+ lowest_rq = p->sched_class->find_lock_rq(p, rq);
+
+ if (!lowest_rq)
+ goto out_unlock;
+
+ // XXX validate p is still the highest prio task
+ if (task_rq(p) == rq) {
+ deactivate_task(rq, p, 0);
+ set_task_cpu(p, lowest_rq->cpu);
+ activate_task(lowest_rq, p, 0);
+ resched_curr(lowest_rq);
+ }
+
+ double_unlock_balance(rq, lowest_rq);
+
+out_unlock:
+ rq->push_busy = false;
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
return 0;
}
@@ -1824,18 +2058,39 @@ static int migration_cpu_stop(void *data)
* sched_class::set_cpus_allowed must do the below, but is not required to
* actually call this function.
*/
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
+ if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
+ p->cpus_ptr = new_mask;
+ return;
+ }
+
cpumask_copy(&p->cpus_mask, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
struct rq *rq = task_rq(p);
bool queued, running;
- lockdep_assert_held(&p->pi_lock);
+ /*
+ * This here violates the locking rules for affinity, since we're only
+ * supposed to change these variables while holding both rq->lock and
+ * p->pi_lock.
+ *
+ * HOWEVER, it magically works, because ttwu() is the only code that
+ * accesses these variables under p->pi_lock and only does so after
+ * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
+ * before finish_task().
+ *
+ * XXX do further audits, this smells like something putrid.
+ */
+ if (flags & SCA_MIGRATE_DISABLE)
+ SCHED_WARN_ON(!p->on_cpu);
+ else
+ lockdep_assert_held(&p->pi_lock);
queued = task_on_rq_queued(p);
running = task_current(rq, p);
@@ -1851,7 +2106,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
if (running)
put_prev_task(rq, p);
- p->sched_class->set_cpus_allowed(p, new_mask);
+ p->sched_class->set_cpus_allowed(p, new_mask, flags);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -1859,6 +2114,208 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
set_next_task(rq, p);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ __do_set_cpus_allowed(p, new_mask, 0);
+}
+
+/*
+ * This function is wildly self concurrent; here be dragons.
+ *
+ *
+ * When given a valid mask, __set_cpus_allowed_ptr() must block until the
+ * designated task is enqueued on an allowed CPU. If that task is currently
+ * running, we have to kick it out using the CPU stopper.
+ *
+ * Migrate-Disable comes along and tramples all over our nice sandcastle.
+ * Consider:
+ *
+ * Initial conditions: P0->cpus_mask = [0, 1]
+ *
+ * P0@CPU0 P1
+ *
+ * migrate_disable();
+ * <preempted>
+ * set_cpus_allowed_ptr(P0, [1]);
+ *
+ * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
+ * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
+ * This means we need the following scheme:
+ *
+ * P0@CPU0 P1
+ *
+ * migrate_disable();
+ * <preempted>
+ * set_cpus_allowed_ptr(P0, [1]);
+ * <blocks>
+ * <resumes>
+ * migrate_enable();
+ * __set_cpus_allowed_ptr();
+ * <wakes local stopper>
+ * `--> <woken on migration completion>
+ *
+ * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
+ * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
+ * task p are serialized by p->pi_lock, which we can leverage: the one that
+ * should come into effect at the end of the Migrate-Disable region is the last
+ * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
+ * but we still need to properly signal those waiting tasks at the appropriate
+ * moment.
+ *
+ * This is implemented using struct set_affinity_pending. The first
+ * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
+ * setup an instance of that struct and install it on the targeted task_struct.
+ * Any and all further callers will reuse that instance. Those then wait for
+ * a completion signaled at the tail of the CPU stopper callback (1), triggered
+ * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
+ *
+ *
+ * (1) In the cases covered above. There is one more where the completion is
+ * signaled within affine_move_task() itself: when a subsequent affinity request
+ * cancels the need for an active migration. Consider:
+ *
+ * Initial conditions: P0->cpus_mask = [0, 1]
+ *
+ * P0@CPU0 P1 P2
+ *
+ * migrate_disable();
+ * <preempted>
+ * set_cpus_allowed_ptr(P0, [1]);
+ * <blocks>
+ * set_cpus_allowed_ptr(P0, [0, 1]);
+ * <signal completion>
+ * <awakes>
+ *
+ * Note that the above is safe vs a concurrent migrate_enable(), as any
+ * pending affinity completion is preceded by an uninstallation of
+ * p->migration_pending done with p->pi_lock held.
+ */
+static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
+ int dest_cpu, unsigned int flags)
+{
+ struct set_affinity_pending my_pending = { }, *pending = NULL;
+ struct migration_arg arg = {
+ .task = p,
+ .dest_cpu = dest_cpu,
+ };
+ bool complete = false;
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+ struct task_struct *push_task = NULL;
+
+ if ((flags & SCA_MIGRATE_ENABLE) &&
+ (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
+ rq->push_busy = true;
+ push_task = get_task_struct(p);
+ }
+
+ pending = p->migration_pending;
+ if (pending) {
+ refcount_inc(&pending->refs);
+ p->migration_pending = NULL;
+ complete = true;
+ }
+ task_rq_unlock(rq, p, rf);
+
+ if (push_task) {
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ p, &rq->push_work);
+ }
+
+ if (complete)
+ goto do_complete;
+
+ return 0;
+ }
+
+ if (!(flags & SCA_MIGRATE_ENABLE)) {
+ /* serialized by p->pi_lock */
+ if (!p->migration_pending) {
+ /* Install the request */
+ refcount_set(&my_pending.refs, 1);
+ init_completion(&my_pending.done);
+ p->migration_pending = &my_pending;
+ } else {
+ pending = p->migration_pending;
+ refcount_inc(&pending->refs);
+ }
+ }
+ pending = p->migration_pending;
+ /*
+ * - !MIGRATE_ENABLE:
+ * we'll have installed a pending if there wasn't one already.
+ *
+ * - MIGRATE_ENABLE:
+ * we're here because the current CPU isn't matching anymore,
+ * the only way that can happen is because of a concurrent
+ * set_cpus_allowed_ptr() call, which should then still be
+ * pending completion.
+ *
+ * Either way, we really should have a @pending here.
+ */
+ if (WARN_ON_ONCE(!pending)) {
+ task_rq_unlock(rq, p, rf);
+ return -EINVAL;
+ }
+
+ if (flags & SCA_MIGRATE_ENABLE) {
+
+ refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+ p->migration_flags &= ~MDF_PUSH;
+ task_rq_unlock(rq, p, rf);
+
+ pending->arg = (struct migration_arg) {
+ .task = p,
+ .dest_cpu = -1,
+ .pending = pending,
+ };
+
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+
+ return 0;
+ }
+
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
+ /*
+ * Lessen races (and headaches) by delegating
+ * is_migration_disabled(p) checks to the stopper, which will
+ * run on the same CPU as said p.
+ */
+ task_rq_unlock(rq, p, rf);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+
+ } else {
+
+ if (!is_migration_disabled(p)) {
+ if (task_on_rq_queued(p))
+ rq = move_queued_task(rq, rf, p, dest_cpu);
+
+ p->migration_pending = NULL;
+ complete = true;
+ }
+ task_rq_unlock(rq, p, rf);
+
+do_complete:
+ if (complete)
+ complete_all(&pending->done);
+ }
+
+ wait_for_completion(&pending->done);
+
+ if (refcount_dec_and_test(&pending->refs))
+ wake_up_var(&pending->refs);
+
+ /*
+ * Block the original owner of &pending until all subsequent callers
+ * have seen the completion and decremented the refcount
+ */
+ wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
+
+ return 0;
+}
+
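A minimal user-space sketch of the "install once, everyone waits, complete_all() wakes all" protocol described in the comment above. All names are invented; the kernel version additionally refcounts the pending object so the stack-allocated owner can wait for late readers, which this sketch omits:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pending {                        /* plays struct set_affinity_pending */
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static struct pending *installed;       /* plays p->migration_pending */
static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;

/* First caller installs its own (stack) object; later callers reuse it. */
static struct pending *install_or_reuse(struct pending *mine)
{
        struct pending *p;

        pthread_mutex_lock(&pi_lock);
        if (!installed) {
                pthread_mutex_init(&mine->lock, NULL);
                pthread_cond_init(&mine->cond, NULL);
                mine->done = false;
                installed = mine;
        }
        p = installed;
        pthread_mutex_unlock(&pi_lock);
        return p;
}

/* wait_for_completion() analogue: every affinity request blocks here. */
static void wait_done(struct pending *p)
{
        pthread_mutex_lock(&p->lock);
        while (!p->done)
                pthread_cond_wait(&p->cond, &p->lock);
        pthread_mutex_unlock(&p->lock);
}

/* complete_all() analogue: run once, after uninstalling under pi_lock. */
static void complete_all_waiters(struct pending *p)
{
        pthread_mutex_lock(&pi_lock);
        if (installed == p)
                installed = NULL;
        pthread_mutex_unlock(&pi_lock);

        pthread_mutex_lock(&p->lock);
        p->done = true;
        pthread_cond_broadcast(&p->cond);
        pthread_mutex_unlock(&p->lock);
}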
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
@@ -1869,7 +2326,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
* call is not atomic; no spinlocks may be held.
*/
static int __set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask, bool check)
+ const struct cpumask *new_mask,
+ u32 flags)
{
const struct cpumask *cpu_valid_mask = cpu_active_mask;
unsigned int dest_cpu;
@@ -1880,9 +2338,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
rq = task_rq_lock(p, &rf);
update_rq_clock(rq);
- if (p->flags & PF_KTHREAD) {
+ if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
/*
- * Kernel threads are allowed on online && !active CPUs
+ * Kernel threads are allowed on online && !active CPUs,
+ * however, during cpu-hot-unplug, even these might get pushed
+ * away if not KTHREAD_IS_PER_CPU.
+ *
+ * Specifically, migration_disabled() tasks must not fail the
+ * cpumask_any_and_distribute() pick below, esp. so on
+ * SCA_MIGRATE_ENABLE, otherwise we'll not call
+ * set_cpus_allowed_common() and actually reset p->cpus_ptr.
*/
cpu_valid_mask = cpu_online_mask;
}
@@ -1891,13 +2356,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
* Must re-check here, to close a race against __kthread_bind(),
* sched_setaffinity() is not guaranteed to observe the flag.
*/
- if (check && (p->flags & PF_NO_SETAFFINITY)) {
+ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out;
}
- if (cpumask_equal(&p->cpus_mask, new_mask))
- goto out;
+ if (!(flags & SCA_MIGRATE_ENABLE)) {
+ if (cpumask_equal(&p->cpus_mask, new_mask))
+ goto out;
+
+ if (WARN_ON_ONCE(p == current &&
+ is_migration_disabled(p) &&
+ !cpumask_test_cpu(task_cpu(p), new_mask))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
/*
* Picking a ~random cpu helps in cases where we are changing affinity
@@ -1910,35 +2384,10 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
goto out;
}
- do_set_cpus_allowed(p, new_mask);
+ __do_set_cpus_allowed(p, new_mask, flags);
- if (p->flags & PF_KTHREAD) {
- /*
- * For kernel threads that do indeed end up on online &&
- * !active we want to ensure they are strict per-CPU threads.
- */
- WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
- !cpumask_intersects(new_mask, cpu_active_mask) &&
- p->nr_cpus_allowed != 1);
- }
-
- /* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
- goto out;
+ return affine_move_task(rq, p, &rf, dest_cpu, flags);
- if (task_running(rq, p) || p->state == TASK_WAKING) {
- struct migration_arg arg = { p, dest_cpu };
- /* Need help from migration thread: drop lock and wait. */
- task_rq_unlock(rq, p, &rf);
- stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
- return 0;
- } else if (task_on_rq_queued(p)) {
- /*
- * OK, since we're going to drop the lock immediately
- * afterwards anyway.
- */
- rq = move_queued_task(rq, &rf, p, dest_cpu);
- }
out:
task_rq_unlock(rq, p, &rf);
@@ -1947,7 +2396,7 @@ out:
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
- return __set_cpus_allowed_ptr(p, new_mask, false);
+ return __set_cpus_allowed_ptr(p, new_mask, 0);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
@@ -1988,6 +2437,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
* Clearly, migrating tasks to offline CPUs is a fairly daft thing.
*/
WARN_ON_ONCE(!cpu_online(new_cpu));
+
+ WARN_ON_ONCE(is_migration_disabled(p));
#endif
trace_sched_migrate_task(p, new_cpu);
@@ -2318,6 +2769,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}
fallthrough;
case possible:
+ /*
+ * XXX When called from select_task_rq() we only
+ * hold p->pi_lock and again violate locking order.
+ *
+ * More yuck to audit.
+ */
do_set_cpus_allowed(p, cpu_possible_mask);
state = fail;
break;
@@ -2348,12 +2805,12 @@ out:
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
*/
static inline
-int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
{
lockdep_assert_held(&p->pi_lock);
- if (p->nr_cpus_allowed > 1)
- cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+ if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
+ cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
else
cpu = cpumask_any(p->cpus_ptr);
@@ -2375,6 +2832,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
+ static struct lock_class_key stop_pi_lock;
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
struct task_struct *old_stop = cpu_rq(cpu)->stop;
@@ -2390,6 +2848,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
stop->sched_class = &stop_sched_class;
+
+ /*
+ * The PI code calls rt_mutex_setprio() with ->pi_lock held to
+ * adjust the effective priority of a task. As a result,
+ * rt_mutex_setprio() can trigger (RT) balancing operations,
+ * which can then trigger wakeups of the stop thread to push
+ * around the current task.
+ *
+ * The stop task itself will never be part of the PI-chain, it
+ * never blocks, therefore that ->pi_lock recursion is safe.
+ * Tell lockdep about this by placing the stop->pi_lock in its
+ * own class.
+ */
+ lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
}
cpu_rq(cpu)->stop = stop;
@@ -2403,15 +2875,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
}
}
-#else
+#else /* CONFIG_SMP */
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask, bool check)
+ const struct cpumask *new_mask,
+ u32 flags)
{
return set_cpus_allowed_ptr(p, new_mask);
}
-#endif /* CONFIG_SMP */
+static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
+
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+ return false;
+}
+
+#endif /* !CONFIG_SMP */
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
@@ -2465,7 +2945,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
/*
- * Our task @p is fully woken up and running; so its safe to
+ * Our task @p is fully woken up and running; so it's safe to
* drop the rq->lock, hereafter rq is only used for statistics.
*/
rq_unpin_lock(rq, rf);
@@ -2501,7 +2981,12 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
#ifdef CONFIG_SMP
if (wake_flags & WF_MIGRATED)
en_flags |= ENQUEUE_MIGRATED;
+ else
#endif
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
activate_task(rq, p, en_flags);
ttwu_do_wakeup(rq, p, wake_flags, rf);
@@ -2642,6 +3127,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
static inline bool ttwu_queue_cond(int cpu, int wake_flags)
{
/*
+ * Do not complicate things with the async wake_list while the CPU is
+ * in hotplug state.
+ */
+ if (!cpu_active(cpu))
+ return false;
+
+ /*
* If the CPU does not share cache, then queue the task on the
* remote rqs wakelist to avoid accessing remote data.
*/
@@ -2888,11 +3380,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
goto unlock;
- if (p->in_iowait) {
- delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
#ifdef CONFIG_SMP
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
@@ -2952,7 +3439,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
/*
* If the owning (remote) CPU is still in the middle of schedule() with
- * this task as prev, wait until its done referencing the task.
+ * this task as prev, wait until it's done referencing the task.
*
* Pairs with the smp_store_release() in finish_task().
*
@@ -2961,8 +3448,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);
- cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
+ cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
if (task_cpu(p) != cpu) {
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
wake_flags |= WF_MIGRATED;
psi_ttwu_dequeue(p);
set_task_cpu(p, cpu);
@@ -2984,7 +3476,7 @@ out:
/**
* try_invoke_on_locked_down_task - Invoke a function on task in fixed state
- * @p: Process for which the function is to be invoked.
+ * @p: Process for which the function is to be invoked, can be @current.
* @func: Function to invoke.
* @arg: Argument to function.
*
@@ -3002,12 +3494,11 @@ out:
*/
bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
{
- bool ret = false;
struct rq_flags rf;
+ bool ret = false;
struct rq *rq;
- lockdep_assert_irqs_enabled();
- raw_spin_lock_irq(&p->pi_lock);
+ raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
if (p->on_rq) {
rq = __task_rq_lock(p, &rf);
if (task_rq(p) == rq)
@@ -3024,7 +3515,7 @@ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct t
ret = func(p, arg);
}
}
- raw_spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
return ret;
}
@@ -3098,6 +3589,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
init_numa_balancing(clone_flags, p);
#ifdef CONFIG_SMP
p->wake_entry.u_flags = CSD_TYPE_TTWU;
+ p->migration_pending = NULL;
#endif
}
@@ -3344,7 +3836,7 @@ void wake_up_new_task(struct task_struct *p)
*/
p->recent_used_cpu = task_cpu(p);
rseq_migrate(p);
- __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
+ __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
#endif
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
@@ -3356,7 +3848,7 @@ void wake_up_new_task(struct task_struct *p)
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
/*
- * Nothing relies on rq->lock after this, so its fine to
+ * Nothing relies on rq->lock after this, so it's fine to
* drop it.
*/
rq_unpin_lock(rq, &rf);
@@ -3485,6 +3977,76 @@ static inline void finish_task(struct task_struct *prev)
#endif
}
+#ifdef CONFIG_SMP
+
+static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+ void (*func)(struct rq *rq);
+ struct callback_head *next;
+
+ lockdep_assert_held(&rq->lock);
+
+ while (head) {
+ func = (void (*)(struct rq *))head->func;
+ next = head->next;
+ head->next = NULL;
+ head = next;
+
+ func(rq);
+ }
+}
+
+static void balance_push(struct rq *rq);
+
+struct callback_head balance_push_callback = {
+ .next = NULL,
+ .func = (void (*)(struct callback_head *))balance_push,
+};
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+ struct callback_head *head = rq->balance_callback;
+
+ lockdep_assert_held(&rq->lock);
+ if (head)
+ rq->balance_callback = NULL;
+
+ return head;
+}
+
+static void __balance_callbacks(struct rq *rq)
+{
+ do_balance_callbacks(rq, splice_balance_callbacks(rq));
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+ unsigned long flags;
+
+ if (unlikely(head)) {
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ do_balance_callbacks(rq, head);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+}
+
+#else
+
+static inline void __balance_callbacks(struct rq *rq)
+{
+}
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+ return NULL;
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+}
+
+#endif
+
static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
@@ -3510,6 +4072,7 @@ static inline void finish_lock_switch(struct rq *rq)
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+ __balance_callbacks(rq);
raw_spin_unlock_irq(&rq->lock);
}
@@ -3525,6 +4088,22 @@ static inline void finish_lock_switch(struct rq *rq)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
+static inline void kmap_local_sched_out(void)
+{
+#ifdef CONFIG_KMAP_LOCAL
+ if (unlikely(current->kmap_ctrl.idx))
+ __kmap_local_sched_out();
+#endif
+}
+
+static inline void kmap_local_sched_in(void)
+{
+#ifdef CONFIG_KMAP_LOCAL
+ if (unlikely(current->kmap_ctrl.idx))
+ __kmap_local_sched_in();
+#endif
+}
+
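These hooks exist so that a task's kmap_local mappings survive being scheduled out and back in; a hypothetical caller sketch (the function is invented, kmap_local_page()/kunmap_local() are the interface the hooks cover):

#include <linux/highmem.h>
#include <linux/types.h>

static u8 first_byte_of(struct page *page)
{
        u8 *vaddr = kmap_local_page(page);      /* CPU-local, stackable mapping */
        u8 val = vaddr[0];                      /* being preempted here is fine: the
                                                 * sched out/in hooks above drop and
                                                 * re-establish the mapping */

        kunmap_local(vaddr);
        return val;
}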
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -3547,6 +4126,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
fire_sched_out_preempt_notifiers(prev, next);
+ kmap_local_sched_out();
prepare_task(next);
prepare_arch_switch(next);
}
@@ -3613,6 +4193,14 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_lock_switch(rq);
finish_arch_post_lock_switch();
kcov_finish_switch(current);
+ /*
+ * kmap_local_sched_out() is invoked with rq::lock held and
+ * interrupts disabled. There is no requirement for that, but the
+ * sched out code does not have an interrupt enabled section.
+ * Restoring the maps on sched in does not require interrupts being
+ * disabled either.
+ */
+ kmap_local_sched_in();
fire_sched_in_preempt_notifiers(current);
/*
@@ -3651,43 +4239,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
return rq;
}
-#ifdef CONFIG_SMP
-
-/* rq->lock is NOT held, but preemption is disabled */
-static void __balance_callback(struct rq *rq)
-{
- struct callback_head *head, *next;
- void (*func)(struct rq *rq);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- head = rq->balance_callback;
- rq->balance_callback = NULL;
- while (head) {
- func = (void (*)(struct rq *))head->func;
- next = head->next;
- head->next = NULL;
- head = next;
-
- func(rq);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-static inline void balance_callback(struct rq *rq)
-{
- if (unlikely(rq->balance_callback))
- __balance_callback(rq);
-}
-
-#else
-
-static inline void balance_callback(struct rq *rq)
-{
-}
-
-#endif
-
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
@@ -3707,7 +4258,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
*/
rq = finish_task_switch(prev);
- balance_callback(rq);
preempt_enable();
if (current->set_child_tid)
@@ -3836,7 +4386,7 @@ unsigned long nr_iowait_cpu(int cpu)
}
/*
- * IO-wait accounting, and how its mostly bollocks (on SMP).
+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
*
* The idea behind IO-wait account is to account the idle time that we could
* have spend running if it were not for IO. That is, if we were to improve the
@@ -3888,7 +4438,7 @@ void sched_exec(void)
int dest_cpu;
raw_spin_lock_irqsave(&p->pi_lock, flags);
- dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+ dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -4291,6 +4841,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
preempt_count_set(PREEMPT_DISABLED);
}
rcu_sleep_check();
+ SCHED_WARN_ON(ct_state() == CONTEXT_USER);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4331,7 +4882,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/*
* Optimization: we know that if all tasks are in the fair class we can
* call that function directly, but only if the @prev task wasn't of a
- * higher scheduling class, because otherwise those loose the
+ * higher scheduling class, because otherwise those lose the
* opportunity to pull in more work from other CPUs.
*/
if (likely(prev->sched_class <= &fair_sched_class &&
@@ -4417,7 +4968,7 @@ static void __sched notrace __schedule(bool preempt)
schedule_debug(prev, preempt);
- if (sched_feat(HRTICK))
+ if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
hrtick_clear(rq);
local_irq_disable();
@@ -4515,6 +5066,7 @@ static void __sched notrace __schedule(bool preempt)
*/
++*switch_count;
+ migrate_disable_switch(rq, prev);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
trace_sched_switch(preempt, prev, next);
@@ -4523,10 +5075,11 @@ static void __sched notrace __schedule(bool preempt)
rq = context_switch(rq, prev, next, &rf);
} else {
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
- rq_unlock_irq(rq, &rf);
- }
- balance_callback(rq);
+ rq_unpin_lock(rq, &rf);
+ __balance_callbacks(rq);
+ raw_spin_unlock_irq(&rq->lock);
+ }
}
void __noreturn do_task_dead(void)
@@ -4630,7 +5183,7 @@ void __sched schedule_idle(void)
} while (need_resched());
}
-#ifdef CONFIG_CONTEXT_TRACKING
+#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
asmlinkage __visible void __sched schedule_user(void)
{
/*
@@ -4709,6 +5262,12 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+#endif
+
+
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
@@ -4761,8 +5320,197 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
+#endif
+
#endif /* CONFIG_PREEMPTION */
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+#include <linux/entry-common.h>
+
+/*
+ * SC:cond_resched
+ * SC:might_resched
+ * SC:preempt_schedule
+ * SC:preempt_schedule_notrace
+ * SC:irqentry_exit_cond_resched
+ *
+ *
+ * NONE:
+ * cond_resched <- __cond_resched
+ * might_resched <- RET0
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched <- NOP
+ *
+ * VOLUNTARY:
+ * cond_resched <- __cond_resched
+ * might_resched <- __cond_resched
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched <- NOP
+ *
+ * FULL:
+ * cond_resched <- RET0
+ * might_resched <- RET0
+ * preempt_schedule <- preempt_schedule
+ * preempt_schedule_notrace <- preempt_schedule_notrace
+ * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ */
+
+enum {
+ preempt_dynamic_none = 0,
+ preempt_dynamic_voluntary,
+ preempt_dynamic_full,
+};
+
+static int preempt_dynamic_mode = preempt_dynamic_full;
+
+static int sched_dynamic_mode(const char *str)
+{
+ if (!strcmp(str, "none"))
+ return 0;
+
+ if (!strcmp(str, "voluntary"))
+ return 1;
+
+ if (!strcmp(str, "full"))
+ return 2;
+
+ return -1;
+}
+
+static void sched_dynamic_update(int mode)
+{
+ /*
+ * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
+ * the ZERO state, which is invalid.
+ */
+ static_call_update(cond_resched, __cond_resched);
+ static_call_update(might_resched, __cond_resched);
+ static_call_update(preempt_schedule, __preempt_schedule_func);
+ static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+ static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+
+ switch (mode) {
+ case preempt_dynamic_none:
+ static_call_update(cond_resched, __cond_resched);
+ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
+ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+ pr_info("Dynamic Preempt: none\n");
+ break;
+
+ case preempt_dynamic_voluntary:
+ static_call_update(cond_resched, __cond_resched);
+ static_call_update(might_resched, __cond_resched);
+ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+ pr_info("Dynamic Preempt: voluntary\n");
+ break;
+
+ case preempt_dynamic_full:
+ static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
+ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
+ static_call_update(preempt_schedule, __preempt_schedule_func);
+ static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+ static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+ pr_info("Dynamic Preempt: full\n");
+ break;
+ }
+
+ preempt_dynamic_mode = mode;
+}
+
+static int __init setup_preempt_mode(char *str)
+{
+ int mode = sched_dynamic_mode(str);
+ if (mode < 0) {
+ pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
+ return 1;
+ }
+
+ sched_dynamic_update(mode);
+ return 0;
+}
+__setup("preempt=", setup_preempt_mode);
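With the above, the preemption model can be picked at boot with preempt=none, preempt=voluntary or preempt=full, and (per the debugfs code below) changed at runtime by writing one of those strings to the sched_preempt file, typically under /sys/kernel/debug. A user-space analogue of what sched_dynamic_update() does with static calls, using plain function pointers (illustration only):

#include <stdio.h>

static int real_cond_resched(void) { return 1; }        /* stands in for __cond_resched */
static int ret0(void)              { return 0; }        /* stands in for __static_call_return0 */

static int (*cond_resched_fn)(void)  = real_cond_resched;
static int (*might_resched_fn)(void) = ret0;

/* FULL: the explicit resched points become cheap stubs, real preemption
 * happens from the IRQ-return path instead. */
static void set_mode_full(void)
{
        cond_resched_fn  = ret0;
        might_resched_fn = ret0;
}

int main(void)
{
        set_mode_full();
        printf("cond_resched()=%d might_resched()=%d\n",
               cond_resched_fn(), might_resched_fn());
        return 0;
}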
+
+#ifdef CONFIG_SCHED_DEBUG
+
+static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[16];
+ int mode;
+
+ if (cnt > 15)
+ cnt = 15;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+ mode = sched_dynamic_mode(strstrip(buf));
+ if (mode < 0)
+ return mode;
+
+ sched_dynamic_update(mode);
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static int sched_dynamic_show(struct seq_file *m, void *v)
+{
+ static const char * preempt_modes[] = {
+ "none", "voluntary", "full"
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
+ if (preempt_dynamic_mode == i)
+ seq_puts(m, "(");
+ seq_puts(m, preempt_modes[i]);
+ if (preempt_dynamic_mode == i)
+ seq_puts(m, ")");
+
+ seq_puts(m, " ");
+ }
+
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int sched_dynamic_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_dynamic_show, NULL);
+}
+
+static const struct file_operations sched_dynamic_fops = {
+ .open = sched_dynamic_open,
+ .write = sched_dynamic_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int sched_init_debug_dynamic(void)
+{
+ debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
+ return 0;
+}
+late_initcall(sched_init_debug_dynamic);
+
+#endif /* CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+
/*
* This is the entry point to schedule() from kernel preemption
* off of irq context.
@@ -4852,7 +5600,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
* right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
* ensure a task is de-boosted (pi_task is set to NULL) before the
* task is allowed to run again (and can exit). This ensures the pointer
- * points to a blocked task -- which guaratees the task is present.
+ * points to a blocked task -- which guarantees the task is present.
*/
p->pi_top_task = pi_task;
@@ -4907,20 +5655,21 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_prio(pi_task->prio) &&
dl_entity_preempt(&pi_task->dl, &p->dl))) {
- p->dl.dl_boosted = 1;
+ p->dl.pi_se = pi_task->dl.pi_se;
queue_flag |= ENQUEUE_REPLENISH;
- } else
- p->dl.dl_boosted = 0;
+ } else {
+ p->dl.pi_se = &p->dl;
+ }
p->sched_class = &dl_sched_class;
} else if (rt_prio(prio)) {
if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
+ p->dl.pi_se = &p->dl;
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
p->sched_class = &rt_sched_class;
} else {
if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
+ p->dl.pi_se = &p->dl;
if (rt_prio(oldprio))
p->rt.timeout = 0;
p->sched_class = &fair_sched_class;
@@ -4937,9 +5686,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
out_unlock:
/* Avoid rq from going away on us: */
preempt_disable();
- __task_rq_unlock(rq, &rf);
- balance_callback(rq);
+ rq_unpin_lock(rq, &rf);
+ __balance_callbacks(rq);
+ raw_spin_unlock(&rq->lock);
+
preempt_enable();
}
#else
@@ -4968,7 +5719,7 @@ void set_user_nice(struct task_struct *p, long nice)
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
- * it wont have any effect on scheduling until the task is
+ * it won't have any effect on scheduling until the task is
* SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
*/
if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
@@ -5057,8 +5808,12 @@ SYSCALL_DEFINE1(nice, int, increment)
* @p: the task in question.
*
* Return: The priority value as seen by users in /proc.
- * RT tasks are offset by -200. Normal tasks are centered
- * around 0, value goes from -16 to +15.
+ *
+ * sched policy return value kernel prio user prio/nice
+ *
+ * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
+ * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
+ * deadline -101 -1 0
*/
int task_prio(const struct task_struct *p)
{
@@ -5117,6 +5872,120 @@ struct task_struct *idle_task(int cpu)
return cpu_rq(cpu)->idle;
}
+#ifdef CONFIG_SMP
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs,rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs,rt,dl utilization are the running times measured with rq->clock_task
+ * which excludes things like IRQ and steal-time. These latter are then accrued
+ * in the irq utilization.
+ *
+ * The DL bandwidth number otoh is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long max, enum cpu_util_type type,
+ struct task_struct *p)
+{
+ unsigned long dl_util, util, irq;
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!uclamp_is_used() &&
+ type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
+ return max;
+ }
+
+ /*
+ * Early check to see if IRQ/steal time saturates the CPU, can be
+ * because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= max))
+ return max;
+
+ /*
+	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ *
+ * CFS and RT utilization can be boosted or capped, depending on
+ * utilization clamp constraints requested by currently RUNNABLE
+ * tasks.
+ * When there are no CFS RUNNABLE tasks, clamps are released and
+ * frequency will be gracefully reduced with the utilization decay.
+ */
+ util = util_cfs + cpu_util_rt(rq);
+ if (type == FREQUENCY_UTIL)
+ util = uclamp_rq_util_with(rq, util, p);
+
+ dl_util = cpu_util_dl(rq);
+
+ /*
+ * For frequency selection we do not make cpu_util_dl() a permanent part
+ * of this sum because we want to use cpu_bw_dl() later on, but we need
+ * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
+ * that we select f_max when there is no idle time.
+ *
+ * NOTE: numerical errors or stop class might cause us to not quite hit
+ * saturation when we should -- something for later.
+ */
+ if (util + dl_util >= max)
+ return max;
+
+ /*
+ * OTOH, for energy computation we need the estimated running time, so
+ * include util_dl and ignore dl_bw.
+ */
+ if (type == ENERGY_UTIL)
+ util += dl_util;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * irq metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ * max - irq
+ * U' = irq + --------- * U
+ * max
+ */
+ util = scale_irq_capacity(util, irq, max);
+ util += irq;
+
+ /*
+ * Bandwidth required by DEADLINE must always be granted while, for
+ * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+ * to gracefully reduce the frequency when no tasks show up for longer
+ * periods of time.
+ *
+ * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+ * bw_dl as requested freq. However, cpufreq is not yet ready for such
+ * an interface. So, we only do the latter for now.
+ */
+ if (type == FREQUENCY_UTIL)
+ util += cpu_bw_dl(rq);
+
+ return min(max, util);
+}
+
+unsigned long sched_cpu_util(int cpu, unsigned long max)
+{
+ return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
+ ENERGY_UTIL, NULL);
+}
+#endif /* CONFIG_SMP */
+
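A worked instance of the IRQ scaling step in effective_cpu_util() above, written as a standalone helper: with made-up numbers max = 1024, irq = 128 and util = 512 it yields 128 + 896 * 512 / 1024 = 576.

/* U' = irq + (max - irq) / max * U, as in the comment above */
static unsigned long scale_for_irq(unsigned long util, unsigned long irq,
                                   unsigned long max)
{
        return irq + (max - irq) * util / max;
}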
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
@@ -5213,6 +6082,7 @@ static int __sched_setscheduler(struct task_struct *p,
int retval, oldprio, oldpolicy = -1, queued, running;
int new_effective_prio, policy = attr->sched_policy;
const struct sched_class *prev_class;
+ struct callback_head *head;
struct rq_flags rf;
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
@@ -5237,11 +6107,10 @@ recheck:
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
- * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
+ * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
* SCHED_BATCH and SCHED_IDLE is 0.
*/
- if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
- (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
+ if (attr->sched_priority > MAX_RT_PRIO-1)
return -EINVAL;
if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
(rt_policy(policy) != (attr->sched_priority != 0)))
@@ -5451,6 +6320,7 @@ change:
/* Avoid rq from going away on us: */
preempt_disable();
+ head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, &rf);
if (pi) {
@@ -5459,7 +6329,7 @@ change:
}
/* Run balance callbacks after we've adjusted the PI chain: */
- balance_callback(rq);
+ balance_callbacks(rq, head);
preempt_enable();
return 0;
@@ -5954,7 +6824,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
}
#endif
again:
- retval = __set_cpus_allowed_ptr(p, new_mask, true);
+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
@@ -6076,14 +6946,6 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
return ret;
}
-/**
- * sys_sched_yield - yield the current processor to other threads.
- *
- * This function yields the current CPU to other tasks. If there are no
- * other threads running on this CPU then this function will return.
- *
- * Return: 0.
- */
static void do_sched_yield(void)
{
struct rq_flags rf;
@@ -6094,34 +6956,48 @@ static void do_sched_yield(void)
schedstat_inc(rq->yld_count);
current->sched_class->yield_task(rq);
- /*
- * Since we are going to call schedule() anyway, there's
- * no need to preempt or enable interrupts:
- */
preempt_disable();
- rq_unlock(rq, &rf);
+ rq_unlock_irq(rq, &rf);
sched_preempt_enable_no_resched();
schedule();
}
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. If there are no
+ * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
+ */
SYSCALL_DEFINE0(sched_yield)
{
do_sched_yield();
return 0;
}
-#ifndef CONFIG_PREEMPTION
-int __sched _cond_resched(void)
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+int __sched __cond_resched(void)
{
if (should_resched(0)) {
preempt_schedule_common();
return 1;
}
+#ifndef CONFIG_PREEMPT_RCU
rcu_all_qs();
+#endif
return 0;
}
-EXPORT_SYMBOL(_cond_resched);
+EXPORT_SYMBOL(__cond_resched);
+#endif
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
+EXPORT_STATIC_CALL_TRAMP(cond_resched);
+
+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
+EXPORT_STATIC_CALL_TRAMP(might_resched);
#endif
/*
@@ -6159,7 +7035,7 @@ EXPORT_SYMBOL(__cond_resched_lock);
*
* The scheduler is at all times free to pick the calling task as the most
* eligible task to run, if removing the yield() call from your code breaks
- * it, its already broken.
+ * it, it's already broken.
*
* Typical broken usage is:
*
@@ -6312,7 +7188,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
- ret = MAX_USER_RT_PRIO-1;
+ ret = MAX_RT_PRIO-1;
break;
case SCHED_DEADLINE:
case SCHED_NORMAL:
@@ -6447,6 +7323,7 @@ void sched_show_task(struct task_struct *p)
(unsigned long)task_thread_info(p)->flags);
print_worker_info(KERN_INFO, p);
+ print_stop_info(KERN_INFO, p);
show_stack(p, NULL, KERN_INFO);
put_task_stack(p);
}
@@ -6532,12 +7409,12 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
/*
- * Its possible that init_idle() gets called multiple times on a task,
+ * It's possible that init_idle() gets called multiple times on a task,
* in that case do_set_cpus_allowed() will not do the right thing.
*
* And since this is boot we can forgo the serialization.
*/
- set_cpus_allowed_common(idle, cpumask_of(cpu));
+ set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
#endif
/*
* We're having a chicken and egg problem, even though we are
@@ -6688,119 +7565,139 @@ void idle_task_exit(void)
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
-/*
- * Since this CPU is going 'away' for a while, fold any nr_active delta
- * we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable. We need to take the teardown thread which
- * is calling this into account, so we hand in adjust = 1 to the load
- * calculation.
- *
- * Also see the comment "Global load-average calculations".
- */
-static void calc_load_migrate(struct rq *rq)
+static int __balance_push_cpu_stop(void *arg)
{
- long delta = calc_load_fold_active(rq, 1);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
-}
+ struct task_struct *p = arg;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
+ int cpu;
-static struct task_struct *__pick_migrate_task(struct rq *rq)
-{
- const struct sched_class *class;
- struct task_struct *next;
+ raw_spin_lock_irq(&p->pi_lock);
+ rq_lock(rq, &rf);
- for_each_class(class) {
- next = class->pick_next_task(rq);
- if (next) {
- next->sched_class->put_prev_task(rq, next);
- return next;
- }
+ update_rq_clock(rq);
+
+ if (task_rq(p) == rq && task_on_rq_queued(p)) {
+ cpu = select_fallback_rq(rq->cpu, p);
+ rq = __migrate_task(rq, &rf, p, cpu);
}
- /* The idle class should always have a runnable task */
- BUG();
+ rq_unlock(rq, &rf);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
+
+ return 0;
}
+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
+
/*
- * Migrate all tasks from the rq, sleeping tasks will be migrated by
- * try_to_wake_up()->select_task_rq().
- *
- * Called with rq->lock held even though we'er in stop_machine() and
- * there's no concurrency possible, we hold the required locks anyway
- * because of lock validation efforts.
+ * Ensure we only run per-cpu kthreads once the CPU goes !active.
*/
-static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
+static void balance_push(struct rq *rq)
{
- struct rq *rq = dead_rq;
- struct task_struct *next, *stop = rq->stop;
- struct rq_flags orf = *rf;
- int dest_cpu;
+ struct task_struct *push_task = rq->curr;
+ lockdep_assert_held(&rq->lock);
+ SCHED_WARN_ON(rq->cpu != smp_processor_id());
/*
- * Fudge the rq selection such that the below task selection loop
- * doesn't get stuck on the currently eligible stop task.
- *
- * We're currently inside stop_machine() and the rq is either stuck
- * in the stop_machine_cpu_stop() loop, or we're executing this code,
- * either way we should never end up calling schedule() until we're
- * done here.
+ * Ensure the thing is persistent until balance_push_set(.on = false);
*/
- rq->stop = NULL;
+ rq->balance_callback = &balance_push_callback;
/*
- * put_prev_task() and pick_next_task() sched
- * class method both need to have an up-to-date
- * value of rq->clock[_task]
+ * Both the cpu-hotplug and stop task are in this case and are
+ * required to complete the hotplug process.
+ *
+ * XXX: the idle task does not match kthread_is_per_cpu() due to
+ * histerical raisins.
*/
- update_rq_clock(rq);
+ if (rq->idle == push_task ||
+ ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+ is_migration_disabled(push_task)) {
- for (;;) {
/*
- * There's this thread running, bail when that's the only
- * remaining thread:
- */
- if (rq->nr_running == 1)
- break;
-
- next = __pick_migrate_task(rq);
-
- /*
- * Rules for changing task_struct::cpus_mask are holding
- * both pi_lock and rq->lock, such that holding either
- * stabilizes the mask.
+ * If this is the idle task on the outgoing CPU try to wake
+ * up the hotplug control thread which might wait for the
+ * last task to vanish. The rcuwait_active() check is
+ * accurate here because the waiter is pinned on this CPU
+ * and can't obviously be running in parallel.
*
- * Drop rq->lock is not quite as disastrous as it usually is
- * because !cpu_active at this point, which means load-balance
- * will not interfere. Also, stop-machine.
+ * On RT kernels this also has to check whether there are
+ * pinned and scheduled out tasks on the runqueue. They
+ * need to leave the migrate disabled section first.
*/
- rq_unlock(rq, rf);
- raw_spin_lock(&next->pi_lock);
- rq_relock(rq, rf);
-
- /*
- * Since we're inside stop-machine, _nothing_ should have
- * changed the task, WARN if weird stuff happened, because in
- * that case the above rq->lock drop is a fail too.
- */
- if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
- raw_spin_unlock(&next->pi_lock);
- continue;
+ if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
+ rcuwait_active(&rq->hotplug_wait)) {
+ raw_spin_unlock(&rq->lock);
+ rcuwait_wake_up(&rq->hotplug_wait);
+ raw_spin_lock(&rq->lock);
}
+ return;
+ }
- /* Find suitable destination for @next, with force if needed. */
- dest_cpu = select_fallback_rq(dead_rq->cpu, next);
- rq = __migrate_task(rq, rf, next, dest_cpu);
- if (rq != dead_rq) {
- rq_unlock(rq, rf);
- rq = dead_rq;
- *rf = orf;
- rq_relock(rq, rf);
- }
- raw_spin_unlock(&next->pi_lock);
+ get_task_struct(push_task);
+ /*
+ * Temporarily drop rq->lock such that we can wake-up the stop task.
+ * Both preemption and IRQs are still disabled.
+ */
+ raw_spin_unlock(&rq->lock);
+ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ this_cpu_ptr(&push_work));
+ /*
+ * At this point need_resched() is true and we'll take the loop in
+ * schedule(). The next pick is obviously going to be the stop task
+	 * which is a per-CPU kthread (kthread_is_per_cpu()) and will push this task away.
+ */
+ raw_spin_lock(&rq->lock);
+}
+
+static void balance_push_set(int cpu, bool on)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+ rq->balance_push = on;
+ if (on) {
+ WARN_ON_ONCE(rq->balance_callback);
+ rq->balance_callback = &balance_push_callback;
+ } else if (rq->balance_callback == &balance_push_callback) {
+ rq->balance_callback = NULL;
}
+ rq_unlock_irqrestore(rq, &rf);
+}
- rq->stop = stop;
+/*
+ * Invoked from a CPU's hotplug control thread after the CPU has been marked
+ * inactive. All tasks which are not per CPU kernel threads are either
+ * pushed off this CPU now via balance_push() or placed on a different CPU
+ * during wakeup. Wait until the CPU is quiescent.
+ */
+static void balance_hotplug_wait(void)
+{
+ struct rq *rq = this_rq();
+
+ rcuwait_wait_event(&rq->hotplug_wait,
+ rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
+ TASK_UNINTERRUPTIBLE);
}
+
+#else
+
+static inline void balance_push(struct rq *rq)
+{
+}
+
+static inline void balance_push_set(int cpu, bool on)
+{
+}
+
+static inline void balance_hotplug_wait(void)
+{
+}
+
#endif /* CONFIG_HOTPLUG_CPU */
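The balance_hotplug_wait() / balance_push() pair above is an instance of the
generic rcuwait handshake: one side sleeps until a condition holds, the other
makes the condition true and then wakes any active waiter. A minimal sketch of
that pattern, assuming only the rcuwait_*() and atomic_*() kernel helpers; all
other names here are illustrative:

	/* Illustrative sketch of the wait/wake pairing, not scheduler code. */
	static struct rcuwait empty_wait;		/* rcuwait_init() at setup */
	static atomic_t nr_items = ATOMIC_INIT(0);

	static void wait_until_empty(void)
	{
		/* Sleeps until the condition holds; re-evaluated on each wake-up. */
		rcuwait_wait_event(&empty_wait, atomic_read(&nr_items) == 0,
				   TASK_UNINTERRUPTIBLE);
	}

	static void retire_one_item(void)
	{
		/* The side that retires the last item issues the wake-up. */
		if (atomic_dec_and_test(&nr_items) && rcuwait_active(&empty_wait))
			rcuwait_wake_up(&empty_wait);
	}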
void set_rq_online(struct rq *rq)
@@ -6886,6 +7783,12 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
+ /*
+ * Make sure that when the hotplug state machine does a roll-back
+ * we clear balance_push. Ideally that would happen earlier...
+ */
+ balance_push_set(cpu, false);
+
#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
@@ -6921,18 +7824,46 @@ int sched_cpu_activate(unsigned int cpu)
int sched_cpu_deactivate(unsigned int cpu)
{
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
int ret;
+ /*
+ * Remove the CPU from nohz.idle_cpus_mask to prevent it from participating
+ * in load balancing while it is not active.
+ */
+ nohz_balance_exit_idle(rq);
+
set_cpu_active(cpu, false);
+
/*
- * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
- * users of this state to go away such that all new such users will
- * observe it.
+ * From this point forward, this CPU will refuse to run any task that
+ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+ * push those tasks away until this gets cleared, see
+ * sched_cpu_dying().
+ */
+ balance_push_set(cpu, true);
+
+ /*
+ * We've cleared cpu_active_mask / set balance_push, wait for all
+ * preempt-disabled and RCU users of this state to go away such that
+ * all new such users will observe it.
+ *
+ * Specifically, we rely on ttwu to no longer target this CPU, see
+ * ttwu_queue_cond() and is_cpu_allowed().
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
synchronize_rcu();
+ rq_lock_irqsave(rq, &rf);
+ if (rq->rd) {
+ update_rq_clock(rq);
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_offline(rq);
+ }
+ rq_unlock_irqrestore(rq, &rf);
+
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
@@ -6946,6 +7877,7 @@ int sched_cpu_deactivate(unsigned int cpu)
ret = cpuset_cpu_inactive(cpu);
if (ret) {
+ balance_push_set(cpu, false);
set_cpu_active(cpu, true);
return ret;
}
@@ -6969,6 +7901,60 @@ int sched_cpu_starting(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Invoked immediately before the stopper thread is invoked to bring the
+ * CPU down completely. At this point all per CPU kthreads except the
+ * hotplug thread (current) and the stopper thread (inactive) have been
+ * either parked or have been unbound from the outgoing CPU. Ensure that
+ * any of those which might be on the way out are gone.
+ *
+ * If after this point a bound task is being woken on this CPU then the
+ * responsible hotplug callback has failed to do its job.
+ * sched_cpu_dying() will catch it with the appropriate fireworks.
+ */
+int sched_cpu_wait_empty(unsigned int cpu)
+{
+ balance_hotplug_wait();
+ return 0;
+}
+
+/*
+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
+ * might have. Called from the CPU stopper task after ensuring that the
+ * stopper is the last running task on the CPU, so nr_active count is
+ * stable. We need to take the teardown thread which is calling this into
+ * account, so we hand in adjust = 1 to the load calculation.
+ *
+ * Also see the comment "Global load-average calculations".
+ */
+static void calc_load_migrate(struct rq *rq)
+{
+ long delta = calc_load_fold_active(rq, 1);
+
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+}
+
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+ struct task_struct *g, *p;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+ for_each_process_thread(g, p) {
+ if (task_cpu(p) != cpu)
+ continue;
+
+ if (!task_on_rq_queued(p))
+ continue;
+
+ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+ }
+}
+
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -6978,17 +7964,20 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_offline(rq);
+ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+ WARN(true, "Dying CPU not properly vacated!");
+ dump_rq_tasks(rq, KERN_WARNING);
}
- migrate_tasks(rq, &rf);
- BUG_ON(rq->nr_running != 1);
rq_unlock_irqrestore(rq, &rf);
+ /*
+ * Now that the CPU is offline, make sure new tasks are
+ * welcome again once it comes back up.
+ */
+ balance_push_set(cpu, false);
+
calc_load_migrate(rq);
update_max_interval();
- nohz_balance_exit_idle(rq);
hrtick_clear(rq);
return 0;
}
@@ -7188,7 +8177,10 @@ void __init sched_init(void)
rq->last_blocked_load_update_tick = jiffies;
atomic_set(&rq->nohz_flags, 0);
- rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
+ INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
@@ -7327,6 +8319,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset)
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);
+
+#ifdef CONFIG_SMP
+void __cant_migrate(const char *file, int line)
+{
+ static unsigned long prev_jiffy;
+
+ if (irqs_disabled())
+ return;
+
+ if (is_migration_disabled(current))
+ return;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return;
+
+ if (preempt_count() > 0)
+ return;
+
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
+ pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), is_migration_disabled(current),
+ current->pid, current->comm);
+
+ debug_show_held_locks(current);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_migrate);
+#endif
#endif
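__cant_migrate() above is the debug backend for an assertion that the current
context cannot be migrated to another CPU, much like __cant_sleep() backs
cant_sleep(); the exact wrapper macro is assumed here. A hedged sketch of the
kind of per-CPU read such an assertion protects, with invented names:

	static DEFINE_PER_CPU(u64, stat_counter);

	static u64 stat_counter_read_local(void)
	{
		/* Only meaningful if migration is excluded, e.g. inside a
		 * preempt_disable() or migrate_disable() section. */
		cant_migrate();
		return __this_cpu_read(stat_counter);
	}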
#ifdef CONFIG_MAGIC_SYSRQ
@@ -7660,7 +8685,7 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
return -EINVAL;
#endif
/*
- * Serialize against wake_up_new_task() such that if its
+ * Serialize against wake_up_new_task() such that if it's
* running, we're sure to observe its full state.
*/
raw_spin_lock_irq(&task->pi_lock);
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 8cb06c8c7eb1..ceb03d76c0cc 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -120,7 +120,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
+ cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
unsigned long cap, max_cap = 0;
int cpu, max_cpu = -1;
@@ -151,7 +151,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
- if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
+ if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e254745a82cb..41e498b0008a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,6 +53,7 @@ struct sugov_cpu {
unsigned int iowait_boost;
u64 last_update;
+ unsigned long util;
unsigned long bw_dl;
unsigned long max;
@@ -102,7 +103,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
- if (sg_policy->next_freq == next_freq)
+ if (sg_policy->need_freq_update)
+ sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+ else if (sg_policy->next_freq == next_freq)
return false;
sg_policy->next_freq = next_freq;
@@ -164,127 +167,19 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
return sg_policy->next_freq;
- sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
-/*
- * This function computes an effective utilization for the given CPU, to be
- * used for frequency selection given the linear relation: f = u * f_max.
- *
- * The scheduler tracks the following metrics:
- *
- * cpu_util_{cfs,rt,dl,irq}()
- * cpu_bw_dl()
- *
- * Where the cfs,rt and dl util numbers are tracked with the same metric and
- * synchronized windows and are thus directly comparable.
- *
- * The cfs,rt,dl utilization are the running times measured with rq->clock_task
- * which excludes things like IRQ and steal-time. These latter are then accrued
- * in the irq utilization.
- *
- * The DL bandwidth number otoh is not a measured metric but a value computed
- * based on the task model parameters and gives the minimal utilization
- * required to meet deadlines.
- */
-unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long max, enum schedutil_type type,
- struct task_struct *p)
-{
- unsigned long dl_util, util, irq;
- struct rq *rq = cpu_rq(cpu);
-
- if (!uclamp_is_used() &&
- type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
- return max;
- }
-
- /*
- * Early check to see if IRQ/steal time saturates the CPU, can be
- * because of inaccuracies in how we track these -- see
- * update_irq_load_avg().
- */
- irq = cpu_util_irq(rq);
- if (unlikely(irq >= max))
- return max;
-
- /*
- * Because the time spend on RT/DL tasks is visible as 'lost' time to
- * CFS tasks and we use the same metric to track the effective
- * utilization (PELT windows are synchronized) we can directly add them
- * to obtain the CPU's actual utilization.
- *
- * CFS and RT utilization can be boosted or capped, depending on
- * utilization clamp constraints requested by currently RUNNABLE
- * tasks.
- * When there are no CFS RUNNABLE tasks, clamps are released and
- * frequency will be gracefully reduced with the utilization decay.
- */
- util = util_cfs + cpu_util_rt(rq);
- if (type == FREQUENCY_UTIL)
- util = uclamp_rq_util_with(rq, util, p);
-
- dl_util = cpu_util_dl(rq);
-
- /*
- * For frequency selection we do not make cpu_util_dl() a permanent part
- * of this sum because we want to use cpu_bw_dl() later on, but we need
- * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
- * that we select f_max when there is no idle time.
- *
- * NOTE: numerical errors or stop class might cause us to not quite hit
- * saturation when we should -- something for later.
- */
- if (util + dl_util >= max)
- return max;
-
- /*
- * OTOH, for energy computation we need the estimated running time, so
- * include util_dl and ignore dl_bw.
- */
- if (type == ENERGY_UTIL)
- util += dl_util;
-
- /*
- * There is still idle time; further improve the number by using the
- * irq metric. Because IRQ/steal time is hidden from the task clock we
- * need to scale the task numbers:
- *
- * max - irq
- * U' = irq + --------- * U
- * max
- */
- util = scale_irq_capacity(util, irq, max);
- util += irq;
-
- /*
- * Bandwidth required by DEADLINE must always be granted while, for
- * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
- * to gracefully reduce the frequency when no tasks show up for longer
- * periods of time.
- *
- * Ideally we would like to set bw_dl as min/guaranteed freq and util +
- * bw_dl as requested freq. However, cpufreq is not yet ready for such
- * an interface. So, we only do the latter for now.
- */
- if (type == FREQUENCY_UTIL)
- util += cpu_bw_dl(rq);
-
- return min(max, util);
-}
-
-static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
- unsigned long util = cpu_util_cfs(rq);
unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->max = max;
sg_cpu->bw_dl = cpu_bw_dl(rq);
-
- return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
+ sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+ FREQUENCY_UTIL, NULL);
}
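The aggregation removed above is now performed by effective_cpu_util(), which
the new sugov_get_util() calls. Its IRQ step follows the formula from the
removed comment, U' = irq + ((max - irq) / max) * U. A self-contained check of
that arithmetic with made-up numbers; scaled_util() below merely mirrors the
formula and is not a kernel helper:

	#include <stdio.h>

	/* Shrink the task-clock based utilization by the share of capacity
	 * consumed by IRQ/steal time, then add the IRQ contribution back. */
	static unsigned long scaled_util(unsigned long util, unsigned long irq,
					 unsigned long max)
	{
		util = util * (max - irq) / max;
		return util + irq;
	}

	int main(void)
	{
		/* max = 1024, irq = 128, cfs+rt util = 512:
		 * 512 * 896 / 1024 + 128 = 448 + 128 = 576 */
		printf("%lu\n", scaled_util(512, 128, 1024));
		return 0;
	}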
/**
@@ -361,8 +256,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
* sugov_iowait_apply() - Apply the IO boost to a CPU.
* @sg_cpu: the sugov data for the cpu to boost
* @time: the update time from the caller
- * @util: the utilization to (eventually) boost
- * @max: the maximum value the utilization can be boosted to
*
* A CPU running a task which was woken up after an IO operation can have its
* utilization boosted to speed up the completion of those IO operations.
@@ -376,18 +269,17 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
* This mechanism is designed to boost tasks that wait on IO frequently, while
* being more conservative on tasks which do only sporadic IO operations.
*/
-static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
- unsigned long util, unsigned long max)
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
unsigned long boost;
/* No boost currently required */
if (!sg_cpu->iowait_boost)
- return util;
+ return;
/* Reset boost if the CPU appears to have been idle enough */
if (sugov_iowait_reset(sg_cpu, time, false))
- return util;
+ return;
if (!sg_cpu->iowait_boost_pending) {
/*
@@ -396,18 +288,19 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
sg_cpu->iowait_boost >>= 1;
if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
- return util;
+ return;
}
}
sg_cpu->iowait_boost_pending = false;
/*
- * @util is already in capacity scale; convert iowait_boost
+ * sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare.
*/
- boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
- return max(boost, util);
+ boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+ if (sg_cpu->util < boost)
+ sg_cpu->util = boost;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -433,15 +326,10 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_p
sg_policy->limits_changed = true;
}
-static void sugov_update_single(struct update_util_data *hook, u64 time,
- unsigned int flags)
+static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+ u64 time, unsigned int flags)
{
- struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long util, max;
- unsigned int next_f;
- bool busy;
- unsigned int cached_freq = sg_policy->cached_raw_freq;
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -449,20 +337,31 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
ignore_dl_rate_limit(sg_cpu, sg_policy);
if (!sugov_should_update_freq(sg_policy, time))
- return;
+ return false;
+
+ sugov_get_util(sg_cpu);
+ sugov_iowait_apply(sg_cpu, time);
- /* Limits may have changed, don't skip frequency update */
- busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
+ return true;
+}
- util = sugov_get_util(sg_cpu);
- max = sg_cpu->max;
- util = sugov_iowait_apply(sg_cpu, time, util, max);
- next_f = get_next_freq(sg_policy, util, max);
+static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ unsigned int cached_freq = sg_policy->cached_raw_freq;
+ unsigned int next_f;
+
+ if (!sugov_update_single_common(sg_cpu, time, flags))
+ return;
+
+ next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
/*
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq) {
+ if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq;
/* Restore cached freq as next_freq has changed */
@@ -483,6 +382,38 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
}
}
+static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ unsigned long prev_util = sg_cpu->util;
+
+ /*
+ * Fall back to the "frequency" path if frequency invariance is not
+ * supported, because the direct mapping between the utilization and
+ * the performance levels depends on the frequency invariance.
+ */
+ if (!arch_scale_freq_invariant()) {
+ sugov_update_single_freq(hook, time, flags);
+ return;
+ }
+
+ if (!sugov_update_single_common(sg_cpu, time, flags))
+ return;
+
+ /*
+ * Do not reduce the target performance level if the CPU has not been
+ * idle recently, as the reduction is likely to be premature then.
+ */
+ if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+ sg_cpu->util = prev_util;
+
+ cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
+ map_util_perf(sg_cpu->util), sg_cpu->max);
+
+ sg_cpu->sg_policy->last_freq_update_time = time;
+}
+
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -494,9 +425,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
- j_util = sugov_get_util(j_sg_cpu);
+ sugov_get_util(j_sg_cpu);
+ sugov_iowait_apply(j_sg_cpu, time);
+ j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
- j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
if (j_util * max > j_max * util) {
util = j_util;
@@ -820,6 +752,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
static int sugov_start(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
+ void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
unsigned int cpu;
sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
@@ -827,9 +760,10 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
sg_policy->limits_changed = false;
- sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
+ sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -838,13 +772,17 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->sg_policy = sg_policy;
}
+ if (policy_is_shared(policy))
+ uu = sugov_update_shared;
+ else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
+ uu = sugov_update_single_perf;
+ else
+ uu = sugov_update_single_freq;
+
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- policy_is_shared(policy) ?
- sugov_update_shared :
- sugov_update_single);
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
}
return 0;
}
@@ -881,7 +819,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
struct cpufreq_governor schedutil_gov = {
.name = "schedutil",
.owner = THIS_MODULE,
- .dynamic_switching = true,
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
.init = sugov_init,
.exit = sugov_exit,
.start = sugov_start,
@@ -899,16 +837,9 @@ struct cpufreq_governor *cpufreq_default_governor(void)
cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
static void rebuild_sd_workfn(struct work_struct *work)
{
- mutex_lock(&sched_energy_mutex);
- sched_energy_update = true;
- rebuild_sched_domains();
- sched_energy_update = false;
- mutex_unlock(&sched_energy_mutex);
+ rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 0033731a0797..ec9be789c7e2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -11,7 +11,7 @@
* This code tracks the priority of each CPU so that global migration
* decisions are easy to calculate. Each CPU can be in a state as follows:
*
- * (INVALID), IDLE, NORMAL, RT1, ... RT99
+ * (INVALID), NORMAL, RT1, ... RT99, HIGHER
*
* going from the lowest priority to the highest. CPUs in the INVALID state
* are not eligible for routing. The system maintains this state with
@@ -19,24 +19,48 @@
* in that class). Therefore a typical application without affinity
* restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
* searches). For tasks with affinity restrictions, the algorithm has a
- * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
+ * worst case complexity of O(min(101, nr_domcpus)), though the scenario that
* yields the worst case search is fairly contrived.
*/
#include "sched.h"
-/* Convert between a 140 based task->prio, and our 102 based cpupri */
+/*
+ * p->rt_priority p->prio newpri cpupri
+ *
+ * -1 -1 (CPUPRI_INVALID)
+ *
+ * 99 0 (CPUPRI_NORMAL)
+ *
+ * 1 98 98 1
+ * ...
+ * 49 50 50 49
+ * 50 49 49 50
+ * ...
+ * 99 0 0 99
+ *
+ * 100 100 (CPUPRI_HIGHER)
+ */
static int convert_prio(int prio)
{
int cpupri;
- if (prio == CPUPRI_INVALID)
- cpupri = CPUPRI_INVALID;
- else if (prio == MAX_PRIO)
- cpupri = CPUPRI_IDLE;
- else if (prio >= MAX_RT_PRIO)
- cpupri = CPUPRI_NORMAL;
- else
- cpupri = MAX_RT_PRIO - prio + 1;
+ switch (prio) {
+ case CPUPRI_INVALID:
+ cpupri = CPUPRI_INVALID; /* -1 */
+ break;
+
+ case 0 ... 98:
+ cpupri = MAX_RT_PRIO-1 - prio; /* 1 ... 99 */
+ break;
+
+ case MAX_RT_PRIO-1:
+ cpupri = CPUPRI_NORMAL; /* 0 */
+ break;
+
+ case MAX_RT_PRIO:
+ cpupri = CPUPRI_HIGHER; /* 100 */
+ break;
+ }
return cpupri;
}
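A standalone check of the new mapping, assuming the conventional MAX_RT_PRIO
value of 100; the function below restates convert_prio() rather than calling
the kernel one:

	#include <stdio.h>

	#define MAX_RT_PRIO	100
	#define CPUPRI_INVALID	-1
	#define CPUPRI_NORMAL	0
	#define CPUPRI_HIGHER	100

	static int convert_prio(int prio)
	{
		if (prio == CPUPRI_INVALID)
			return CPUPRI_INVALID;
		if (prio == MAX_RT_PRIO - 1)
			return CPUPRI_NORMAL;
		if (prio == MAX_RT_PRIO)
			return CPUPRI_HIGHER;
		return MAX_RT_PRIO - 1 - prio;		/* RT1 ... RT99 */
	}

	int main(void)
	{
		int samples[] = { -1, 0, 50, 98, 99, 100 };
		unsigned int i;

		/* Prints -1, 99, 49, 1, 0, 100: matches the table above. */
		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("prio %3d -> cpupri %3d\n",
			       samples[i], convert_prio(samples[i]));
		return 0;
	}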
@@ -73,11 +97,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip)
return 0;
- if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
+ if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
return 0;
if (lowest_mask) {
- cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+ cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
/*
* We have to ensure that we have at least one bit
@@ -177,7 +201,7 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
* cpupri_set - update the CPU priority setting
* @cp: The cpupri context
* @cpu: The target CPU
- * @newpri: The priority (INVALID-RT99) to assign to this CPU
+ * @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
*
* Note: Assumes cpu_rq(cpu)->lock is locked
*
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index efbb492bb94c..d6cba0020064 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2)
+#define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO+1)
#define CPUPRI_INVALID -1
-#define CPUPRI_IDLE 0
-#define CPUPRI_NORMAL 1
-/* values 2-101 are RT priorities 0-99 */
+#define CPUPRI_NORMAL 0
+/* values 1-99 are for RT1-RT99 priorities */
+#define CPUPRI_HIGHER 100
struct cpupri_vec {
atomic_t count;
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5a55d2300452..5f611658eeab 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -44,12 +44,13 @@ static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
}
/*
- * Called before incrementing preempt_count on {soft,}irq_enter
+ * Called after incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
-void irqtime_account_irq(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
+ unsigned int pc;
s64 delta;
int cpu;
@@ -59,6 +60,7 @@ void irqtime_account_irq(struct task_struct *curr)
cpu = smp_processor_id();
delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
irqtime->irq_start_time += delta;
+ pc = preempt_count() - offset;
/*
* We do not account for softirq time from ksoftirqd here.
@@ -66,12 +68,11 @@ void irqtime_account_irq(struct task_struct *curr)
* in that case, so as not to confuse the scheduler with a special task
* that does not consume any time, but still wants to run.
*/
- if (hardirq_count())
+ if (pc & HARDIRQ_MASK)
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
- else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+ else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
-EXPORT_SYMBOL_GPL(irqtime_account_irq);
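The pc & HARDIRQ_MASK and pc & SOFTIRQ_OFFSET tests rely on the preempt_count()
bit layout: preemption bits lowest, softirq bits above them, hardirq bits above
those. A standalone illustration; the shift and mask values are assumptions
that mirror the usual include/linux/preempt.h layout:

	#include <stdio.h>

	#define SOFTIRQ_SHIFT	8
	#define HARDIRQ_SHIFT	16
	#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
	#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
	#define HARDIRQ_MASK	(0xfUL << HARDIRQ_SHIFT)

	/* Classify a preempt_count value the way the new checks above do,
	 * after the caller has already subtracted its own section's offset. */
	static const char *classify(unsigned long pc)
	{
		if (pc & HARDIRQ_MASK)
			return "hardirq";
		if (pc & SOFTIRQ_OFFSET)
			return "serving softirq";
		return "task";
	}

	int main(void)
	{
		printf("%s\n", classify(HARDIRQ_OFFSET));	/* hardirq */
		printf("%s\n", classify(SOFTIRQ_OFFSET));	/* serving softirq */
		printf("%s\n", classify(2));			/* task */
		return 0;
	}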
static u64 irqtime_tick_accounted(u64 maxtime)
{
@@ -418,24 +419,21 @@ void vtime_task_switch(struct task_struct *prev)
}
# endif
-/*
- * Archs that account the whole time spent in the idle task
- * (outside irq) as idle time can rely on this and just implement
- * vtime_account_kernel() and vtime_account_idle(). Archs that
- * have other meaning of the idle time (s390 only includes the
- * time spent by the CPU when it's in low power mode) must override
- * vtime_account().
- */
-#ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
- if (!in_interrupt() && is_idle_task(tsk))
+ unsigned int pc = preempt_count() - offset;
+
+ if (pc & HARDIRQ_OFFSET) {
+ vtime_account_hardirq(tsk);
+ } else if (pc & SOFTIRQ_OFFSET) {
+ vtime_account_softirq(tsk);
+ } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
+ is_idle_task(tsk)) {
vtime_account_idle(tsk);
- else
+ } else {
vtime_account_kernel(tsk);
+ }
}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
u64 *ut, u64 *st)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index f232305dcefe..aac3539aa0fe 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
+#ifdef CONFIG_RT_MUTEXES
+static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
+{
+ return dl_se->pi_se;
+}
+
+static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
+{
+ return pi_of(dl_se) != dl_se;
+}
+#else
+static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
+{
+ return dl_se;
+}
+
+static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
@@ -97,6 +119,17 @@ static inline unsigned long dl_bw_capacity(int i)
return __dl_bw_capacity(i);
}
}
+
+static inline bool dl_bw_visited(int cpu, u64 gen)
+{
+ struct root_domain *rd = cpu_rq(cpu)->rd;
+
+ if (rd->visit_gen == gen)
+ return true;
+
+ rd->visit_gen = gen;
+ return false;
+}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
@@ -112,6 +145,11 @@ static inline unsigned long dl_bw_capacity(int i)
{
return SCHED_CAPACITY_SCALE;
}
+
+static inline bool dl_bw_visited(int cpu, u64 gen)
+{
+ return false;
+}
#endif
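dl_bw_visited() lets the per-CPU loops below touch each shared root domain only
once per invocation by stamping it with a generation number. The same pattern
in a self-contained form, with invented names and a fixed CPU-to-domain map:

	#include <stdio.h>

	#define NR_CPUS 8

	struct domain { unsigned long long visit_gen; };

	static struct domain doms[2];
	static struct domain *cpu_dom[NR_CPUS] = {
		&doms[0], &doms[0], &doms[0], &doms[0],
		&doms[1], &doms[1], &doms[1], &doms[1],
	};

	static int dom_visited(int cpu, unsigned long long gen)
	{
		struct domain *d = cpu_dom[cpu];

		if (d->visit_gen == gen)
			return 1;
		d->visit_gen = gen;	/* first visitor stamps the domain */
		return 0;
	}

	int main(void)
	{
		static unsigned long long generation;
		unsigned long long gen = ++generation;
		int cpu;

		/* Only CPU0 and CPU4 reach the body: one visit per domain. */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (!dom_visited(cpu, gen))
				printf("handling domain of CPU%d\n", cpu);
		return 0;
	}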
static inline
@@ -479,58 +517,44 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
update_dl_migration(dl_rq);
}
+#define __node_2_pdl(node) \
+ rb_entry((node), struct task_struct, pushable_dl_tasks)
+
+static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
+{
+ return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
+}
+
/*
* The list of pushable -deadline task is not a plist, like in
* sched_rt.c, it is an rb-tree with tasks ordered by deadline.
*/
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
- struct dl_rq *dl_rq = &rq->dl;
- struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct task_struct *entry;
- bool leftmost = true;
+ struct rb_node *leftmost;
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct task_struct,
- pushable_dl_tasks);
- if (dl_entity_preempt(&p->dl, &entry->dl))
- link = &parent->rb_left;
- else {
- link = &parent->rb_right;
- leftmost = false;
- }
- }
-
+ leftmost = rb_add_cached(&p->pushable_dl_tasks,
+ &rq->dl.pushable_dl_tasks_root,
+ __pushable_less);
if (leftmost)
- dl_rq->earliest_dl.next = p->dl.deadline;
-
- rb_link_node(&p->pushable_dl_tasks, parent, link);
- rb_insert_color_cached(&p->pushable_dl_tasks,
- &dl_rq->pushable_dl_tasks_root, leftmost);
+ rq->dl.earliest_dl.next = p->dl.deadline;
}
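The rbtree rewrites in this patch (pushable deadline tasks here, the deadline
runqueue and the CFS timeline further down) all follow the same shape: a
type-specific less() comparator plus the generic rb_add_cached(), which returns
the inserted node when it became the new leftmost and NULL otherwise. A hedged
sketch of that shape with an invented item type, assuming kernel context and
linux/rbtree.h:

	struct item {
		u64		key;
		struct rb_node	node;
	};

	#define __node_2_item(n)	rb_entry((n), struct item, node)

	static inline bool item_less(struct rb_node *a, const struct rb_node *b)
	{
		return __node_2_item(a)->key < __node_2_item(b)->key;
	}

	static void item_insert(struct rb_root_cached *root, struct item *it)
	{
		/* Non-NULL return: @it is now the cached leftmost element. */
		if (rb_add_cached(&it->node, root, item_less))
			pr_debug("new minimum: %llu\n", it->key);
	}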
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
+ struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
+ struct rb_node *leftmost;
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return;
- if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
- struct rb_node *next_node;
-
- next_node = rb_next(&p->pushable_dl_tasks);
- if (next_node) {
- dl_rq->earliest_dl.next = rb_entry(next_node,
- struct task_struct, pushable_dl_tasks)->dl.deadline;
- }
- }
+ leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
+ if (leftmost)
+ dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
- rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
@@ -543,7 +567,7 @@ static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
- return dl_task(prev);
+ return rq->online && dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
@@ -698,7 +722,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
- WARN_ON(dl_se->dl_boosted);
+ WARN_ON(is_dl_boosted(dl_se));
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
@@ -736,21 +760,20 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
* could happen are, typically, an entity voluntarily trying to overcome its
* runtime, or it just underestimated it during sched_setattr().
*/
-static void replenish_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
+static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
- BUG_ON(pi_se->dl_runtime <= 0);
+ BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0) {
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
if (dl_se->dl_yielded && dl_se->runtime > 0)
@@ -763,8 +786,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* arbitrary large.
*/
while (dl_se->runtime <= 0) {
- dl_se->deadline += pi_se->dl_period;
- dl_se->runtime += pi_se->dl_runtime;
+ dl_se->deadline += pi_of(dl_se)->dl_period;
+ dl_se->runtime += pi_of(dl_se)->dl_runtime;
}
/*
@@ -778,8 +801,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
if (dl_se->dl_yielded)
@@ -812,8 +835,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* task with deadline equal to period this is the same of using
* dl_period instead of dl_deadline in the equation above.
*/
-static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, u64 t)
+static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
u64 left, right;
@@ -835,9 +857,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
- left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
- (pi_se->dl_runtime >> DL_SCALE);
+ (pi_of(dl_se)->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
@@ -922,24 +944,23 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
* Please refer to the comments update_dl_revised_wakeup() function to find
* more about the Revised CBS rule.
*/
-static void update_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
+static void update_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
- dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+ dl_entity_overflow(dl_se, rq_clock(rq))) {
if (unlikely(!dl_is_implicit(dl_se) &&
!dl_time_before(dl_se->deadline, rq_clock(rq)) &&
- !dl_se->dl_boosted)){
+ !is_dl_boosted(dl_se))) {
update_dl_revised_wakeup(dl_se, rq);
return;
}
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
}
@@ -1038,7 +1059,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* The task might have been boosted by someone else and might be in the
* boosting/deboosting path, its not throttled.
*/
- if (dl_se->dl_boosted)
+ if (is_dl_boosted(dl_se))
goto unlock;
/*
@@ -1066,7 +1087,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* but do not enqueue -- wait for our wakeup to do that.
*/
if (!task_on_rq_queued(p)) {
- replenish_dl_entity(dl_se, dl_se);
+ replenish_dl_entity(dl_se);
goto unlock;
}
@@ -1156,7 +1177,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
return;
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
@@ -1287,7 +1308,7 @@ throttle:
dl_se->dl_overrun = 1;
__dequeue_task_dl(rq, curr, 0);
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
@@ -1378,6 +1399,8 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
if (dl_rq->earliest_dl.curr == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
+ if (dl_rq->earliest_dl.curr == 0)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
dl_rq->earliest_dl.curr = deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
}
@@ -1395,6 +1418,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
cpudl_clear(&rq->rd->cpudl, rq->cpu);
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
} else {
struct rb_node *leftmost = dl_rq->root.rb_leftmost;
struct sched_dl_entity *entry;
@@ -1440,29 +1464,21 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
dec_dl_migration(dl_se, dl_rq);
}
+#define __node_2_dle(node) \
+ rb_entry((node), struct sched_dl_entity, rb_node)
+
+static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
+{
+ return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
+}
+
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rb_node **link = &dl_rq->root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct sched_dl_entity *entry;
- int leftmost = 1;
BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct sched_dl_entity, rb_node);
- if (dl_time_before(dl_se->deadline, entry->deadline))
- link = &parent->rb_left;
- else {
- link = &parent->rb_right;
- leftmost = 0;
- }
- }
-
- rb_link_node(&dl_se->rb_node, parent, link);
- rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
+ rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
inc_dl_tasks(dl_se, dl_rq);
}
@@ -1475,14 +1491,14 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
return;
rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
+
RB_CLEAR_NODE(&dl_se->rb_node);
dec_dl_tasks(dl_se, dl_rq);
}
static void
-enqueue_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, int flags)
+enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
BUG_ON(on_dl_rq(dl_se));
@@ -1493,9 +1509,9 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
*/
if (flags & ENQUEUE_WAKEUP) {
task_contending(dl_se, flags);
- update_dl_entity(dl_se, pi_se);
+ update_dl_entity(dl_se);
} else if (flags & ENQUEUE_REPLENISH) {
- replenish_dl_entity(dl_se, pi_se);
+ replenish_dl_entity(dl_se);
} else if ((flags & ENQUEUE_RESTORE) &&
dl_time_before(dl_se->deadline,
rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
@@ -1512,19 +1528,7 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
- struct task_struct *pi_task = rt_mutex_get_top_task(p);
- struct sched_dl_entity *pi_se = &p->dl;
-
- /*
- * Use the scheduling parameters of the top pi-waiter task if:
- * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
- * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
- * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
- * boosted due to a SCHED_DEADLINE pi-waiter).
- * Otherwise we keep our runtime and deadline.
- */
- if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
- pi_se = &pi_task->dl;
+ if (is_dl_boosted(&p->dl)) {
/*
* Because of delays in the detection of the overrun of a
* thread's runtime, it might be the case that a thread
@@ -1557,7 +1561,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* the throttle.
*/
p->dl.dl_throttled = 0;
- BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+ BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
return;
}
@@ -1594,7 +1598,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
return;
}
- enqueue_dl_entity(&p->dl, pi_se, flags);
+ enqueue_dl_entity(&p->dl, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
@@ -1664,13 +1668,13 @@ static void yield_task_dl(struct rq *rq)
static int find_later_rq(struct task_struct *task);
static int
-select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr;
bool select_rq;
struct rq *rq;
- if (sd_flag != SD_BALANCE_WAKE)
+ if (!(flags & WF_TTWU))
goto out;
rq = cpu_rq(cpu);
@@ -1828,7 +1832,7 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
if (!first)
return;
- if (hrtick_enabled(rq))
+ if (hrtick_enabled_dl(rq))
start_hrtick_dl(rq, p);
if (rq->curr->sched_class != &dl_sched_class)
@@ -1891,7 +1895,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
* not being the leftmost task anymore. In that case NEED_RESCHED will
* be set and schedule() will start a new hrtick for the next task.
*/
- if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+ if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
is_leftmost(p, &rq->dl))
start_hrtick_dl(rq, p);
}
@@ -1912,7 +1916,7 @@ static void task_fork_dl(struct task_struct *p)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, p->cpus_ptr))
+ cpumask_test_cpu(cpu, &p->cpus_mask))
return 1;
return 0;
}
@@ -2002,8 +2006,8 @@ static int find_later_rq(struct task_struct *task)
return this_cpu;
}
- best_cpu = cpumask_first_and(later_mask,
- sched_domain_span(sd));
+ best_cpu = cpumask_any_and_distribute(later_mask,
+ sched_domain_span(sd));
/*
* Last chance: if a CPU being in both later_mask
* and current sd span is valid, that becomes our
@@ -2025,7 +2029,7 @@ static int find_later_rq(struct task_struct *task)
if (this_cpu != -1)
return this_cpu;
- cpu = cpumask_any(later_mask);
+ cpu = cpumask_any_distribute(later_mask);
if (cpu < nr_cpu_ids)
return cpu;
@@ -2062,7 +2066,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
+ !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
task_running(rq, task) ||
!dl_task(task) ||
!task_on_rq_queued(task))) {
@@ -2129,6 +2133,9 @@ static int push_dl_task(struct rq *rq)
return 0;
retry:
+ if (is_migration_disabled(next_task))
+ return 0;
+
if (WARN_ON(next_task == rq->curr))
return 0;
@@ -2206,7 +2213,7 @@ static void push_dl_tasks(struct rq *rq)
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
- struct task_struct *p;
+ struct task_struct *p, *push_task;
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
@@ -2236,6 +2243,7 @@ static void pull_dl_task(struct rq *this_rq)
continue;
/* Might drop this_rq->lock */
+ push_task = NULL;
double_lock_balance(this_rq, src_rq);
/*
@@ -2267,17 +2275,27 @@ static void pull_dl_task(struct rq *this_rq)
src_rq->curr->dl.deadline))
goto skip;
- resched = true;
-
- deactivate_task(src_rq, p, 0);
- set_task_cpu(p, this_cpu);
- activate_task(this_rq, p, 0);
- dmin = p->dl.deadline;
+ if (is_migration_disabled(p)) {
+ push_task = get_push_task(src_rq);
+ } else {
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, this_cpu);
+ activate_task(this_rq, p, 0);
+ dmin = p->dl.deadline;
+ resched = true;
+ }
/* Is there any other task even earlier? */
}
skip:
double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
+ raw_spin_unlock(&this_rq->lock);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
+ raw_spin_lock(&this_rq->lock);
+ }
}
if (resched)
@@ -2301,7 +2319,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
}
static void set_cpus_allowed_dl(struct task_struct *p,
- const struct cpumask *new_mask)
+ const struct cpumask *new_mask,
+ u32 flags)
{
struct root_domain *src_rd;
struct rq *rq;
@@ -2330,7 +2349,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
raw_spin_unlock(&src_dl_b->lock);
}
- set_cpus_allowed_common(p, new_mask);
+ set_cpus_allowed_common(p, new_mask, flags);
}
/* Assumes rq->lock is held */
@@ -2369,9 +2388,13 @@ void dl_add_task_root_domain(struct task_struct *p)
struct rq *rq;
struct dl_bw *dl_b;
- rq = task_rq_lock(p, &rf);
- if (!dl_task(p))
- goto unlock;
+ raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+ if (!dl_task(p)) {
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
+ return;
+ }
+
+ rq = __task_rq_lock(p, &rf);
dl_b = &rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
@@ -2380,7 +2403,6 @@ void dl_add_task_root_domain(struct task_struct *p)
raw_spin_unlock(&dl_b->lock);
-unlock:
task_rq_unlock(rq, p, &rf);
}
@@ -2474,7 +2496,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
- if (task_on_rq_queued(p) || rq->curr == p) {
+ if (task_on_rq_queued(p) || task_current(rq, p)) {
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
@@ -2503,8 +2525,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
}
}
-const struct sched_class dl_sched_class
- __section("__dl_sched_class") = {
+DEFINE_SCHED_CLASS(dl) = {
+
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
@@ -2523,6 +2545,7 @@ const struct sched_class dl_sched_class
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
+ .find_lock_rq = find_lock_later_rq,
#endif
.task_tick = task_tick_dl,
@@ -2535,33 +2558,39 @@ const struct sched_class dl_sched_class
.update_curr = update_curr_dl,
};
+/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
+static u64 dl_generation;
+
int sched_dl_global_validate(void)
{
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
+ u64 gen = ++dl_generation;
struct dl_bw *dl_b;
- int cpu, ret = 0;
+ int cpu, cpus, ret = 0;
unsigned long flags;
/*
* Here we want to check the bandwidth not being set to some
* value smaller than the currently allocated bandwidth in
* any of the root_domains.
- *
- * FIXME: Cycling on all the CPUs is overdoing, but simpler than
- * cycling on root_domains... Discussion on different/better
- * solutions is welcome!
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
+
+ if (dl_bw_visited(cpu, gen))
+ goto next;
+
dl_b = dl_bw_of(cpu);
+ cpus = dl_bw_cpus(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
- if (new_bw < dl_b->total_bw)
+ if (new_bw * cpus < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+next:
rcu_read_unlock_sched();
if (ret)
@@ -2587,6 +2616,7 @@ static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
void sched_dl_do_global(void)
{
u64 new_bw = -1;
+ u64 gen = ++dl_generation;
struct dl_bw *dl_b;
int cpu;
unsigned long flags;
@@ -2597,11 +2627,14 @@ void sched_dl_do_global(void)
if (global_rt_runtime() != RUNTIME_INF)
new_bw = to_ratio(global_rt_period(), global_rt_runtime());
- /*
- * FIXME: As above...
- */
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
+
+ if (dl_bw_visited(cpu, gen)) {
+ rcu_read_unlock_sched();
+ continue;
+ }
+
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
@@ -2787,11 +2820,14 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
- dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
+
+#ifdef CONFIG_RT_MUTEXES
+ dl_se->pi_se = dl_se;
+#endif
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 0655524700d2..486f403a778b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -251,7 +251,7 @@ static int sd_ctl_doflags(struct ctl_table *table, int write,
unsigned long flags = *(unsigned long *)table->data;
size_t data_size = 0;
size_t len = 0;
- char *tmp;
+ char *tmp, *buf;
int idx;
if (write)
@@ -269,17 +269,17 @@ static int sd_ctl_doflags(struct ctl_table *table, int write,
return 0;
}
- tmp = kcalloc(data_size + 1, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
+ buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
char *name = sd_flag_debug[idx].name;
- len += snprintf(tmp + len, strlen(name) + 2, "%s ", name);
+ len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
}
- tmp += *ppos;
+ tmp = buf + *ppos;
len -= *ppos;
if (len > *lenp)
@@ -294,7 +294,7 @@ static int sd_ctl_doflags(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
- kfree(tmp);
+ kfree(buf);
return 0;
}
@@ -486,7 +486,7 @@ static char *task_group_path(struct task_group *tg)
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
- if (rq->curr == p)
+ if (task_current(rq, p))
SEQ_printf(m, ">R");
else
SEQ_printf(m, " %c", task_state_to_char(p));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 290f9e38378c..8a8bd7b13634 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -531,12 +531,15 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
-static inline int entity_before(struct sched_entity *a,
+static inline bool entity_before(struct sched_entity *a,
struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
+#define __node_2_se(node) \
+ rb_entry((node), struct sched_entity, run_node)
+
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
@@ -552,8 +555,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
}
if (leftmost) { /* non-empty tree */
- struct sched_entity *se;
- se = rb_entry(leftmost, struct sched_entity, run_node);
+ struct sched_entity *se = __node_2_se(leftmost);
if (!curr)
vruntime = se->vruntime;
@@ -569,37 +571,17 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
#endif
}
+static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+{
+ return entity_before(__node_2_se(a), __node_2_se(b));
+}
+
/*
* Enqueue an entity into the rb-tree:
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct sched_entity *entry;
- bool leftmost = true;
-
- /*
- * Find the right place in the rbtree:
- */
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct sched_entity, run_node);
- /*
- * We dont care about collisions. Nodes with
- * the same key stay together.
- */
- if (entity_before(se, entry)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
- leftmost = false;
- }
- }
-
- rb_link_node(&se->run_node, parent, link);
- rb_insert_color_cached(&se->run_node,
- &cfs_rq->tasks_timeline, leftmost);
+ rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -614,7 +596,7 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
if (!left)
return NULL;
- return rb_entry(left, struct sched_entity, run_node);
+ return __node_2_se(left);
}
static struct sched_entity *__pick_next_entity(struct sched_entity *se)
@@ -624,7 +606,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
if (!next)
return NULL;
- return rb_entry(next, struct sched_entity, run_node);
+ return __node_2_se(next);
}
#ifdef CONFIG_SCHED_DEBUG
@@ -635,7 +617,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
if (!last)
return NULL;
- return rb_entry(last, struct sched_entity, run_node);
+ return __node_2_se(last);
}
/**************************************************************
@@ -906,6 +888,15 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (!schedstat_enabled())
return;
+ /*
+ * When sched_schedstat changes from 0 to 1, some sched entities
+ * may already be on the runqueue with se->statistics.wait_start
+ * still 0, which would make the computed delta wrong. Avoid that
+ * scenario.
+ */
+ if (unlikely(!schedstat_val(se->statistics.wait_start)))
+ return;
+
delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
if (entity_is_task(se)) {
@@ -1550,7 +1541,8 @@ struct task_numa_env {
static unsigned long cpu_load(struct rq *rq);
static unsigned long cpu_runnable(struct rq *rq);
static unsigned long cpu_util(int cpu);
-static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+static inline long adjust_numa_imbalance(int imbalance,
+ int dst_running, int dst_weight);
static inline enum
numa_type numa_classify(unsigned int imbalance_pct,
@@ -1930,7 +1922,8 @@ static void task_numa_find_cpu(struct task_numa_env *env,
src_running = env->src_stats.nr_running - 1;
dst_running = env->dst_stats.nr_running + 1;
imbalance = max(0, dst_running - src_running);
- imbalance = adjust_numa_imbalance(imbalance, dst_running);
+ imbalance = adjust_numa_imbalance(imbalance, dst_running,
+ env->dst_stats.weight);
/* Use idle CPU if there is no imbalance */
if (!imbalance) {
@@ -3932,6 +3925,22 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
trace_sched_util_est_cfs_tp(cfs_rq);
}
+static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+{
+ unsigned int enqueued;
+
+ if (!sched_feat(UTIL_EST))
+ return;
+
+ /* Update root cfs_rq's estimated utilization */
+ enqueued = cfs_rq->avg.util_est.enqueued;
+ enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
+ WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+ trace_sched_util_est_cfs_tp(cfs_rq);
+}
+
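The min_t() clamp in util_est_dequeue() keeps the unsigned subtraction from
wrapping when a task's estimate momentarily exceeds the root aggregate; the
effect in isolation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int enqueued = 300, task_est = 350;

		/* Subtract at most what is currently accounted. */
		enqueued -= (enqueued < task_est) ? enqueued : task_est;
		printf("%u\n", enqueued);	/* 0, not a wrapped huge value */
		return 0;
	}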
/*
* Check if a (signed) value is within a specified (unsigned) margin,
* based on the observation that:
@@ -3945,23 +3954,16 @@ static inline bool within_margin(int value, int margin)
return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}
-static void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+static inline void util_est_update(struct cfs_rq *cfs_rq,
+ struct task_struct *p,
+ bool task_sleep)
{
long last_ewma_diff;
struct util_est ue;
- int cpu;
if (!sched_feat(UTIL_EST))
return;
- /* Update root cfs_rq's estimated utilization */
- ue.enqueued = cfs_rq->avg.util_est.enqueued;
- ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
-
- trace_sched_util_est_cfs_tp(cfs_rq);
-
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
@@ -4001,8 +4003,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
* To avoid overestimation of actual task utilization, skip updates if
* we cannot grant there is idle time in this CPU.
*/
- cpu = cpu_of(rq_of(cfs_rq));
- if (task_util(p) > capacity_orig_of(cpu))
+ if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
return;
/*
@@ -4041,7 +4042,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
if (!static_branch_unlikely(&sched_asym_cpucapacity))
return;
- if (!p) {
+ if (!p || p->nr_cpus_allowed == 1) {
rq->misfit_task_load = 0;
return;
}
@@ -4085,8 +4086,11 @@ static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
- bool task_sleep) {}
+util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+static inline void
+util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
+ bool task_sleep) {}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
#endif /* CONFIG_SMP */
@@ -4779,25 +4783,37 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
- break;
+ goto done;
- if (dequeue) {
- dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
- } else {
- update_load_avg(qcfs_rq, se, 0);
- se_update_runnable(se);
- }
+ dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
- if (qcfs_rq->load.weight)
- dequeue = 0;
+ if (qcfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+ se = parent_entity(se);
+ break;
+ }
}
- if (!se)
- sub_nr_running(rq, task_delta);
+ for_each_sched_entity(se) {
+ struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+ /* throttled entity or throttle-on-deactivate */
+ if (!se->on_rq)
+ goto done;
+
+ update_load_avg(qcfs_rq, se, 0);
+ se_update_runnable(se);
+
+ qcfs_rq->h_nr_running -= task_delta;
+ qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ }
+
+ /* At this point se is NULL and we are at root level */
+ sub_nr_running(rq, task_delta);
+done:
/*
* Note: distribution will already see us throttled via the
* throttled-list. rq->lock protects completion.
@@ -5105,9 +5121,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
return;
distribute_cfs_runtime(cfs_b);
-
- raw_spin_lock_irqsave(&cfs_b->lock, flags);
- raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
}
/*
@@ -5399,7 +5412,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
s64 delta = slice - ran;
if (delta < 0) {
- if (rq->curr == p)
+ if (task_current(rq, p))
resched_curr(rq);
return;
}
@@ -5416,7 +5429,7 @@ static void hrtick_update(struct rq *rq)
{
struct task_struct *curr = rq->curr;
- if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
+ if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
return;
if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
@@ -5477,6 +5490,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
+ int task_new = !(flags & ENQUEUE_WAKEUP);
/*
* The code below (indirectly) updates schedutil which looks at
@@ -5549,7 +5563,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* into account, but that is not straightforward to implement,
* and the following generally works well enough in practice.
*/
- if (flags & ENQUEUE_WAKEUP)
+ if (!task_new)
update_overutilized_status(rq);
enqueue_throttle:
@@ -5588,6 +5602,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq);
+ util_est_dequeue(&rq->cfs, p);
+
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
@@ -5638,7 +5654,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
rq->next_balance = jiffies;
dequeue_throttle:
- util_est_dequeue(&rq->cfs, p, task_sleep);
+ util_est_update(&rq->cfs, p, task_sleep);
hrtick_update(rq);
}
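Taken together with the hunks above, the dequeue path now strips the task's estimated utilization from the root cfs_rq before the entity walk and refreshes the task's own estimate only once, at the very end. A condensed sketch of the resulting flow (illustration only, most of the function elided):

static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;

	util_est_dequeue(&rq->cfs, p);		/* drop p's estimate from the root cfs_rq up front */

	for_each_sched_entity(se)		/* unchanged dequeue/throttle walk, abbreviated */
		dequeue_entity(cfs_rq_of(se), se, flags);

dequeue_throttle:
	util_est_update(&rq->cfs, p, task_sleep);	/* refresh p's own EWMA once, at the end */
	hrtick_update(rq);
}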
@@ -5804,6 +5820,9 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
if (sync && cpu_rq(this_cpu)->nr_running == 1)
return this_cpu;
+ if (available_idle_cpu(prev_cpu))
+ return prev_cpu;
+
return nr_cpumask_bits;
}
@@ -5982,6 +6001,14 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
return new_cpu;
}
+static inline int __select_idle_cpu(int cpu)
+{
+ if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+ return cpu;
+
+ return -1;
+}
+
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
EXPORT_SYMBOL_GPL(sched_smt_present);
@@ -6040,73 +6067,51 @@ unlock:
* there are no idle cores left in the system; tracked through
* sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
*/
-static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
- int core, cpu;
+ bool idle = true;
+ int cpu;
if (!static_branch_likely(&sched_smt_present))
- return -1;
-
- if (!test_idle_cores(target, false))
- return -1;
+ return __select_idle_cpu(core);
- cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
-
- for_each_cpu_wrap(core, cpus, target) {
- bool idle = true;
-
- for_each_cpu(cpu, cpu_smt_mask(core)) {
- if (!available_idle_cpu(cpu)) {
- idle = false;
- break;
+ for_each_cpu(cpu, cpu_smt_mask(core)) {
+ if (!available_idle_cpu(cpu)) {
+ idle = false;
+ if (*idle_cpu == -1) {
+ if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
+ *idle_cpu = cpu;
+ break;
+ }
+ continue;
}
+ break;
}
- cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
-
- if (idle)
- return core;
+ if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
+ *idle_cpu = cpu;
}
- /*
- * Failed to find an idle core; stop looking for one.
- */
- set_idle_cores(target, 0);
+ if (idle)
+ return core;
+ cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
return -1;
}
-/*
- * Scan the local SMT mask for idle CPUs.
- */
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
- int cpu;
-
- if (!static_branch_likely(&sched_smt_present))
- return -1;
-
- for_each_cpu(cpu, cpu_smt_mask(target)) {
- if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
- !cpumask_test_cpu(cpu, sched_domain_span(sd)))
- continue;
- if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
- return cpu;
- }
+#else /* CONFIG_SCHED_SMT */
- return -1;
+static inline void set_idle_cores(int cpu, int val)
+{
}
-#else /* CONFIG_SCHED_SMT */
-
-static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
+static inline bool test_idle_cores(int cpu, bool def)
{
- return -1;
+ return def;
}
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
- return -1;
+ return __select_idle_cpu(core);
}
#endif /* CONFIG_SCHED_SMT */
@@ -6119,49 +6124,61 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+ int i, cpu, idle_cpu = -1, nr = INT_MAX;
+ bool smt = test_idle_cores(target, false);
+ int this = smp_processor_id();
struct sched_domain *this_sd;
- u64 avg_cost, avg_idle;
u64 time;
- int this = smp_processor_id();
- int cpu, nr = INT_MAX;
this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
return -1;
- /*
- * Due to large variance we need a large fuzz factor; hackbench in
- * particularly is sensitive here.
- */
- avg_idle = this_rq()->avg_idle / 512;
- avg_cost = this_sd->avg_scan_cost + 1;
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
- if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
- return -1;
+ if (sched_feat(SIS_PROP) && !smt) {
+ u64 avg_cost, avg_idle, span_avg;
+
+ /*
+ * Due to large variance we need a large fuzz factor;
+ * hackbench in particularly is sensitive here.
+ */
+ avg_idle = this_rq()->avg_idle / 512;
+ avg_cost = this_sd->avg_scan_cost + 1;
- if (sched_feat(SIS_PROP)) {
- u64 span_avg = sd->span_weight * avg_idle;
+ span_avg = sd->span_weight * avg_idle;
if (span_avg > 4*avg_cost)
nr = div_u64(span_avg, avg_cost);
else
nr = 4;
- }
-
- time = cpu_clock(this);
- cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+ time = cpu_clock(this);
+ }
for_each_cpu_wrap(cpu, cpus, target) {
- if (!--nr)
- return -1;
- if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
- break;
+ if (smt) {
+ i = select_idle_core(p, cpu, cpus, &idle_cpu);
+ if ((unsigned int)i < nr_cpumask_bits)
+ return i;
+
+ } else {
+ if (!--nr)
+ return -1;
+ idle_cpu = __select_idle_cpu(cpu);
+ if ((unsigned int)idle_cpu < nr_cpumask_bits)
+ break;
+ }
}
- time = cpu_clock(this) - time;
- update_avg(&this_sd->avg_scan_cost, time);
+ if (smt)
+ set_idle_cores(this, false);
+
+ if (sched_feat(SIS_PROP) && !smt) {
+ time = cpu_clock(this) - time;
+ update_avg(&this_sd->avg_scan_cost, time);
+ }
- return cpu;
+ return idle_cpu;
}
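A rough worked example of the SIS_PROP depth limit above, with numbers invented purely for illustration: if this_rq()->avg_idle is 500000 ns and this_sd->avg_scan_cost is 120, then avg_idle becomes 500000/512 = 976 and avg_cost becomes 121. On a 16-CPU LLC, span_avg = 16 * 976 = 15616, which exceeds 4 * 121 = 484, so nr = 15616/121 = 129 and the scan is effectively unbounded for that domain. On a much busier CPU where avg_idle is 20000 ns, avg_idle/512 = 39, span_avg = 624 and nr = 624/121 = 5, so the non-SMT scan gives up after about five candidates.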
/*
@@ -6172,21 +6189,21 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
static int
select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
{
- unsigned long best_cap = 0;
+ unsigned long task_util, best_cap = 0;
int cpu, best_cpu = -1;
struct cpumask *cpus;
- sync_entity_load_avg(&p->se);
-
cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+ task_util = uclamp_task_util(p);
+
for_each_cpu_wrap(cpu, cpus, target) {
unsigned long cpu_cap = capacity_of(cpu);
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
continue;
- if (task_fits_capacity(p, cpu_cap))
+ if (fits_capacity(task_util, cpu_cap))
return cpu;
if (cpu_cap > best_cap) {
@@ -6198,44 +6215,42 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
return best_cpu;
}
+static inline bool asym_fits_capacity(int task_util, int cpu)
+{
+ if (static_branch_unlikely(&sched_asym_cpucapacity))
+ return fits_capacity(task_util, capacity_of(cpu));
+
+ return true;
+}
+
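For reference, fits_capacity() elsewhere in fair.c keeps roughly 20% headroom (util * 1280 < capacity * 1024); taking that as given, a task with task_util = 300 passes asym_fits_capacity() on a CPU with capacity_of() = 446 (300 * 1280 = 384000 < 456704) but not on one with capacity 370 (384000 > 378880). On symmetric systems the static branch is off, the helper always returns true, and the new checks in select_idle_sibling() below change nothing.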
/*
* Try and locate an idle core/thread in the LLC cache domain.
*/
static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
+ unsigned long task_util;
int i, recent_used_cpu;
/*
- * For asymmetric CPU capacity systems, our domain of interest is
- * sd_asym_cpucapacity rather than sd_llc.
+ * On asymmetric systems, update the task utilization because we will
+ * check that the task fits the CPU's capacity.
*/
if (static_branch_unlikely(&sched_asym_cpucapacity)) {
- sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
- /*
- * On an asymmetric CPU capacity system where an exclusive
- * cpuset defines a symmetric island (i.e. one unique
- * capacity_orig value through the cpuset), the key will be set
- * but the CPUs within that cpuset will not have a domain with
- * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
- * capacity path.
- */
- if (!sd)
- goto symmetric;
-
- i = select_idle_capacity(p, sd, target);
- return ((unsigned)i < nr_cpumask_bits) ? i : target;
+ sync_entity_load_avg(&p->se);
+ task_util = uclamp_task_util(p);
}
-symmetric:
- if (available_idle_cpu(target) || sched_idle_cpu(target))
+ if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
+ asym_fits_capacity(task_util, target))
return target;
/*
* If the previous CPU is cache affine and idle, don't be stupid:
*/
if (prev != target && cpus_share_cache(prev, target) &&
- (available_idle_cpu(prev) || sched_idle_cpu(prev)))
+ (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
+ asym_fits_capacity(task_util, prev))
return prev;
/*
@@ -6258,7 +6273,8 @@ symmetric:
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
(available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
- cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
+ asym_fits_capacity(task_util, recent_used_cpu)) {
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
@@ -6267,27 +6283,39 @@ symmetric:
return recent_used_cpu;
}
+ /*
+ * For asymmetric CPU capacity systems, our domain of interest is
+ * sd_asym_cpucapacity rather than sd_llc.
+ */
+ if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+ sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
+ /*
+ * On an asymmetric CPU capacity system where an exclusive
+ * cpuset defines a symmetric island (i.e. one unique
+ * capacity_orig value through the cpuset), the key will be set
+ * but the CPUs within that cpuset will not have a domain with
+ * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
+ * capacity path.
+ */
+ if (sd) {
+ i = select_idle_capacity(p, sd, target);
+ return ((unsigned)i < nr_cpumask_bits) ? i : target;
+ }
+ }
+
sd = rcu_dereference(per_cpu(sd_llc, target));
if (!sd)
return target;
- i = select_idle_core(p, sd, target);
- if ((unsigned)i < nr_cpumask_bits)
- return i;
-
i = select_idle_cpu(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
- i = select_idle_smt(p, sd, target);
- if ((unsigned)i < nr_cpumask_bits)
- return i;
-
return target;
}
/**
- * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+ * cpu_util - Estimates the amount of capacity of a CPU used by CFS tasks.
* @cpu: the CPU to get the utilization of
*
* The unit of the return value must be the one of capacity so we can compare
@@ -6499,7 +6527,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
* is already enough to scale the EM reported power
* consumption at the (eventually clamped) cpu_capacity.
*/
- sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
+ sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
ENERGY_UTIL, NULL);
/*
@@ -6509,7 +6537,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
* NOTE: in case RT tasks are running, by default the
* FREQUENCY_UTIL's utilization can be max OPP.
*/
- cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
+ cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
FREQUENCY_UTIL, tsk);
max_util = max(max_util, cpu_util);
}
@@ -6607,7 +6635,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
* IOW, placing the task there would make the CPU
* overutilized. Take uclamp into account to see how
* much capacity we can get out of the CPU; this is
- * aligned with schedutil_cpu_util().
+ * aligned with sched_cpu_util().
*/
util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
if (!fits_capacity(util, cpu_cap))
@@ -6663,7 +6691,7 @@ fail:
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
- * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
+ * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
*
* Balances load by selecting the idlest CPU in the idlest group, or under
@@ -6674,15 +6702,17 @@ fail:
* preempt must be disabled.
*/
static int
-select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
{
+ int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
struct sched_domain *tmp, *sd = NULL;
int cpu = smp_processor_id();
int new_cpu = prev_cpu;
int want_affine = 0;
- int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
+ /* SD_flags and WF_flags share the first nibble */
+ int sd_flag = wake_flags & 0xF;
- if (sd_flag & SD_BALANCE_WAKE) {
+ if (wake_flags & WF_TTWU) {
record_wakee(p);
if (sched_energy_enabled()) {
@@ -6719,9 +6749,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (unlikely(sd)) {
/* Slow path */
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
- } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+ } else if (wake_flags & WF_TTWU) { /* XXX always ? */
/* Fast path */
-
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
if (want_affine)
@@ -7087,7 +7116,7 @@ done: __maybe_unused;
list_move(&p->se.group_node, &rq->cfs_tasks);
#endif
- if (hrtick_enabled(rq))
+ if (hrtick_enabled_fair(rq))
hrtick_start_fair(rq, p);
update_misfit_status(p, rq);
@@ -8738,6 +8767,16 @@ static bool update_pick_idlest(struct sched_group *idlest,
}
/*
+ * Allow a NUMA imbalance if the number of busy CPUs is less than 25% of the domain.
+ * This is an approximation as the number of running tasks may not be
+ * related to the number of busy CPUs due to sched_setaffinity.
+ */
+static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
+{
+ return (dst_running < (dst_weight >> 2));
+}
+
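In other words, dst_weight >> 2 is a 25% threshold: on a 128-CPU NUMA node the imbalance is tolerated while fewer than 32 tasks are running in the destination, on a 16-CPU node while fewer than 4 are. As the comment notes, sched_setaffinity() can make the running-task count a poor proxy for the number of busy CPUs.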
+/*
* find_idlest_group() finds and returns the least busy CPU group within the
* domain.
*
@@ -8755,9 +8794,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
.group_type = group_overloaded,
};
- imbalance = scale_load_down(NICE_0_LOAD) *
- (sd->imbalance_pct-100) / 100;
-
do {
int local_group;
@@ -8811,6 +8847,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
switch (local_sgs.group_type) {
case group_overloaded:
case group_fully_busy:
+
+ /* Calculate allowed imbalance based on load */
+ imbalance = scale_load_down(NICE_0_LOAD) *
+ (sd->imbalance_pct-100) / 100;
+
/*
* When comparing groups across NUMA domains, it's possible for
* the local domain to be very lightly loaded relative to the
@@ -8867,7 +8908,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
* a real need of migration, periodic load balance will
* take care of it.
*/
- if (local_sgs.idle_cpus)
+ if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
return NULL;
}
@@ -8969,16 +9010,19 @@ next_group:
}
}
-static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long adjust_numa_imbalance(int imbalance,
+ int dst_running, int dst_weight)
{
- unsigned int imbalance_min;
+ if (!allow_numa_imbalance(dst_running, dst_weight))
+ return imbalance;
/*
* Allow a small imbalance based on a simple pair of communicating
- * tasks that remain local when the source domain is almost idle.
+ * tasks that remain local when the destination is lightly loaded.
*/
- imbalance_min = 2;
- if (nr_running <= imbalance_min)
+ if (imbalance <= NUMA_IMBALANCE_MIN)
return 0;
return imbalance;
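Reading the combined logic with the same 128-CPU example: while dst_running stays below the 25% threshold, an imbalance of at most NUMA_IMBALANCE_MIN = 2 tasks is treated as 0 and an imbalance of 3 or more is returned unchanged; once dst_running reaches 32, the computed imbalance is always returned as-is.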
@@ -9031,7 +9075,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* emptying busiest.
*/
if (local->group_type == group_has_spare) {
- if (busiest->group_type > group_fully_busy) {
+ if ((busiest->group_type > group_fully_busy) &&
+ !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
/*
* If busiest is overloaded, try to fill spare
* capacity. This might end up creating spare capacity
@@ -9080,9 +9125,10 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
}
/* Consider allowing a small imbalance between NUMA groups */
- if (env->sd->flags & SD_NUMA)
+ if (env->sd->flags & SD_NUMA) {
env->imbalance = adjust_numa_imbalance(env->imbalance,
- busiest->sum_nr_running);
+ busiest->sum_nr_running, busiest->group_weight);
+ }
return;
}
@@ -9327,8 +9373,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (rt > env->fbq_type)
continue;
- capacity = capacity_of(i);
nr_running = rq->cfs.h_nr_running;
+ if (!nr_running)
+ continue;
+
+ capacity = capacity_of(i);
/*
* For ASYM_CPUCAPACITY domains, don't pick a CPU that could
@@ -9434,13 +9483,32 @@ asym_active_balance(struct lb_env *env)
}
static inline bool
-voluntary_active_balance(struct lb_env *env)
+imbalanced_active_balance(struct lb_env *env)
+{
+ struct sched_domain *sd = env->sd;
+
+ /*
+	 * The imbalanced case includes pinned tasks preventing a fair distribution
+	 * of the load on the system, but also the even spreading of threads on a
+	 * system with spare capacity.
+ */
+ if ((env->migration_type == migrate_task) &&
+ (sd->nr_balance_failed > sd->cache_nice_tries+2))
+ return 1;
+
+ return 0;
+}
+
+static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
if (asym_active_balance(env))
return 1;
+ if (imbalanced_active_balance(env))
+ return 1;
+
/*
* The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
* It's worth migrating the task if the src_cpu's capacity is reduced
@@ -9460,16 +9528,6 @@ voluntary_active_balance(struct lb_env *env)
return 0;
}
-static int need_active_balance(struct lb_env *env)
-{
- struct sched_domain *sd = env->sd;
-
- if (voluntary_active_balance(env))
- return 1;
-
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
-}
-
static int active_load_balance_cpu_stop(void *data);
static int should_we_balance(struct lb_env *env)
@@ -9561,6 +9619,8 @@ redo:
env.src_rq = busiest;
ld_moved = 0;
+ /* Clear this flag as soon as we find a pullable task */
+ env.flags |= LBF_ALL_PINNED;
if (busiest->nr_running > 1) {
/*
* Attempt to move tasks. If find_busiest_group has found
@@ -9568,7 +9628,6 @@ redo:
* still unbalanced. ld_moved simply stays zero, so it is
* correctly treated as an imbalance.
*/
- env.flags |= LBF_ALL_PINNED;
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
more_balance:
@@ -9694,10 +9753,12 @@ more_balance:
if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
- env.flags |= LBF_ALL_PINNED;
goto out_one_pinned;
}
+ /* Record that we found at least one task that could run on this_cpu */
+ env.flags &= ~LBF_ALL_PINNED;
+
/*
* ->active_balance synchronizes accesses to
* ->active_balance_work. Once set, it's cleared
@@ -9719,21 +9780,13 @@ more_balance:
/* We've kicked active balancing, force task migration. */
sd->nr_balance_failed = sd->cache_nice_tries+1;
}
- } else
+ } else {
sd->nr_balance_failed = 0;
+ }
- if (likely(!active_balance) || voluntary_active_balance(&env)) {
+ if (likely(!active_balance) || need_active_balance(&env)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
- } else {
- /*
- * If we've begun active balancing, start to back off. This
- * case may not be covered by the all_pinned logic if there
- * is only 1 task on the busy runqueue (because we don't call
- * detach_tasks).
- */
- if (sd->balance_interval < sd->max_interval)
- sd->balance_interval *= 2;
}
goto out;
@@ -10047,6 +10100,10 @@ static inline int find_new_ilb(void)
for_each_cpu_and(ilb, nohz.idle_cpus_mask,
housekeeping_cpumask(HK_FLAG_MISC)) {
+
+ if (ilb == smp_processor_id())
+ continue;
+
if (idle_cpu(ilb))
return ilb;
}
@@ -10484,7 +10541,7 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
- * idle_balance is called by schedule() if this_cpu is about to become
+ * newidle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*
* Returns:
@@ -10634,8 +10691,11 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
*/
void trigger_load_balance(struct rq *rq)
{
- /* Don't need to rebalance while attached to NULL domain */
- if (unlikely(on_null_domain(rq)))
+ /*
+ * Don't need to rebalance while attached to NULL domain or
+ * runqueue CPU is not active
+ */
+ if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
return;
if (time_after_eq(jiffies, rq->next_balance))
@@ -10740,7 +10800,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
- if (rq->curr == p) {
+ if (task_current(rq, p)) {
if (p->prio > oldprio)
resched_curr(rq);
} else
@@ -10873,7 +10933,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
- if (rq->curr == p)
+ if (task_current(rq, p))
resched_curr(rq);
else
check_preempt_curr(rq, p, 0);
@@ -11158,8 +11218,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
/*
* All the scheduling class methods:
*/
-const struct sched_class fair_sched_class
- __section("__fair_sched_class") = {
+DEFINE_SCHED_CLASS(fair) = {
+
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 68d369cba9e4..1bc2b158fc51 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -38,6 +38,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
SCHED_FEAT(WAKEUP_PREEMPTION, true)
SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(HRTICK_DL, false)
SCHED_FEAT(DOUBLE_TICK, false)
/*
@@ -54,7 +55,6 @@ SCHED_FEAT(TTWU_QUEUE, true)
/*
* When doing wakeups, attempt to limit superfluous scans of the LLC domain.
*/
-SCHED_FEAT(SIS_AVG_CPU, false)
SCHED_FEAT(SIS_PROP, true)
/*
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 24d0ee26377d..7199e6f23789 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -78,7 +78,7 @@ void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
cpu_idle_force_poll = 1;
- local_irq_enable();
+ raw_local_irq_enable();
}
/**
@@ -94,9 +94,35 @@ void __cpuidle default_idle_call(void)
trace_cpu_idle(1, smp_processor_id());
stop_critical_timings();
+
+ /*
+ * arch_cpu_idle() is supposed to enable IRQs, however
+ * we can't do that because of RCU and tracing.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * rcu_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(_THIS_IP_);
rcu_idle_enter();
+ lockdep_hardirqs_on(_THIS_IP_);
+
arch_cpu_idle();
+
+ /*
+ * OK, so IRQs are enabled here, but RCU needs them disabled to
+ * turn itself back on.. funny thing is that disabling IRQs
+ * will cause tracing, which needs RCU. Jump through hoops to
+ * make it 'work'.
+ */
+ raw_local_irq_disable();
+ lockdep_hardirqs_off(_THIS_IP_);
rcu_idle_exit();
+ lockdep_hardirqs_on(_THIS_IP_);
+ raw_local_irq_enable();
+
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
@@ -259,6 +285,7 @@ static void do_idle(void)
}
arch_cpu_idle_enter();
+ rcu_nocb_flush_deferred_wakeup();
/*
* In poll mode we reenable interrupts and spin. Also if we
@@ -338,6 +365,7 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
WARN_ON_ONCE(!duration_ns);
+ WARN_ON_ONCE(current->mm);
rcu_sleep_check();
preempt_disable();
@@ -375,7 +403,7 @@ void cpu_startup_entry(enum cpuhp_state state)
#ifdef CONFIG_SMP
static int
-select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
@@ -457,8 +485,8 @@ static void update_curr_idle(struct rq *rq)
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
-const struct sched_class idle_sched_class
- __section("__idle_sched_class") = {
+DEFINE_SCHED_CLASS(idle) = {
+
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index e23e74d52db5..08ae45ad9261 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -7,6 +7,134 @@
#include "sched.h"
/*
+ * For documentation purposes, here are some membarrier ordering
+ * scenarios to keep in mind:
+ *
+ * A) Userspace thread execution after IPI vs membarrier's memory
+ * barrier before sending the IPI
+ *
+ * Userspace variables:
+ *
+ * int x = 0, y = 0;
+ *
+ * The memory barrier at the start of membarrier() on CPU0 is necessary in
+ * order to enforce the guarantee that any writes occurring on CPU0 before
+ * the membarrier() is executed will be visible to any code executing on
+ * CPU1 after the IPI-induced memory barrier:
+ *
+ * CPU0 CPU1
+ *
+ * x = 1
+ * membarrier():
+ * a: smp_mb()
+ * b: send IPI IPI-induced mb
+ * c: smp_mb()
+ * r2 = y
+ * y = 1
+ * barrier()
+ * r1 = x
+ *
+ * BUG_ON(r1 == 0 && r2 == 0)
+ *
+ * The write to y and load from x by CPU1 are unordered by the hardware,
+ * so it's possible to have "r1 = x" reordered before "y = 1" at any
+ * point after (b). If the memory barrier at (a) is omitted, then "x = 1"
+ * can be reordered after (a) (although not after (c)), so we get r1 == 0
+ * and r2 == 0. This violates the guarantee that membarrier() is
+ * supposed to provide.
+ *
+ * The timing of the memory barrier at (a) has to ensure that it executes
+ * before the IPI-induced memory barrier on CPU1.
+ *
+ * B) Userspace thread execution before IPI vs membarrier's memory
+ * barrier after completing the IPI
+ *
+ * Userspace variables:
+ *
+ * int x = 0, y = 0;
+ *
+ * The memory barrier at the end of membarrier() on CPU0 is necessary in
+ * order to enforce the guarantee that any writes occurring on CPU1 before
+ * the membarrier() is executed will be visible to any code executing on
+ * CPU0 after the membarrier():
+ *
+ * CPU0 CPU1
+ *
+ * x = 1
+ * barrier()
+ * y = 1
+ * r2 = y
+ * membarrier():
+ * a: smp_mb()
+ * b: send IPI IPI-induced mb
+ * c: smp_mb()
+ * r1 = x
+ * BUG_ON(r1 == 0 && r2 == 1)
+ *
+ * The writes to x and y are unordered by the hardware, so it's possible to
+ * have "r2 = 1" even though the write to x doesn't execute until (b). If
+ * the memory barrier at (c) is omitted then "r1 = x" can be reordered
+ * before (b) (although not before (a)), so we get "r1 = 0". This violates
+ * the guarantee that membarrier() is supposed to provide.
+ *
+ * The timing of the memory barrier at (c) has to ensure that it executes
+ * after the IPI-induced memory barrier on CPU1.
+ *
+ * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
+ *
+ * CPU0 CPU1
+ *
+ * membarrier():
+ * a: smp_mb()
+ * d: switch to kthread (includes mb)
+ * b: read rq->curr->mm == NULL
+ * e: switch to user (includes mb)
+ * c: smp_mb()
+ *
+ * Using the scenario from (A), we can show that (a) needs to be paired
+ * with (e). Using the scenario from (B), we can show that (c) needs to
+ * be paired with (d).
+ *
+ * D) exit_mm vs membarrier
+ *
+ * Two thread groups are created, A and B. Thread group B is created by
+ * issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
+ * Let's assume we have a single thread within each thread group (Thread A
+ * and Thread B). Thread A runs on CPU0, Thread B runs on CPU1.
+ *
+ * CPU0 CPU1
+ *
+ * membarrier():
+ * a: smp_mb()
+ * exit_mm():
+ * d: smp_mb()
+ * e: current->mm = NULL
+ * b: read rq->curr->mm == NULL
+ * c: smp_mb()
+ *
+ * Using scenario (B), we can show that (c) needs to be paired with (d).
+ *
+ * E) kthread_{use,unuse}_mm vs membarrier
+ *
+ * CPU0 CPU1
+ *
+ * membarrier():
+ * a: smp_mb()
+ * kthread_unuse_mm()
+ * d: smp_mb()
+ * e: current->mm = NULL
+ * b: read rq->curr->mm == NULL
+ * kthread_use_mm()
+ * f: current->mm = mm
+ * g: smp_mb()
+ * c: smp_mb()
+ *
+ * Using the scenario from (A), we can show that (a) needs to be paired
+ * with (g). Using the scenario from (B), we can show that (c) needs to
+ * be paired with (d).
+ */
+
+/*
* Bitmask made from a "or" of all commands within enum membarrier_cmd,
* except MEMBARRIER_CMD_QUERY.
*/
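The scenarios above reason about the membarrier(2) system call as seen from userspace. A minimal, illustration-only sketch of the private expedited flavour they describe (error handling omitted; the command constants are the existing uapi names, the wrapper function itself is just for the example):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register once per process before using the expedited command. */
	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);

	/* Issues the IPI-backed barrier discussed in scenarios (A) and (B). */
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
	return 0;
}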
@@ -38,8 +166,33 @@ static void ipi_mb(void *info)
smp_mb(); /* IPIs should be serializing but paranoid. */
}
+static void ipi_sync_core(void *info)
+{
+ /*
+ * The smp_mb() in membarrier after all the IPIs is supposed to
+ * ensure that memory accesses on remote CPUs that occur before the IPI
+ * become visible to membarrier()'s caller -- see scenario B in
+ * the big comment at the top of this file.
+ *
+ * A sync_core() would provide this guarantee, but
+ * sync_core_before_usermode() might end up being deferred until
+ * after membarrier()'s smp_mb().
+ */
+ smp_mb(); /* IPIs should be serializing but paranoid. */
+
+ sync_core_before_usermode();
+}
+
static void ipi_rseq(void *info)
{
+ /*
+ * Ensure that all stores done by the calling thread are visible
+ * to the current task before the current task resumes. We could
+ * probably optimize this away on most architectures, but by the
+ * time we've already sent an IPI, the cost of the extra smp_mb()
+ * is negligible.
+ */
+ smp_mb();
rseq_preempt(current);
}
@@ -76,6 +229,18 @@ void membarrier_exec_mmap(struct mm_struct *mm)
this_cpu_write(runqueues.membarrier_state, 0);
}
+void membarrier_update_current_mm(struct mm_struct *next_mm)
+{
+ struct rq *rq = this_rq();
+ int membarrier_state = 0;
+
+ if (next_mm)
+ membarrier_state = atomic_read(&next_mm->membarrier_state);
+ if (READ_ONCE(rq->membarrier_state) == membarrier_state)
+ return;
+ WRITE_ONCE(rq->membarrier_state, membarrier_state);
+}
+
static int membarrier_global_expedited(void)
{
int cpu;
@@ -114,12 +279,11 @@ static int membarrier_global_expedited(void)
continue;
/*
- * Skip the CPU if it runs a kernel thread. The scheduler
- * leaves the prior task mm in place as an optimization when
- * scheduling a kthread.
+ * Skip the CPU if it runs a kernel thread which is not using
+ * a task mm.
*/
p = rcu_dereference(cpu_rq(cpu)->curr);
- if (p->flags & PF_KTHREAD)
+ if (!p->mm)
continue;
__cpumask_set_cpu(cpu, tmpmask);
@@ -154,6 +318,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
if (!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
return -EPERM;
+ ipi_func = ipi_sync_core;
} else if (flags == MEMBARRIER_FLAG_RSEQ) {
if (!IS_ENABLED(CONFIG_RSEQ))
return -EINVAL;
@@ -168,7 +333,8 @@ static int membarrier_private_expedited(int flags, int cpu_id)
return -EPERM;
}
- if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
+ if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
+ (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
return 0;
/*
@@ -187,8 +353,6 @@ static int membarrier_private_expedited(int flags, int cpu_id)
if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
goto out;
- if (cpu_id == raw_smp_processor_id())
- goto out;
rcu_read_lock();
p = rcu_dereference(cpu_rq(cpu_id)->curr);
if (!p || p->mm != mm) {
@@ -203,16 +367,6 @@ static int membarrier_private_expedited(int flags, int cpu_id)
for_each_online_cpu(cpu) {
struct task_struct *p;
- /*
- * Skipping the current CPU is OK even through we can be
- * migrated at any point. The current CPU, at the point
- * where we read raw_smp_processor_id(), is ensured to
- * be in program order with respect to the caller
- * thread. Therefore, we can skip this CPU from the
- * iteration.
- */
- if (cpu == raw_smp_processor_id())
- continue;
p = rcu_dereference(cpu_rq(cpu)->curr);
if (p && p->mm == mm)
__cpumask_set_cpu(cpu, tmpmask);
@@ -220,12 +374,38 @@ static int membarrier_private_expedited(int flags, int cpu_id)
rcu_read_unlock();
}
- preempt_disable();
- if (cpu_id >= 0)
+ if (cpu_id >= 0) {
+ /*
+ * smp_call_function_single() will call ipi_func() if cpu_id
+ * is the calling CPU.
+ */
smp_call_function_single(cpu_id, ipi_func, NULL, 1);
- else
- smp_call_function_many(tmpmask, ipi_func, NULL, 1);
- preempt_enable();
+ } else {
+ /*
+ * For regular membarrier, we can save a few cycles by
+ * skipping the current cpu -- we're about to do smp_mb()
+ * below, and if we migrate to a different cpu, this cpu
+ * and the new cpu will execute a full barrier in the
+ * scheduler.
+ *
+ * For SYNC_CORE, we do need a barrier on the current cpu --
+ * otherwise, if we are migrated and replaced by a different
+ * task in the same mm just before, during, or after
+ * membarrier, we will end up with some thread in the mm
+ * running without a core sync.
+ *
+ * For RSEQ, don't rseq_preempt() the caller. User code
+ * is not supposed to issue syscalls at all from inside an
+ * rseq critical section.
+ */
+ if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
+ preempt_disable();
+ smp_call_function_many(tmpmask, ipi_func, NULL, true);
+ preempt_enable();
+ } else {
+ on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
+ }
+ }
out:
if (cpu_id < 0)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 49ec096a8aa1..8f720b71d13d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -89,8 +89,8 @@ void init_rt_rq(struct rt_rq *rt_rq)
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- rt_rq->highest_prio.next = MAX_RT_PRIO;
+ rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+ rt_rq->highest_prio.next = MAX_RT_PRIO-1;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
@@ -161,7 +161,7 @@ void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
{
struct rq *rq = cpu_rq(cpu);
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
@@ -265,7 +265,7 @@ static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
- return rq->rt.highest_prio.curr > prev->prio;
+ return rq->online && rq->rt.highest_prio.curr > prev->prio;
}
static inline int rt_overloaded(struct rq *rq)
@@ -393,8 +393,9 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
- } else
- rq->rt.highest_prio.next = MAX_RT_PRIO;
+ } else {
+ rq->rt.highest_prio.next = MAX_RT_PRIO-1;
+ }
}
#else
@@ -1147,8 +1148,9 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
sched_find_first_bit(array->bitmap);
}
- } else
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ } else {
+ rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+ }
dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
@@ -1428,14 +1430,14 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
static int
-select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr;
struct rq *rq;
bool test;
/* For anything but wake ups, just return the task_cpu */
- if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
+ if (!(flags & (WF_TTWU | WF_FORK)))
goto out;
rq = cpu_rq(cpu);
@@ -1658,7 +1660,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, p->cpus_ptr))
+ cpumask_test_cpu(cpu, &p->cpus_mask))
return 1;
return 0;
@@ -1752,8 +1754,8 @@ static int find_lowest_rq(struct task_struct *task)
return this_cpu;
}
- best_cpu = cpumask_first_and(lowest_mask,
- sched_domain_span(sd));
+ best_cpu = cpumask_any_and_distribute(lowest_mask,
+ sched_domain_span(sd));
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
@@ -1770,7 +1772,7 @@ static int find_lowest_rq(struct task_struct *task)
if (this_cpu != -1)
return this_cpu;
- cpu = cpumask_any(lowest_mask);
+ cpu = cpumask_any_distribute(lowest_mask);
if (cpu < nr_cpu_ids)
return cpu;
@@ -1811,7 +1813,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
+ !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
@@ -1859,7 +1861,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
* running task can migrate over to a CPU that is running a task
* of lesser priority.
*/
-static int push_rt_task(struct rq *rq)
+static int push_rt_task(struct rq *rq, bool pull)
{
struct task_struct *next_task;
struct rq *lowest_rq;
@@ -1873,6 +1875,34 @@ static int push_rt_task(struct rq *rq)
return 0;
retry:
+ if (is_migration_disabled(next_task)) {
+ struct task_struct *push_task = NULL;
+ int cpu;
+
+ if (!pull || rq->push_busy)
+ return 0;
+
+ cpu = find_lowest_rq(rq->curr);
+ if (cpu == -1 || cpu == rq->cpu)
+ return 0;
+
+ /*
+	 * Given that we found a CPU with lower priority than @next_task,
+	 * it should be running. However, we cannot migrate it to this
+	 * other CPU; instead, attempt to push the current running task
+	 * on this CPU away.
+ */
+ push_task = get_push_task(rq);
+ if (push_task) {
+ raw_spin_unlock(&rq->lock);
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ push_task, &rq->push_work);
+ raw_spin_lock(&rq->lock);
+ }
+
+ return 0;
+ }
+
if (WARN_ON(next_task == rq->curr))
return 0;
@@ -1927,12 +1957,10 @@ retry:
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
- ret = 1;
-
resched_curr(lowest_rq);
+ ret = 1;
double_unlock_balance(rq, lowest_rq);
-
out:
put_task_struct(next_task);
@@ -1942,7 +1970,7 @@ out:
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
- while (push_rt_task(rq))
+ while (push_rt_task(rq, false))
;
}
@@ -2095,7 +2123,8 @@ void rto_push_irq_work_func(struct irq_work *work)
*/
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
- push_rt_tasks(rq);
+ while (push_rt_task(rq, true))
+ ;
raw_spin_unlock(&rq->lock);
}
@@ -2120,7 +2149,7 @@ static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
- struct task_struct *p;
+ struct task_struct *p, *push_task;
struct rq *src_rq;
int rt_overload_count = rt_overloaded(this_rq);
@@ -2167,6 +2196,7 @@ static void pull_rt_task(struct rq *this_rq)
* double_lock_balance, and another CPU could
* alter this_rq
*/
+ push_task = NULL;
double_lock_balance(this_rq, src_rq);
/*
@@ -2194,11 +2224,14 @@ static void pull_rt_task(struct rq *this_rq)
if (p->prio < src_rq->curr->prio)
goto skip;
- resched = true;
-
- deactivate_task(src_rq, p, 0);
- set_task_cpu(p, this_cpu);
- activate_task(this_rq, p, 0);
+ if (is_migration_disabled(p)) {
+ push_task = get_push_task(src_rq);
+ } else {
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, this_cpu);
+ activate_task(this_rq, p, 0);
+ resched = true;
+ }
/*
* We continue with the search, just in
* case there's an even higher prio task
@@ -2208,6 +2241,13 @@ static void pull_rt_task(struct rq *this_rq)
}
skip:
double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
+ raw_spin_unlock(&this_rq->lock);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
+ raw_spin_lock(&this_rq->lock);
+ }
}
if (resched)
@@ -2317,7 +2357,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
if (!task_on_rq_queued(p))
return;
- if (rq->curr == p) {
+ if (task_current(rq, p)) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
@@ -2429,8 +2469,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
return 0;
}
-const struct sched_class rt_sched_class
- __section("__rt_sched_class") = {
+DEFINE_SCHED_CLASS(rt) = {
+
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
@@ -2449,6 +2489,7 @@ const struct sched_class rt_sched_class
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
+ .find_lock_rq = find_lock_lowest_rq,
#endif
.task_tick = task_tick_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index df80bfcea92e..10a1522b1e30 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -67,7 +67,6 @@
#include <linux/tsacct_kern.h>
#include <asm/tlb.h>
-#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
@@ -141,7 +140,7 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
* scale_load() and scale_load_down(w) to convert between them. The
* following must be true:
*
- * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
*
*/
#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
@@ -257,30 +256,6 @@ struct rt_bandwidth {
void __dl_clear_params(struct task_struct *p);
-/*
- * To keep the bandwidth of -deadline tasks and groups under control
- * we need some place where:
- * - store the maximum -deadline bandwidth of the system (the group);
- * - cache the fraction of that bandwidth that is currently allocated.
- *
- * This is all done in the data structure below. It is similar to the
- * one used for RT-throttling (rt_bandwidth), with the main difference
- * that, since here we are only interested in admission control, we
- * do not decrease any runtime while the group "executes", neither we
- * need a timer to replenish it.
- *
- * With respect to SMP, the bandwidth is given on a per-CPU basis,
- * meaning that:
- * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
- * - dl_total_bw array contains, in the i-eth element, the currently
- * allocated bandwidth on the i-eth CPU.
- * Moreover, groups consume bandwidth on each CPU, while tasks only
- * consume bandwidth on the CPU they're running on.
- * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
- * that will be shown the next time the proc or cgroup controls will
- * be red. It on its turn can be changed by writing on its own
- * control.
- */
struct dl_bandwidth {
raw_spinlock_t dl_runtime_lock;
u64 dl_runtime;
@@ -292,6 +267,24 @@ static inline int dl_bandwidth_enabled(void)
return sysctl_sched_rt_runtime >= 0;
}
+/*
+ * To keep the bandwidth of -deadline tasks under control
+ * we need some place where:
+ * - store the maximum -deadline bandwidth of each cpu;
+ * - cache the fraction of bandwidth that is currently allocated in
+ * each root domain;
+ *
+ * This is all done in the data structure below. It is similar to the
+ * one used for RT-throttling (rt_bandwidth), with the main difference
+ * that, since here we are only interested in admission control, we
+ * do not decrease any runtime while the group "executes", neither we
+ * need a timer to replenish it.
+ *
+ * With respect to SMP, bandwidth is given on a per root domain basis,
+ * meaning that:
+ * - bw (< 100%) is the deadline bandwidth of each CPU;
+ * - total_bw is the currently allocated bandwidth in each root domain;
+ */
struct dl_bw {
raw_spinlock_t lock;
u64 bw;
@@ -801,6 +794,15 @@ struct root_domain {
struct dl_bw dl_bw;
struct cpudl cpudl;
+ /*
+ * Indicate whether a root_domain's dl_bw has been checked or
+	 * updated. It's a monotonically increasing value.
+	 *
+	 * Corner cases such as wrap-around would be a concern in principle, but
+	 * a u64 is large enough that this is not a practical problem.
+ */
+ u64 visit_gen;
+
#ifdef HAVE_RT_PUSH_IPI
/*
* For IPI pull requests, loop across the rto_mask.
@@ -973,6 +975,7 @@ struct rq {
unsigned long cpu_capacity_orig;
struct callback_head *balance_callback;
+ unsigned char balance_push;
unsigned char nohz_idle_balance;
unsigned char idle_balance;
@@ -1003,6 +1006,10 @@ struct rq {
/* This is used to determine avg_idle's max value */
u64 max_idle_balance_cost;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ struct rcuwait hotplug_wait;
+#endif
#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1024,6 +1031,7 @@ struct rq {
call_single_data_t hrtick_csd;
#endif
struct hrtimer hrtick_timer;
+ ktime_t hrtick_time;
#endif
#ifdef CONFIG_SCHEDSTATS
@@ -1048,6 +1056,12 @@ struct rq {
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
#endif
+
+#ifdef CONFIG_SMP
+ unsigned int nr_pinned;
+#endif
+ unsigned int push_busy;
+ struct cpu_stop_work push_work;
};
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1075,6 +1089,16 @@ static inline int cpu_of(struct rq *rq)
#endif
}
+#define MDF_PUSH 0x01
+
+static inline bool is_migration_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+ return p->migration_disabled;
+#else
+ return false;
+#endif
+}
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
@@ -1203,6 +1227,8 @@ struct rq_flags {
#endif
};
+extern struct callback_head balance_push_callback;
+
/*
* Lockdep annotation that avoids accidental unlocks; it's like a
* sticky/continuous lockdep_assert_held().
@@ -1220,6 +1246,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
#ifdef CONFIG_SCHED_DEBUG
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
+#ifdef CONFIG_SMP
+ SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
+#endif
#endif
}
@@ -1389,7 +1418,7 @@ queue_balance_callback(struct rq *rq,
{
lockdep_assert_held(&rq->lock);
- if (unlikely(head->next))
+ if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
return;
head->func = (void (*)(struct callback_head *))func;
@@ -1714,13 +1743,20 @@ static inline int task_on_rq_migrating(struct task_struct *p)
return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}
-/*
- * wake flags
- */
-#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
-#define WF_FORK 0x02 /* Child wakeup after fork */
-#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
-#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
+/* Wake flags. The first three directly map to some SD flag value */
+#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
+#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
+#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
+
+#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
+#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+#define WF_ON_CPU 0x40 /* Wakee is on_cpu */
+
+#ifdef CONFIG_SMP
+static_assert(WF_EXEC == SD_BALANCE_EXEC);
+static_assert(WF_FORK == SD_BALANCE_FORK);
+static_assert(WF_TTWU == SD_BALANCE_WAKE);
+#endif
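Because the first nibble now mirrors the SD_BALANCE_* values one-to-one (enforced by the static_asserts above), callers can recover the balance flag with a plain mask instead of carrying a separate sd_flag argument, exactly as the fair.c hunk earlier in this patch does:

	/* SD_flags and WF_flags share the first nibble */
	int sd_flag = wake_flags & 0xF;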
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1796,16 +1832,19 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
- int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
+ int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const struct cpumask *newmask);
+ const struct cpumask *newmask,
+ u32 flags);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
+
+ struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
#endif
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
@@ -1833,7 +1872,7 @@ struct sched_class {
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_change_group)(struct task_struct *p, int type);
#endif
-} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
+};
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
@@ -1847,6 +1886,20 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
next->sched_class->set_next_task(rq, next, false);
}
+
+/*
+ * Helper to define a sched_class instance; each one is placed in a separate
+ * section which is ordered by the linker script:
+ *
+ * include/asm-generic/vmlinux.lds.h
+ *
+ * Also enforce alignment on the instance, not the type, to guarantee layout.
+ */
+#define DEFINE_SCHED_CLASS(name) \
+const struct sched_class name##_sched_class \
+ __aligned(__alignof__(struct sched_class)) \
+ __section("__" #name "_sched_class")
+
/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];
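These section markers are what the scheduler's class-iteration macros walk; roughly (the exact definitions live nearby in this header and may differ in detail):

#define sched_class_highest (__end_sched_classes - 1)
#define sched_class_lowest  (__begin_sched_classes - 1)

#define for_class_range(class, _from, _to) \
	for (class = (_from); class != (_to); class--)

#define for_each_class(class) \
	for_class_range(class, sched_class_highest, sched_class_lowest)

DEFINE_SCHED_CLASS() only works because the linker script places the per-class sections in a fixed priority order between those two symbols, which is why the alignment is enforced on each instance rather than on the type.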
@@ -1889,13 +1942,35 @@ static inline bool sched_fair_runnable(struct rq *rq)
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);
+#define SCA_CHECK 0x01
+#define SCA_MIGRATE_DISABLE 0x02
+#define SCA_MIGRATE_ENABLE 0x04
+
#ifdef CONFIG_SMP
extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+
+static inline struct task_struct *get_push_task(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (rq->push_busy)
+ return NULL;
+
+ if (p->nr_cpus_allowed == 1)
+ return NULL;
+
+ rq->push_busy = true;
+ return get_task_struct(p);
+}
+
+extern int push_cpu_stop(void *arg);
#endif
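get_push_task() pairs with push_cpu_stop(): it marks the runqueue busy and hands back a referenced task for a stopper thread to migrate. The calling pattern, as used by the rt.c hunks earlier in this patch, is:

	push_task = get_push_task(rq);	/* takes a task reference, sets rq->push_busy */
	if (push_task) {
		raw_spin_unlock(&rq->lock);
		stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
				    push_task, &rq->push_work);
		raw_spin_lock(&rq->lock);
	}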
@@ -2030,17 +2105,39 @@ extern const_debug unsigned int sysctl_sched_migration_cost;
*/
static inline int hrtick_enabled(struct rq *rq)
{
- if (!sched_feat(HRTICK))
- return 0;
if (!cpu_active(cpu_of(rq)))
return 0;
return hrtimer_is_hres_active(&rq->hrtick_timer);
}
+static inline int hrtick_enabled_fair(struct rq *rq)
+{
+ if (!sched_feat(HRTICK))
+ return 0;
+ return hrtick_enabled(rq);
+}
+
+static inline int hrtick_enabled_dl(struct rq *rq)
+{
+ if (!sched_feat(HRTICK_DL))
+ return 0;
+ return hrtick_enabled(rq);
+}
+
void hrtick_start(struct rq *rq, u64 delay);
#else
+static inline int hrtick_enabled_fair(struct rq *rq)
+{
+ return 0;
+}
+
+static inline int hrtick_enabled_dl(struct rq *rq)
+{
+ return 0;
+}
+
static inline int hrtick_enabled(struct rq *rq)
{
return 0;
@@ -2484,27 +2581,24 @@ static inline unsigned long capacity_orig_of(int cpu)
{
return cpu_rq(cpu)->cpu_capacity_orig;
}
-#endif
/**
- * enum schedutil_type - CPU utilization type
+ * enum cpu_util_type - CPU utilization type
* @FREQUENCY_UTIL: Utilization used to select frequency
* @ENERGY_UTIL: Utilization used during energy calculation
*
* The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
* need to be aggregated differently depending on the usage made of them. This
- * enum is used within schedutil_freq_util() to differentiate the types of
+ * enum is used within effective_cpu_util() to differentiate the types of
* utilization expected by the callers, and adjust the aggregation accordingly.
*/
-enum schedutil_type {
+enum cpu_util_type {
FREQUENCY_UTIL,
ENERGY_UTIL,
};
-#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-
-unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long max, enum schedutil_type type,
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long max, enum cpu_util_type type,
struct task_struct *p);
static inline unsigned long cpu_bw_dl(struct rq *rq)
@@ -2533,14 +2627,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
{
return READ_ONCE(rq->avg_rt.util_avg);
}
-#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
-static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long max, enum schedutil_type type,
- struct task_struct *p)
-{
- return 0;
-}
-#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index ceb5b6b12561..55f39125c0e1 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -11,7 +11,7 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
@@ -109,8 +109,7 @@ static void update_curr_stop(struct rq *rq)
/*
* Simple, special scheduling class for the per-CPU stop tasks:
*/
-const struct sched_class stop_sched_class
- __section("__stop_sched_class") = {
+DEFINE_SCHED_CLASS(stop) = {
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index dd7770226086..09d35044bd88 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -211,6 +211,15 @@ unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;
+void rebuild_sched_domains_energy(void)
+{
+ mutex_lock(&sched_energy_mutex);
+ sched_energy_update = true;
+ rebuild_sched_domains();
+ sched_energy_update = false;
+ mutex_unlock(&sched_energy_mutex);
+}
+
#ifdef CONFIG_PROC_SYSCTL
int sched_energy_aware_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -223,13 +232,8 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write) {
state = static_branch_unlikely(&sched_energy_present);
- if (state != sysctl_sched_energy_aware) {
- mutex_lock(&sched_energy_mutex);
- sched_energy_update = 1;
- rebuild_sched_domains();
- sched_energy_update = 0;
- mutex_unlock(&sched_energy_mutex);
- }
+ if (state != sysctl_sched_energy_aware)
+ rebuild_sched_domains_energy();
}
return ret;
@@ -324,6 +328,7 @@ static void sched_energy_set(bool has_eas)
* 3. no SMT is detected.
* 4. the EM complexity is low enough to keep scheduling overheads low;
* 5. schedutil is driving the frequency of all CPUs of the rd;
+ * 6. frequency invariance support is present;
*
* The complexity of the Energy Model is defined as:
*
@@ -372,6 +377,14 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
goto free;
}
+ if (!arch_scale_freq_invariant()) {
+ if (sched_debug()) {
+ pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
+ cpumask_pr_args(cpu_map));
+ }
+ goto free;
+ }
+
for_each_cpu(i, cpu_map) {
/* Skip already covered CPUs. */
if (find_pd(pd, i))
@@ -516,6 +529,7 @@ static int init_rootdomain(struct root_domain *rd)
init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif
+ rd->visit_gen = 0;
init_dl_bw(&rd->dl_bw);
if (cpudl_init(&rd->cpudl) != 0)
goto free_rto_mask;
@@ -674,6 +688,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
+ int numa_distance = 0;
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
@@ -705,6 +720,38 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
sd->child = NULL;
}
+ for (tmp = sd; tmp; tmp = tmp->parent)
+ numa_distance += !!(tmp->flags & SD_NUMA);
+
+ /*
+ * FIXME: Diameter >=3 is misrepresented.
+ *
+ * Smallest diameter=3 topology is:
+ *
+ * node 0 1 2 3
+ * 0: 10 20 30 40
+ * 1: 20 10 20 30
+ * 2: 30 20 10 20
+ * 3: 40 30 20 10
+ *
+ * 0 --- 1 --- 2 --- 3
+ *
+ * NUMA-3 0-3 N/A N/A 0-3
+ * groups: {0-2},{1-3} {1-3},{0-2}
+ *
+ * NUMA-2 0-2 0-3 0-3 1-3
+ * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2}
+ *
+ * NUMA-1 0-1 0-2 1-3 2-3
+ * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2}
+ *
+ * NUMA-0 0 1 2 3
+ *
+ * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
+ * group span isn't a subset of the domain span.
+ */
+ WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
+
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
@@ -1549,66 +1596,58 @@ static void init_numa_topology_type(void)
}
}
+
+#define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
+
void sched_init_numa(void)
{
- int next_distance, curr_distance = node_distance(0, 0);
struct sched_domain_topology_level *tl;
- int level = 0;
- int i, j, k;
-
- sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
- if (!sched_domains_numa_distance)
- return;
-
- /* Includes NUMA identity node at level 0. */
- sched_domains_numa_distance[level++] = curr_distance;
- sched_domains_numa_levels = level;
+ unsigned long *distance_map;
+ int nr_levels = 0;
+ int i, j;
/*
* O(nr_nodes^2) deduplicating selection sort -- in order to find the
* unique distances in the node_distance() table.
- *
- * Assumes node_distance(0,j) includes all distances in
- * node_distance(i,j) in order to avoid cubic time.
*/
- next_distance = curr_distance;
+ distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
+ if (!distance_map)
+ return;
+
+ bitmap_zero(distance_map, NR_DISTANCE_VALUES);
for (i = 0; i < nr_node_ids; i++) {
for (j = 0; j < nr_node_ids; j++) {
- for (k = 0; k < nr_node_ids; k++) {
- int distance = node_distance(i, k);
-
- if (distance > curr_distance &&
- (distance < next_distance ||
- next_distance == curr_distance))
- next_distance = distance;
+ int distance = node_distance(i, j);
- /*
- * While not a strong assumption it would be nice to know
- * about cases where if node A is connected to B, B is not
- * equally connected to A.
- */
- if (sched_debug() && node_distance(k, i) != distance)
- sched_numa_warn("Node-distance not symmetric");
-
- if (sched_debug() && i && !find_numa_distance(distance))
- sched_numa_warn("Node-0 not representative");
+ if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
+ sched_numa_warn("Invalid distance value range");
+ return;
}
- if (next_distance != curr_distance) {
- sched_domains_numa_distance[level++] = next_distance;
- sched_domains_numa_levels = level;
- curr_distance = next_distance;
- } else break;
+
+ bitmap_set(distance_map, distance, 1);
}
+ }
+ /*
+ * We can now figure out how many unique distance values there are and
+ * allocate memory accordingly.
+ */
+ nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
- /*
- * In case of sched_debug() we verify the above assumption.
- */
- if (!sched_debug())
- break;
+ sched_domains_numa_distance = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
+ if (!sched_domains_numa_distance) {
+ bitmap_free(distance_map);
+ return;
}
+ for (i = 0, j = 0; i < nr_levels; i++, j++) {
+ j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
+ sched_domains_numa_distance[i] = j;
+ }
+
+ bitmap_free(distance_map);
+
/*
- * 'level' contains the number of unique distances
+ * 'nr_levels' contains the number of unique distances
*
* The sched_domains_numa_distance[] array includes the actual distance
* numbers.
@@ -1617,15 +1656,15 @@ void sched_init_numa(void)
/*
* Here, we should temporarily reset sched_domains_numa_levels to 0.
* If it fails to allocate memory for array sched_domains_numa_masks[][],
- * the array will contain less then 'level' members. This could be
+ * the array will contain fewer than 'nr_levels' members. This could be
* dangerous when we use it to iterate array sched_domains_numa_masks[][]
* in other functions.
*
- * We reset it to 'level' at the end of this function.
+ * We reset it to 'nr_levels' at the end of this function.
*/
sched_domains_numa_levels = 0;
- sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
+ sched_domains_numa_masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
if (!sched_domains_numa_masks)
return;
@@ -1633,7 +1672,7 @@ void sched_init_numa(void)
* Now for each level, construct a mask per node which contains all
* CPUs of nodes that are that many hops away from us.
*/
- for (i = 0; i < level; i++) {
+ for (i = 0; i < nr_levels; i++) {
sched_domains_numa_masks[i] =
kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
if (!sched_domains_numa_masks[i])
@@ -1641,12 +1680,17 @@ void sched_init_numa(void)
for (j = 0; j < nr_node_ids; j++) {
struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ int k;
+
if (!mask)
return;
sched_domains_numa_masks[i][j] = mask;
for_each_node(k) {
+ if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
+ sched_numa_warn("Node-distance not symmetric");
+
if (node_distance(j, k) > sched_domains_numa_distance[i])
continue;
@@ -1658,7 +1702,7 @@ void sched_init_numa(void)
/* Compute default topology size */
for (i = 0; sched_domain_topology[i].mask; i++);
- tl = kzalloc((i + level + 1) *
+ tl = kzalloc((i + nr_levels + 1) *
sizeof(struct sched_domain_topology_level), GFP_KERNEL);
if (!tl)
return;
@@ -1681,7 +1725,7 @@ void sched_init_numa(void)
/*
* .. and append 'j' levels of NUMA goodness.
*/
- for (j = 1; j < level; i++, j++) {
+ for (j = 1; j < nr_levels; i++, j++) {
tl[i] = (struct sched_domain_topology_level){
.mask = sd_numa_mask,
.sd_flags = cpu_numa_flags,
@@ -1693,8 +1737,8 @@ void sched_init_numa(void)
sched_domain_topology = tl;
- sched_domains_numa_levels = level;
- sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+ sched_domains_numa_levels = nr_levels;
+ sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
init_numa_topology_type();
}
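A minimal userspace sketch of the deduplication scheme the rewritten sched_init_numa() uses: record every observed distance in a bitmap, then walk the set bits to get the unique distances in ascending order, one sched domain level per distance. The 4-node table is the diameter-3 example from the FIXME comment above; none of this code is taken from the kernel.

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES		4
#define NR_DISTANCE_VALUES	256	/* assumes 8-bit distance values, as in the patch */

static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 40 },
	{ 20, 10, 20, 30 },
	{ 30, 20, 10, 20 },
	{ 40, 30, 20, 10 },
};

int main(void)
{
	bool distance_map[NR_DISTANCE_VALUES] = { false };
	int nr_levels = 0;
	int i, j, d;

	/* O(nr_nodes^2): mark every distance value that occurs. */
	for (i = 0; i < NR_NODES; i++)
		for (j = 0; j < NR_NODES; j++)
			distance_map[node_distance[i][j]] = true;

	/* Walking the map yields the unique distances in sorted order. */
	for (d = 0; d < NR_DISTANCE_VALUES; d++)
		if (distance_map[d])
			printf("level %d: distance %d\n", nr_levels++, d);

	printf("nr_levels = %d\n", nr_levels);	/* prints 4: 10, 20, 30, 40 */
	return 0;
}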
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 01f5d3020589..183cc6ae68a6 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -37,6 +37,17 @@ void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
+void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
+{
+ unsigned long flags;
+
+ wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
+ spin_lock_irqsave(&wq_head->lock, flags);
+ __add_wait_queue(wq_head, wq_entry);
+ spin_unlock_irqrestore(&wq_head->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_wait_queue_priority);
+
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
@@ -57,7 +68,11 @@ EXPORT_SYMBOL(remove_wait_queue);
/*
* The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
* wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
+ * number) then we wake that number of exclusive tasks, and potentially all
+ * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
+ * the list and any non-exclusive tasks will be woken first. A priority task
+ * may be at the head of the list, and can consume the event without any other
+ * tasks being woken.
*
* There are circumstances in which we can try to wake a task which has already
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
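A hedged, kernel-style sketch of how a waiter might use the new helper. Only add_wait_queue_priority() and the WQ_FLAG_* semantics come from this patch; init_waitqueue_func_entry() is the existing wait.h helper, and the callback name and surrounding context are hypothetical.

#include <linux/wait.h>

/* Runs in the waker's context; the entry was queued at the head. */
static int my_priority_wake(struct wait_queue_entry *wq_entry,
			    unsigned int mode, int sync, void *key)
{
	/* Handle the event here, without waking an ordinary sleeper. */
	return 1;	/* non-zero + WQ_FLAG_EXCLUSIVE: the wakeup is consumed */
}

static struct wait_queue_entry my_waiter;

static void my_attach(struct wait_queue_head *wqh)
{
	init_waitqueue_func_entry(&my_waiter, my_priority_wake);
	/* Sets WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY and queues at the head. */
	add_wait_queue_priority(wqh, &my_waiter);
}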
diff --git a/kernel/scs.c b/kernel/scs.c
index 4ff4a7ba0094..e2a71fc82fa0 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -5,26 +5,49 @@
* Copyright (C) 2019 Google LLC
*/
+#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/vmstat.h>
-static struct kmem_cache *scs_cache;
-
static void __scs_account(void *s, int account)
{
- struct page *scs_page = virt_to_page(s);
+ struct page *scs_page = vmalloc_to_page(s);
mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
account * (SCS_SIZE / SZ_1K));
}
-static void *scs_alloc(int node)
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
+static void *__scs_alloc(int node)
{
- void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+ int i;
+ void *s;
+
+ for (i = 0; i < NR_CACHED_SCS; i++) {
+ s = this_cpu_xchg(scs_cache[i], NULL);
+ if (s) {
+ kasan_unpoison_vmalloc(s, SCS_SIZE);
+ memset(s, 0, SCS_SIZE);
+ return s;
+ }
+ }
+
+ return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ GFP_SCS, PAGE_KERNEL, 0, node,
+ __builtin_return_address(0));
+}
+void *scs_alloc(int node)
+{
+ void *s;
+
+ s = __scs_alloc(node);
if (!s)
return NULL;
@@ -34,21 +57,47 @@ static void *scs_alloc(int node)
* Poison the allocation to catch unintentional accesses to
* the shadow stack when KASAN is enabled.
*/
- kasan_poison_object_data(scs_cache, s);
+ kasan_poison_vmalloc(s, SCS_SIZE);
__scs_account(s, 1);
return s;
}
-static void scs_free(void *s)
+void scs_free(void *s)
{
+ int i;
+
__scs_account(s, -1);
- kasan_unpoison_object_data(scs_cache, s);
- kmem_cache_free(scs_cache, s);
+
+ /*
+ * We cannot sleep as this can be called in interrupt context,
+ * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
+ * to free the stack.
+ */
+
+ for (i = 0; i < NR_CACHED_SCS; i++)
+ if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+ return;
+
+ vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+ int i;
+ void **cache = per_cpu_ptr(scs_cache, cpu);
+
+ for (i = 0; i < NR_CACHED_SCS; i++) {
+ vfree(cache[i]);
+ cache[i] = NULL;
+ }
+
+ return 0;
}
void __init scs_init(void)
{
- scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+ scs_cleanup);
}
int scs_prepare(struct task_struct *tsk, int node)
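The per-CPU scs_cache[] above is a small lock-free freelist. As an illustration of the pattern only, here is a userspace analogue using the GCC/Clang __atomic builtins in place of this_cpu_xchg()/this_cpu_cmpxchg(); the kernel version additionally relies on per-CPU placement and must use vfree_atomic() because frees can happen in interrupt context.

#include <stdlib.h>
#include <string.h>

#define NR_CACHED	2
#define BUF_SIZE	4096

static void *cache[NR_CACHED];

static void *buf_alloc(void)
{
	/* Try to reuse a cached buffer first: atomically take a slot. */
	for (int i = 0; i < NR_CACHED; i++) {
		void *s = __atomic_exchange_n(&cache[i], NULL, __ATOMIC_ACQ_REL);
		if (s) {
			memset(s, 0, BUF_SIZE);	/* scrub reused buffers */
			return s;
		}
	}
	return calloc(1, BUF_SIZE);	/* cache empty: fall back to the allocator */
}

static void buf_free(void *s)
{
	/* Park the buffer in the first empty slot, if any. */
	for (int i = 0; i < NR_CACHED; i++) {
		void *expected = NULL;

		if (__atomic_compare_exchange_n(&cache[i], &expected, s, false,
						__ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
			return;
	}
	free(s);	/* both slots occupied: really release it */
}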
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 8ad7a293255a..952dc1c90229 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -38,7 +38,7 @@
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
-#include <linux/security.h>
+#include <linux/capability.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
@@ -143,6 +143,38 @@ struct notification {
struct list_head notifications;
};
+#ifdef SECCOMP_ARCH_NATIVE
+/**
+ * struct action_cache - per-filter cache of seccomp actions per
+ * arch/syscall pair
+ *
+ * @allow_native: A bitmap where each bit represents whether the
+ * filter will always allow the syscall, for the
+ * native architecture.
+ * @allow_compat: A bitmap where each bit represents whether the
+ * filter will always allow the syscall, for the
+ * compat architecture.
+ */
+struct action_cache {
+ DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
+#ifdef SECCOMP_ARCH_COMPAT
+ DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
+#endif
+};
+#else
+struct action_cache { };
+
+static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
+ const struct seccomp_data *sd)
+{
+ return false;
+}
+
+static inline void seccomp_cache_prepare(struct seccomp_filter *sfilter)
+{
+}
+#endif /* SECCOMP_ARCH_NATIVE */
+
/**
* struct seccomp_filter - container for seccomp BPF programs
*
@@ -159,6 +191,7 @@ struct notification {
* this filter after reaching 0. The @users count is always smaller
* or equal to @refs. Hence, reaching 0 for @users does not mean
* the filter can be freed.
+ * @cache: cache of arch/syscall mappings to actions
* @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
* @prev: points to a previously installed, or inherited, filter
* @prog: the BPF program to evaluate
@@ -180,6 +213,7 @@ struct seccomp_filter {
refcount_t refs;
refcount_t users;
bool log;
+ struct action_cache cache;
struct seccomp_filter *prev;
struct bpf_prog *prog;
struct notification *notif;
@@ -298,6 +332,52 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
return 0;
}
+#ifdef SECCOMP_ARCH_NATIVE
+static inline bool seccomp_cache_check_allow_bitmap(const void *bitmap,
+ size_t bitmap_size,
+ int syscall_nr)
+{
+ if (unlikely(syscall_nr < 0 || syscall_nr >= bitmap_size))
+ return false;
+ syscall_nr = array_index_nospec(syscall_nr, bitmap_size);
+
+ return test_bit(syscall_nr, bitmap);
+}
+
+/**
+ * seccomp_cache_check_allow - lookup seccomp cache
+ * @sfilter: The seccomp filter
+ * @sd: The seccomp data to lookup the cache with
+ *
+ * Returns true if the seccomp_data is cached and allowed.
+ */
+static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
+ const struct seccomp_data *sd)
+{
+ int syscall_nr = sd->nr;
+ const struct action_cache *cache = &sfilter->cache;
+
+#ifndef SECCOMP_ARCH_COMPAT
+ /* A native-only architecture doesn't need to check sd->arch. */
+ return seccomp_cache_check_allow_bitmap(cache->allow_native,
+ SECCOMP_ARCH_NATIVE_NR,
+ syscall_nr);
+#else
+ if (likely(sd->arch == SECCOMP_ARCH_NATIVE))
+ return seccomp_cache_check_allow_bitmap(cache->allow_native,
+ SECCOMP_ARCH_NATIVE_NR,
+ syscall_nr);
+ if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
+ return seccomp_cache_check_allow_bitmap(cache->allow_compat,
+ SECCOMP_ARCH_COMPAT_NR,
+ syscall_nr);
+#endif /* SECCOMP_ARCH_COMPAT */
+
+ WARN_ON_ONCE(true);
+ return false;
+}
+#endif /* SECCOMP_ARCH_NATIVE */
+
/**
* seccomp_run_filters - evaluates all seccomp filters against @sd
* @sd: optional seccomp data to be passed to filters
@@ -320,6 +400,9 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
if (WARN_ON(f == NULL))
return SECCOMP_RET_KILL_PROCESS;
+ if (seccomp_cache_check_allow(f, sd))
+ return SECCOMP_RET_ALLOW;
+
/*
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
@@ -356,14 +439,14 @@ static inline void seccomp_assign_mode(struct task_struct *task,
task->seccomp.mode = seccomp_mode;
/*
- * Make sure TIF_SECCOMP cannot be set before the mode (and
+ * Make sure SYSCALL_WORK_SECCOMP cannot be set before the mode (and
* filter) is set.
*/
smp_mb__before_atomic();
/* Assume default seccomp processes want spec flaw mitigation. */
if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
arch_seccomp_spec_mitigate(task);
- set_tsk_thread_flag(task, TIF_SECCOMP);
+ set_task_syscall_work(task, SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
@@ -470,6 +553,9 @@ void seccomp_filter_release(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
+ /* We are effectively holding the siglock by not having any sighand. */
+ WARN_ON(tsk->sighand != NULL);
+
/* Detach task from its filter tree. */
tsk->seccomp.filter = NULL;
__seccomp_filter_release(orig);
@@ -544,7 +630,12 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
struct seccomp_filter *sfilter;
int ret;
- const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
+ const bool save_orig =
+#if defined(CONFIG_CHECKPOINT_RESTORE) || defined(SECCOMP_ARCH_NATIVE)
+ true;
+#else
+ false;
+#endif
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
@@ -558,8 +649,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
* behavior of privileged children.
*/
if (!task_no_new_privs(current) &&
- security_capable(current_cred(), current_user_ns(),
- CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
+ !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/* Allocate a new seccomp_filter */
@@ -610,6 +700,148 @@ out:
return filter;
}
+#ifdef SECCOMP_ARCH_NATIVE
+/**
+ * seccomp_is_const_allow - check if filter is constant allow with given data
+ * @fprog: The BPF programs
+ * @sd: The seccomp data to check against, only syscall number and arch
+ * number are considered constant.
+ */
+static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
+ struct seccomp_data *sd)
+{
+ unsigned int reg_value = 0;
+ unsigned int pc;
+ bool op_res;
+
+ if (WARN_ON_ONCE(!fprog))
+ return false;
+
+ for (pc = 0; pc < fprog->len; pc++) {
+ struct sock_filter *insn = &fprog->filter[pc];
+ u16 code = insn->code;
+ u32 k = insn->k;
+
+ switch (code) {
+ case BPF_LD | BPF_W | BPF_ABS:
+ switch (k) {
+ case offsetof(struct seccomp_data, nr):
+ reg_value = sd->nr;
+ break;
+ case offsetof(struct seccomp_data, arch):
+ reg_value = sd->arch;
+ break;
+ default:
+ /* can't optimize (non-constant value load) */
+ return false;
+ }
+ break;
+ case BPF_RET | BPF_K:
+ /* reached return with constant values only, check allow */
+ return k == SECCOMP_RET_ALLOW;
+ case BPF_JMP | BPF_JA:
+ pc += insn->k;
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ switch (BPF_OP(code)) {
+ case BPF_JEQ:
+ op_res = reg_value == k;
+ break;
+ case BPF_JGE:
+ op_res = reg_value >= k;
+ break;
+ case BPF_JGT:
+ op_res = reg_value > k;
+ break;
+ case BPF_JSET:
+ op_res = !!(reg_value & k);
+ break;
+ default:
+ /* can't optimize (unknown jump) */
+ return false;
+ }
+
+ pc += op_res ? insn->jt : insn->jf;
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ reg_value &= k;
+ break;
+ default:
+ /* can't optimize (unknown insn) */
+ return false;
+ }
+ }
+
+ /* ran off the end of the filter?! */
+ WARN_ON(1);
+ return false;
+}
+
+static void seccomp_cache_prepare_bitmap(struct seccomp_filter *sfilter,
+ void *bitmap, const void *bitmap_prev,
+ size_t bitmap_size, int arch)
+{
+ struct sock_fprog_kern *fprog = sfilter->prog->orig_prog;
+ struct seccomp_data sd;
+ int nr;
+
+ if (bitmap_prev) {
+ /* The new filter must be as restrictive as the last. */
+ bitmap_copy(bitmap, bitmap_prev, bitmap_size);
+ } else {
+ /* Before any filters, all syscalls are always allowed. */
+ bitmap_fill(bitmap, bitmap_size);
+ }
+
+ for (nr = 0; nr < bitmap_size; nr++) {
+ /* No bitmap change: not a cacheable action. */
+ if (!test_bit(nr, bitmap))
+ continue;
+
+ sd.nr = nr;
+ sd.arch = arch;
+
+ /* No bitmap change: continue to always allow. */
+ if (seccomp_is_const_allow(fprog, &sd))
+ continue;
+
+ /*
+ * Not a cacheable action: always run filters.
+ * atomic clear_bit() not needed, filter not visible yet.
+ */
+ __clear_bit(nr, bitmap);
+ }
+}
+
+/**
+ * seccomp_cache_prepare - emulate the filter to find cacheable syscalls
+ * @sfilter: The seccomp filter
+ */
+static void seccomp_cache_prepare(struct seccomp_filter *sfilter)
+{
+ struct action_cache *cache = &sfilter->cache;
+ const struct action_cache *cache_prev =
+ sfilter->prev ? &sfilter->prev->cache : NULL;
+
+ seccomp_cache_prepare_bitmap(sfilter, cache->allow_native,
+ cache_prev ? cache_prev->allow_native : NULL,
+ SECCOMP_ARCH_NATIVE_NR,
+ SECCOMP_ARCH_NATIVE);
+
+#ifdef SECCOMP_ARCH_COMPAT
+ seccomp_cache_prepare_bitmap(sfilter, cache->allow_compat,
+ cache_prev ? cache_prev->allow_compat : NULL,
+ SECCOMP_ARCH_COMPAT_NR,
+ SECCOMP_ARCH_COMPAT);
+#endif /* SECCOMP_ARCH_COMPAT */
+}
+#endif /* SECCOMP_ARCH_NATIVE */
+
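For context, here is a userspace sketch of the kind of classic-BPF program seccomp_is_const_allow() can resolve completely: every instruction either loads sd->arch or sd->nr, or branches/returns on a constant, so the verdict for each (arch, nr) pair is known at attach time and the always-allowed syscalls end up set in the action cache. The syscall numbers are x86-64 and the program is illustrative, not taken from this patch.

#include <stddef.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/syscall.h>

/* Allow read(2) and write(2) on x86-64, kill everything else. */
static struct sock_filter filter[] = {
	/* Load sd->arch and reject foreign architectures. */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
	/* Load sd->nr and allow the two whitelisted syscalls. */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 2, 0),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write, 1, 0),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};

static const struct sock_fprog prog = {
	.len = sizeof(filter) / sizeof(filter[0]),
	.filter = filter,
};

After attaching this with prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog) (with no_new_privs set), only the bits for __NR_read and __NR_write would remain set in allow_native, and those two syscalls would skip the BPF run entirely.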
/**
* seccomp_attach_filter: validate and attach filter
* @flags: flags to change filter behavior
@@ -659,6 +891,7 @@ static long seccomp_attach_filter(unsigned int flags,
* task reference.
*/
filter->prev = current->seccomp.filter;
+ seccomp_cache_prepare(filter);
current->seccomp.filter = filter;
atomic_inc(&current->seccomp.filter_count);
@@ -929,7 +1162,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
/*
* Make sure that any changes to mode from another thread have
- * been seen after TIF_SECCOMP was seen.
+ * been seen after SYSCALL_WORK_SECCOMP was seen.
*/
rmb();
@@ -1968,7 +2201,7 @@ static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
return true;
}
-static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
+static int read_actions_logged(struct ctl_table *ro_table, void *buffer,
size_t *lenp, loff_t *ppos)
{
char names[sizeof(seccomp_actions_avail)];
@@ -1986,7 +2219,7 @@ static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
return proc_dostring(&table, 0, buffer, lenp, ppos);
}
-static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
+static int write_actions_logged(struct ctl_table *ro_table, void *buffer,
size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
char names[sizeof(seccomp_actions_avail)];
@@ -2104,3 +2337,59 @@ static int __init seccomp_sysctl_init(void)
device_initcall(seccomp_sysctl_init)
#endif /* CONFIG_SYSCTL */
+
+#ifdef CONFIG_SECCOMP_CACHE_DEBUG
+/* Currently CONFIG_SECCOMP_CACHE_DEBUG implies SECCOMP_ARCH_NATIVE */
+static void proc_pid_seccomp_cache_arch(struct seq_file *m, const char *name,
+ const void *bitmap, size_t bitmap_size)
+{
+ int nr;
+
+ for (nr = 0; nr < bitmap_size; nr++) {
+ bool cached = test_bit(nr, bitmap);
+ char *status = cached ? "ALLOW" : "FILTER";
+
+ seq_printf(m, "%s %d %s\n", name, nr, status);
+ }
+}
+
+int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ struct seccomp_filter *f;
+ unsigned long flags;
+
+ /*
+ * We don't want some sandboxed process to know what their seccomp
+ * filters consist of.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!lock_task_sighand(task, &flags))
+ return -ESRCH;
+
+ f = READ_ONCE(task->seccomp.filter);
+ if (!f) {
+ unlock_task_sighand(task, &flags);
+ return 0;
+ }
+
+ /* prevent filter from being freed while we are printing it */
+ __get_seccomp_filter(f);
+ unlock_task_sighand(task, &flags);
+
+ proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_NATIVE_NAME,
+ f->cache.allow_native,
+ SECCOMP_ARCH_NATIVE_NR);
+
+#ifdef SECCOMP_ARCH_COMPAT
+ proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_COMPAT_NAME,
+ f->cache.allow_compat,
+ SECCOMP_ARCH_COMPAT_NR);
+#endif /* SECCOMP_ARCH_COMPAT */
+
+ __put_seccomp_filter(f);
+ return 0;
+}
+#endif /* CONFIG_SECCOMP_CACHE_DEBUG */
diff --git a/kernel/signal.c b/kernel/signal.c
index a38b3edc6851..5ad8566534e7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task)
void task_join_group_stop(struct task_struct *task)
{
+ unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
+ struct signal_struct *sig = current->signal;
+
+ if (sig->group_stop_count) {
+ sig->group_stop_count++;
+ mask |= JOBCTL_STOP_CONSUME;
+ } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
+ return;
+
/* Have the new thread join an on-going signal group stop */
- unsigned long jobctl = current->jobctl;
- if (jobctl & JOBCTL_STOP_PENDING) {
- struct signal_struct *sig = current->signal;
- unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
- unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
- if (task_set_jobctl_pending(task, signr | gstop)) {
- sig->group_stop_count++;
- }
- }
+ task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
@@ -983,7 +984,7 @@ static inline bool wants_signal(int sig, struct task_struct *p)
if (task_is_stopped_or_traced(p))
return false;
- return task_curr(p) || !signal_pending(p);
+ return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
@@ -2523,12 +2524,47 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
return signr;
}
+static void hide_si_addr_tag_bits(struct ksignal *ksig)
+{
+ switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
+ case SIL_FAULT:
+ case SIL_FAULT_MCEERR:
+ case SIL_FAULT_BNDERR:
+ case SIL_FAULT_PKUERR:
+ ksig->info.si_addr = arch_untagged_si_addr(
+ ksig->info.si_addr, ksig->sig, ksig->info.si_code);
+ break;
+ case SIL_KILL:
+ case SIL_TIMER:
+ case SIL_POLL:
+ case SIL_CHLD:
+ case SIL_RT:
+ case SIL_SYS:
+ break;
+ }
+}
+
bool get_signal(struct ksignal *ksig)
{
struct sighand_struct *sighand = current->sighand;
struct signal_struct *signal = current->signal;
int signr;
+ if (unlikely(current->task_works))
+ task_work_run();
+
+ /*
+ * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
+ * that the arch handlers don't all have to do it. If we get here
+ * without TIF_SIGPENDING, just exit after running signal work.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+ tracehook_notify_signal();
+ if (!task_sigpending(current))
+ return false;
+ }
+
if (unlikely(uprobe_deny_signal()))
return false;
@@ -2541,26 +2577,6 @@ bool get_signal(struct ksignal *ksig)
relock:
spin_lock_irq(&sighand->siglock);
- /*
- * Make sure we can safely read ->jobctl() in task_work add. As Oleg
- * states:
- *
- * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
- * roughly have
- *
- * task_work_add: get_signal:
- * STORE(task->task_works, new_work); STORE(task->jobctl);
- * mb(); mb();
- * LOAD(task->jobctl); LOAD(task->task_works);
- *
- * and we can rely on STORE-MB-LOAD [ in task_work_add].
- */
- smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
- if (unlikely(current->task_works)) {
- spin_unlock_irq(&sighand->siglock);
- task_work_run();
- goto relock;
- }
/*
* Every stopped thread goes here after wakeup. Check to see if
@@ -2760,6 +2776,10 @@ relock:
spin_unlock_irq(&sighand->siglock);
ksig->sig = signr;
+
+ if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
+ hide_si_addr_tag_bits(ksig);
+
return ksig->sig > 0;
}
@@ -2822,7 +2842,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
/* Remove the signals this thread can handle. */
sigandsets(&retarget, &retarget, &t->blocked);
- if (!signal_pending(t))
+ if (!task_sigpending(t))
signal_wake_up(t, 0);
if (sigisemptyset(&retarget))
@@ -2856,7 +2876,7 @@ void exit_signals(struct task_struct *tsk)
cgroup_threadgroup_change_end(tsk);
- if (!signal_pending(tsk))
+ if (!task_sigpending(tsk))
goto out;
unblocked = tsk->blocked;
@@ -2900,7 +2920,7 @@ long do_no_restart_syscall(struct restart_block *param)
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
- if (signal_pending(tsk) && !thread_group_empty(tsk)) {
+ if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
sigset_t newblocked;
/* A set of now blocked but previously unblocked signals. */
sigandnsets(&newblocked, newset, &current->blocked);
@@ -3684,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
return true;
}
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+ siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
/*
@@ -3984,6 +4005,22 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
if (oact)
*oact = *k;
+ /*
+ * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
+ * e.g. by having an architecture use the bit in their uapi.
+ */
+ BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
+
+ /*
+ * Clear unknown flag bits in order to allow userspace to detect missing
+ * support for flag bits and to allow the kernel to use non-uapi bits
+ * internally.
+ */
+ if (act)
+ act->sa.sa_flags &= UAPI_SA_FLAGS;
+ if (oact)
+ oact->sa.sa_flags &= UAPI_SA_FLAGS;
+
sigaction_compat_abi(act, oact);
if (act) {
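The flag filtering above is what lets userspace probe for new sa_flags bits: SA_UNSUPPORTED is guaranteed never to be supported, so if it reads back cleared the kernel filters unknown bits and any other surviving bit is genuinely supported. A small sketch; the fallback #define values mirror the uapi headers at the time of this patch and should be treated as an assumption if your libc does not provide them.

#include <signal.h>
#include <stdio.h>
#include <string.h>

#ifndef SA_UNSUPPORTED			/* uapi values as of this series */
#define SA_UNSUPPORTED		0x00000400
#endif
#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS	0x00000800
#endif

static void handler(int sig) { (void)sig; }

int main(void)
{
	struct sigaction act, oact;

	memset(&act, 0, sizeof(act));
	act.sa_handler = handler;
	act.sa_flags = SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
	sigaction(SIGSEGV, &act, NULL);

	sigaction(SIGSEGV, NULL, &oact);
	if (oact.sa_flags & SA_UNSUPPORTED)
		printf("old kernel: sa_flags are not filtered, cannot probe\n");
	else if (oact.sa_flags & SA_EXPOSE_TAGBITS)
		printf("SA_EXPOSE_TAGBITS is supported\n");
	else
		printf("SA_EXPOSE_TAGBITS is not supported\n");
	return 0;
}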
diff --git a/kernel/smp.c b/kernel/smp.c
index 4d17501433be..aeb0adfa0606 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
@@ -27,7 +28,7 @@
#include "smpboot.h"
#include "sched/smp.h"
-#define CSD_TYPE(_csd) ((_csd)->flags & CSD_FLAG_TYPE_MASK)
+#define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
struct call_function_data {
call_single_data_t __percpu *csd;
@@ -130,7 +131,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
csd_type = CSD_TYPE(csd);
if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
- return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
+ return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
return -1;
}
@@ -146,7 +147,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
bool firsttime;
u64 ts2, ts_delta;
call_single_data_t *cpu_cur_csd;
- unsigned int flags = READ_ONCE(csd->flags);
+ unsigned int flags = READ_ONCE(csd->node.u_flags);
if (!(flags & CSD_FLAG_LOCK)) {
if (!unlikely(*bug_id))
@@ -224,14 +225,14 @@ static void csd_lock_record(call_single_data_t *csd)
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
- smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
+ smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif
static __always_inline void csd_lock(call_single_data_t *csd)
{
csd_lock_wait(csd);
- csd->flags |= CSD_FLAG_LOCK;
+ csd->node.u_flags |= CSD_FLAG_LOCK;
/*
* prevent CPU from reordering the above assignment
@@ -243,12 +244,12 @@ static __always_inline void csd_lock(call_single_data_t *csd)
static __always_inline void csd_unlock(call_single_data_t *csd)
{
- WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+ WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
/*
* ensure we're all done before releasing data:
*/
- smp_store_release(&csd->flags, 0);
+ smp_store_release(&csd->node.u_flags, 0);
}
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
@@ -300,7 +301,7 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
return -ENXIO;
}
- __smp_call_single_queue(cpu, &csd->llist);
+ __smp_call_single_queue(cpu, &csd->node.llist);
return 0;
}
@@ -353,7 +354,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
* We don't have to use the _safe() variant here
* because we are not invoking the IPI handlers yet.
*/
- llist_for_each_entry(csd, entry, llist) {
+ llist_for_each_entry(csd, entry, node.llist) {
switch (CSD_TYPE(csd)) {
case CSD_TYPE_ASYNC:
case CSD_TYPE_SYNC:
@@ -378,16 +379,16 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
* First; run all SYNC callbacks, people are waiting for us.
*/
prev = NULL;
- llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+ llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
/* Do we wait until *after* callback? */
if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
smp_call_func_t func = csd->func;
void *info = csd->info;
if (prev) {
- prev->next = &csd_next->llist;
+ prev->next = &csd_next->node.llist;
} else {
- entry = &csd_next->llist;
+ entry = &csd_next->node.llist;
}
csd_lock_record(csd);
@@ -395,7 +396,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
csd_unlock(csd);
csd_lock_record(NULL);
} else {
- prev = &csd->llist;
+ prev = &csd->node.llist;
}
}
@@ -406,14 +407,14 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
* Second; run all !SYNC callbacks.
*/
prev = NULL;
- llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+ llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
int type = CSD_TYPE(csd);
if (type != CSD_TYPE_TTWU) {
if (prev) {
- prev->next = &csd_next->llist;
+ prev->next = &csd_next->node.llist;
} else {
- entry = &csd_next->llist;
+ entry = &csd_next->node.llist;
}
if (type == CSD_TYPE_ASYNC) {
@@ -429,7 +430,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
}
} else {
- prev = &csd->llist;
+ prev = &csd->node.llist;
}
}
@@ -449,6 +450,9 @@ void flush_smp_call_function_from_idle(void)
local_irq_save(flags);
flush_smp_call_function_queue(true);
+ if (local_softirq_pending())
+ do_softirq();
+
local_irq_restore(flags);
}
@@ -465,7 +469,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
{
call_single_data_t *csd;
call_single_data_t csd_stack = {
- .flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
+ .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
};
int this_cpu;
int err;
@@ -502,8 +506,8 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
csd->func = func;
csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
- csd->src = smp_processor_id();
- csd->dst = cpu;
+ csd->node.src = smp_processor_id();
+ csd->node.dst = cpu;
#endif
err = generic_exec_single(cpu, csd);
@@ -544,12 +548,12 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
preempt_disable();
- if (csd->flags & CSD_FLAG_LOCK) {
+ if (csd->node.u_flags & CSD_FLAG_LOCK) {
err = -EBUSY;
goto out;
}
- csd->flags = CSD_FLAG_LOCK;
+ csd->node.u_flags = CSD_FLAG_LOCK;
smp_wmb();
err = generic_exec_single(cpu, csd);
@@ -667,14 +671,14 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
csd_lock(csd);
if (wait)
- csd->flags |= CSD_TYPE_SYNC;
+ csd->node.u_flags |= CSD_TYPE_SYNC;
csd->func = func;
csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
- csd->src = smp_processor_id();
- csd->dst = cpu;
+ csd->node.src = smp_processor_id();
+ csd->node.dst = cpu;
#endif
- if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+ if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
}
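The smp.c hunks above are mostly a mechanical move of the CSD bookkeeping into csd->node; the public API is unchanged. As a reminder of that API, a hedged kernel-style sketch with hypothetical function names:

#include <linux/smp.h>

/* Runs on the target CPU in IPI context, so it must not sleep. */
static void remote_func(void *info)
{
	int *result = info;

	*result = smp_processor_id();
}

static int query_cpu(int cpu)
{
	int result = -1;

	/* wait=1: return only after remote_func() has run on @cpu. */
	smp_call_function_single(cpu, remote_func, &result, 1);
	return result;
}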
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 2efe1e206167..f25208e8df83 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kfree(td);
return PTR_ERR(tsk);
}
+ kthread_set_per_cpu(tsk, cpu);
/*
* Park the thread so that it could start right on the CPU
* when it is available.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 09229ad82209..9d71046ea247 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,6 +92,13 @@ static bool ksoftirqd_running(unsigned long pending)
!__kthread_should_park(tsk);
}
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+#endif
+
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -102,17 +109,11 @@ static bool ksoftirqd_running(unsigned long pending)
* softirq and whether we just have bh disabled.
*/
+#ifdef CONFIG_TRACE_IRQFLAGS
/*
- * This one is for softirq.c-internal use,
- * where hardirqs are disabled legitimately:
+ * This is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
*/
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-DEFINE_PER_CPU(int, hardirqs_enabled);
-DEFINE_PER_CPU(int, hardirq_context);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
-
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -185,7 +186,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
* Keep preemption disabled until we are done with
* softirq processing:
*/
- preempt_count_sub(cnt - 1);
+ __preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
@@ -203,6 +204,50 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
}
EXPORT_SYMBOL(__local_bh_enable_ip);
+static inline void invoke_softirq(void)
+{
+ if (ksoftirqd_running(local_softirq_pending()))
+ return;
+
+ if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+ /*
+ * We can safely execute softirq on the current stack if
+ * it is the irq stack, because it should be near empty
+ * at this stage.
+ */
+ __do_softirq();
+#else
+ /*
+ * Otherwise, irq_exit() is called on the task stack that can
+ * be potentially deep already. So call softirq in its own stack
+ * to prevent any overrun.
+ */
+ do_softirq_own_stack();
+#endif
+ } else {
+ wakeup_softirqd();
+ }
+}
+
+asmlinkage __visible void do_softirq(void)
+{
+ __u32 pending;
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = local_softirq_pending();
+
+ if (pending && !ksoftirqd_running(pending))
+ do_softirq_own_stack();
+
+ local_irq_restore(flags);
+}
+
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
@@ -270,10 +315,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
- account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
in_hardirq = lockdep_softirq_start();
+ account_softirq_enter(current);
restart:
/* Reset the pending bitmask before enabling irqs */
@@ -320,46 +365,24 @@ restart:
wakeup_softirqd();
}
+ account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
- account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
current_restore_flags(old_flags, PF_MEMALLOC);
}
-asmlinkage __visible void do_softirq(void)
-{
- __u32 pending;
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- pending = local_softirq_pending();
-
- if (pending && !ksoftirqd_running(pending))
- do_softirq_own_stack();
-
- local_irq_restore(flags);
-}
-
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
void irq_enter_rcu(void)
{
- if (is_idle_task(current) && !in_interrupt()) {
- /*
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
- local_bh_disable();
+ __irq_enter_raw();
+
+ if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
tick_irq_enter();
- _local_bh_enable();
- }
- __irq_enter();
+
+ account_hardirq_enter(current);
}
/**
@@ -371,32 +394,6 @@ void irq_enter(void)
irq_enter_rcu();
}
-static inline void invoke_softirq(void)
-{
- if (ksoftirqd_running(local_softirq_pending()))
- return;
-
- if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
- /*
- * We can safely execute softirq on the current stack if
- * it is the irq stack, because it should be near empty
- * at this stage.
- */
- __do_softirq();
-#else
- /*
- * Otherwise, irq_exit() is called on the task stack that can
- * be potentially deep already. So call softirq in its own stack
- * to prevent from any overrun.
- */
- do_softirq_own_stack();
-#endif
- } else {
- wakeup_softirqd();
- }
-}
-
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
@@ -417,7 +414,7 @@ static inline void __irq_exit_rcu(void)
#else
lockdep_assert_irqs_disabled();
#endif
- account_irq_exit_time(current);
+ account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
diff --git a/kernel/static_call.c b/kernel/static_call.c
index 84565c2a41b8..6906c6ec4c97 100644
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -12,6 +12,8 @@
extern struct static_call_site __start_static_call_sites[],
__stop_static_call_sites[];
+extern struct static_call_tramp_key __start_static_call_tramp_key[],
+ __stop_static_call_tramp_key[];
static bool static_call_initialized;
@@ -323,10 +325,59 @@ static int __static_call_mod_text_reserved(void *start, void *end)
return ret;
}
+static unsigned long tramp_key_lookup(unsigned long addr)
+{
+ struct static_call_tramp_key *start = __start_static_call_tramp_key;
+ struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
+ struct static_call_tramp_key *tramp_key;
+
+ for (tramp_key = start; tramp_key != stop; tramp_key++) {
+ unsigned long tramp;
+
+ tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
+ if (tramp == addr)
+ return (long)tramp_key->key + (long)&tramp_key->key;
+ }
+
+ return 0;
+}
+
static int static_call_add_module(struct module *mod)
{
- return __static_call_init(mod, mod->static_call_sites,
- mod->static_call_sites + mod->num_static_call_sites);
+ struct static_call_site *start = mod->static_call_sites;
+ struct static_call_site *stop = start + mod->num_static_call_sites;
+ struct static_call_site *site;
+
+ for (site = start; site != stop; site++) {
+ unsigned long addr = (unsigned long)static_call_key(site);
+ unsigned long key;
+
+ /*
+ * Is the key is exported, 'addr' points to the key, which
+ * means modules are allowed to call static_call_update() on
+ * it.
+ *
+ * Otherwise, the key isn't exported, and 'addr' points to the
+ * trampoline so we need to lookup the key.
+ *
+ * We go through this dance to prevent crazy modules from
+ * abusing sensitive static calls.
+ */
+ if (!kernel_text_address(addr))
+ continue;
+
+ key = tramp_key_lookup(addr);
+ if (!key) {
+ pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
+ static_call_addr(site));
+ return -EINVAL;
+ }
+
+ site->key = (key - (long)&site->key) |
+ (site->key & STATIC_CALL_SITE_FLAGS);
+ }
+
+ return __static_call_init(mod, start, stop);
}
static void static_call_del_module(struct module *mod)
@@ -438,6 +489,11 @@ int __init static_call_init(void)
}
early_initcall(static_call_init);
+long __static_call_return0(void)
+{
+ return 0;
+}
+
#ifdef CONFIG_STATIC_CALL_SELFTEST
static int func_a(int x)
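The module fixup above exists so that core code can export only a static call's trampoline, letting modules invoke the call without being able to retarget it. A hedged sketch of that split, with hypothetical names; the macros are those from include/linux/static_call.h as of this series.

#include <linux/static_call.h>

/* Core kernel: default target plus the call's key and trampoline. */
static int default_pm_notify(int event)
{
	return 0;
}

DEFINE_STATIC_CALL(pm_notify, default_pm_notify);
/*
 * Export only the trampoline. Modules can call static_call(pm_notify),
 * but since the key is not exported they cannot static_call_update() it;
 * tramp_key_lookup() above maps their trampoline reference back to the key.
 */
EXPORT_STATIC_CALL_TRAMP_GPL(pm_notify);

/* A module (with DECLARE_STATIC_CALL(pm_notify, default_pm_notify) in a header): */
static void module_user(void)
{
	static_call(pm_notify)(42);
}

/* Core code can still retarget the call at runtime: */
static void core_retarget(int (*new_fn)(int))
{
	static_call_update(pm_notify, new_fn);
}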
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 865bb0228ab6..971d8acceaec 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -42,11 +42,27 @@ struct cpu_stopper {
struct list_head works; /* list of pending works */
struct cpu_stop_work stop_work; /* for stop_cpus */
+ unsigned long caller;
+ cpu_stop_fn_t fn;
};
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;
+void print_stop_info(const char *log_lvl, struct task_struct *task)
+{
+ /*
+ * If @task is a stopper task, it cannot migrate and task_cpu() is
+ * stable.
+ */
+ struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
+
+ if (task != stopper->thread)
+ return;
+
+ printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
+}
+
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;
@@ -123,7 +139,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
struct cpu_stop_done done;
- struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+ struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
cpu_stop_init_done(&done, 1);
if (!cpu_stop_queue_work(cpu, &work))
@@ -178,7 +194,7 @@ static void ack_state(struct multi_stop_data *msdata)
set_state(msdata, msdata->state + 1);
}
-void __weak stop_machine_yield(const struct cpumask *cpumask)
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
cpu_relax();
}
@@ -331,7 +347,8 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
work1 = work2 = (struct cpu_stop_work){
.fn = multi_cpu_stop,
.arg = &msdata,
- .done = &done
+ .done = &done,
+ .caller = _RET_IP_,
};
cpu_stop_init_done(&done, 2);
@@ -367,7 +384,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf)
{
- *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+ *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
return cpu_stop_queue_work(cpu, work_buf);
}
@@ -487,6 +504,8 @@ repeat:
int ret;
/* cpu stop callbacks must not sleep, make in_atomic() == T */
+ stopper->caller = work->caller;
+ stopper->fn = fn;
preempt_count_inc();
ret = fn(arg);
if (done) {
@@ -495,6 +514,8 @@ repeat:
cpu_stop_signal_done(done);
}
preempt_count_dec();
+ stopper->fn = NULL;
+ stopper->caller = 0;
WARN_ONCE(preempt_count(),
"cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
goto repeat;
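A brief, hypothetical caller of stop_one_cpu(), included to show what the new .caller/.fn bookkeeping buys: if the stopper gets stuck in the callback, print_stop_info() can report "Stopper: drain_cpu_state <- drain_one" instead of an anonymous stopper thread.

#include <linux/stop_machine.h>

struct my_ctx;	/* hypothetical per-call context */

/* Runs in the stopper thread on @cpu, preemption disabled, must not sleep. */
static int drain_cpu_state(void *arg)
{
	struct my_ctx *ctx = arg;

	(void)ctx;	/* ... per-CPU work that must not race with regular tasks ... */
	return 0;
}

static int drain_one(int cpu, struct my_ctx *ctx)
{
	/* _RET_IP_ of this call is what ends up in stopper->caller. */
	return stop_one_cpu(cpu, drain_cpu_state, ctx);
}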
diff --git a/kernel/sys.c b/kernel/sys.c
index a730c03ee607..6928d23c46ea 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -24,7 +24,6 @@
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
-#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
@@ -42,6 +41,7 @@
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
+#include <linux/syscall_user_dispatch.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -2530,6 +2530,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
break;
+ case PR_SET_SYSCALL_USER_DISPATCH:
+ error = set_syscall_user_dispatch(arg2, arg3, arg4,
+ (char __user *) arg5);
+ break;
default:
error = -EINVAL;
break;
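A userspace sketch of the new prctl: while the selector byte is set to BLOCK, any syscall issued outside the optional always-allowed code region raises SIGSYS, which is how emulators intercept guest syscalls while still letting their own libc run. The fallback #define values mirror the uapi header introduced by this series and should be verified against <linux/prctl.h>; the zero-length region below is a simplification (real users typically pass their libc mapping).

#include <signal.h>
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SYSCALL_USER_DISPATCH
#define PR_SET_SYSCALL_USER_DISPATCH	59
#define PR_SYS_DISPATCH_OFF		0
#define PR_SYS_DISPATCH_ON		1
#define SYSCALL_DISPATCH_FILTER_ALLOW	0
#define SYSCALL_DISPATCH_FILTER_BLOCK	1
#endif

static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* A trapped syscall lands here; info->si_syscall holds its number. */
	(void)sig; (void)info; (void)ucontext;
}

int main(void)
{
	struct sigaction act = { .sa_sigaction = sigsys_handler,
				 .sa_flags = SA_SIGINFO };

	sigaction(SIGSYS, &act, NULL);

	/* offset/len of 0: no always-allowed region, the selector decides. */
	if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		  0UL, 0UL, &selector))
		perror("prctl");

	selector = SYSCALL_DISPATCH_FILTER_BLOCK;	/* start trapping */
	/* ... run code whose syscalls should be intercepted ... */
	selector = SYSCALL_DISPATCH_FILTER_ALLOW;	/* stop trapping */
	return 0;
}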
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index f27ac94d5fa7..19aa806890d5 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -68,6 +68,8 @@ COND_SYSCALL(epoll_create1);
COND_SYSCALL(epoll_ctl);
COND_SYSCALL(epoll_pwait);
COND_SYSCALL_COMPAT(epoll_pwait);
+COND_SYSCALL(epoll_pwait2);
+COND_SYSCALL_COMPAT(epoll_pwait2);
/* fs/fcntl.c */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index afad085960b8..c9fbdd848138 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2650,6 +2650,17 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
+#if defined(CONFIG_TREE_RCU)
+ {
+ .procname = "max_rcu_stall_to_panic",
+ .data = &sysctl_max_rcu_stall_to_panic,
+ .maxlen = sizeof(sysctl_max_rcu_stall_to_panic),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+#endif
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
{
.procname = "stack_erasing",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 8d6e1217c451..9cde961875c0 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -33,7 +33,6 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
enum task_work_notify_mode notify)
{
struct callback_head *head;
- unsigned long flags;
do {
head = READ_ONCE(task->task_works);
@@ -49,17 +48,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
set_notify_resume(task);
break;
case TWA_SIGNAL:
- /*
- * Only grab the sighand lock if we don't already have some
- * task_work pending. This pairs with the smp_store_mb()
- * in get_signal(), see comment there.
- */
- if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
- lock_task_sighand(task, &flags)) {
- task->jobctl |= JOBCTL_TASK_WORK;
- signal_wake_up(task, 0);
- unlock_task_sighand(task, &flags);
- }
+ set_notify_signal(task);
break;
default:
WARN_ON_ONCE(1);
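A hedged kernel-style sketch of the caller side of TWA_SIGNAL, with hypothetical names; the point of the change above is that queueing now only sets TIF_NOTIFY_SIGNAL via set_notify_signal() instead of taking ->sighand->siglock and faking a signal.

#include <linux/sched.h>
#include <linux/task_work.h>

/* Runs in the context of the target task, e.g. before returning to user mode. */
static void my_deferred_fn(struct callback_head *cb)
{
	/* ... complete work on behalf of the task ... */
}

static struct callback_head my_cb;

static int queue_for(struct task_struct *task)
{
	init_task_work(&my_cb, my_deferred_fn);
	/* TWA_SIGNAL kicks the task out of interruptible waits promptly. */
	return task_work_add(task, &my_cb, TWA_SIGNAL);
}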
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index a2802b6ff4bb..2b4898b4752e 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -346,7 +346,7 @@ static int parse(struct nlattr *na, struct cpumask *mask)
data = kmalloc(len, GFP_KERNEL);
if (!data)
return -ENOMEM;
- nla_strlcpy(data, na, len);
+ nla_strscpy(data, na, len);
ret = cpulist_parse(data, mask);
kfree(data);
return ret;
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index a09b1d61df6a..83e158d016ba 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -26,13 +26,9 @@ config CLOCKSOURCE_VALIDATE_LAST_CYCLE
config GENERIC_TIME_VSYSCALL
bool
-# Old style timekeeping
-config ARCH_USES_GETTIMEOFFSET
- bool
-
# The generic clock events infrastructure
config GENERIC_CLOCKEVENTS
- bool
+ def_bool !LEGACY_TIMER_TICK
# Architecture can handle broadcast in a driver-agnostic way
config ARCH_HAS_TICK_BROADCAST
@@ -61,6 +57,13 @@ config POSIX_CPU_TIMERS_TASK_WORK
bool
default y if POSIX_TIMERS && HAVE_POSIX_CPU_TIMERS_TASK_WORK
+config LEGACY_TIMER_TICK
+ bool
+ help
+ The legacy timer tick helper is used by platforms that
+ lack support for the generic clockevent framework.
+ New platforms should use generic clockevents instead.
+
if GENERIC_CLOCKEVENTS
menu "Timers subsystem"
@@ -72,7 +75,6 @@ config TICK_ONESHOT
config NO_HZ_COMMON
bool
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
select TICK_ONESHOT
choice
@@ -87,7 +89,6 @@ config HZ_PERIODIC
config NO_HZ_IDLE
bool "Idle dynticks system (tickless idle)"
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
select NO_HZ_COMMON
help
This option enables a tickless idle system: timer interrupts
@@ -99,7 +100,6 @@ config NO_HZ_IDLE
config NO_HZ_FULL
bool "Full dynticks system (tickless)"
# NO_HZ_COMMON dependency
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
# We need at least one periodic CPU for timekeeping
depends on SMP
depends on HAVE_CONTEXT_TRACKING
@@ -141,7 +141,7 @@ config CONTEXT_TRACKING_FORCE
dynticks working.
This option stands for testing when an arch implements the
- context tracking backend but doesn't yet fullfill all the
+ context tracking backend but doesn't yet fulfill all the
requirements to make the full dynticks feature working.
Without the full dynticks, there is no way to test the support
for context tracking and the subsystems that rely on it: RCU
@@ -158,7 +158,6 @@ config CONTEXT_TRACKING_FORCE
config NO_HZ
bool "Old Idle dynticks config"
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
help
This is the old config entry that enables dynticks idle.
We keep it around for a little while to enforce backward
@@ -166,7 +165,6 @@ config NO_HZ
config HIGH_RES_TIMERS
bool "High Resolution Timer Support"
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
select TICK_ONESHOT
help
This option enables high resolution timer support. If your
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index c8f00168afe8..1fb1c1ef6a19 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -16,6 +16,7 @@ ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
endif
obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
+obj-$(CONFIG_LEGACY_TIMER_TICK) += tick-legacy.o
obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index f4ace1bf8382..98d7a15e8cf6 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -527,8 +527,11 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
/**
* alarm_handle_timer - Callback for posix timers
* @alarm: alarm that fired
+ * @now: time at the timer expiration
*
* Posix timer callback for expired alarm timers.
+ *
+ * Return: whether the timer is to be restarted
*/
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
ktime_t now)
@@ -715,8 +718,11 @@ static int alarm_timer_create(struct k_itimer *new_timer)
/**
* alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep
* @alarm: ptr to alarm that fired
+ * @now: time at the timer expiration
*
* Wakes up the task that set the alarmtimer
+ *
+ * Return: ALARMTIMER_NORESTART
*/
static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
ktime_t now)
@@ -733,6 +739,7 @@ static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
* alarmtimer_do_nsleep - Internal alarmtimer nsleep implementation
* @alarm: ptr to alarmtimer
* @absexp: absolute expiration time
+ * @type: alarm type (BOOTTIME/REALTIME).
*
* Sets the alarm timer and sleeps until it is fired or interrupted.
*/
@@ -806,7 +813,6 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
* @which_clock: clockid
* @flags: determines abstime or relative
* @tsreq: requested sleep time (abs or rel)
- * @rmtp: remaining sleep time saved
*
* Handles clock_nanosleep calls against _ALARM clockids
*/
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 02441ead3c3b..cce484a2cc7c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -705,8 +705,6 @@ static inline void clocksource_update_max_deferment(struct clocksource *cs)
&cs->max_cycles);
}
-#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
-
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
struct clocksource *cs;
@@ -798,12 +796,6 @@ static void clocksource_select_fallback(void)
__clocksource_select(true);
}
-#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
-static inline void clocksource_select(void) { }
-static inline void clocksource_select_fallback(void) { }
-
-#endif
-
/*
* clocksource_done_booting - Called near the end of core bootup
*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3624b9b5835d..743c852e10f2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -425,11 +425,6 @@ static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
debug_object_deactivate(timer, &hrtimer_debug_descr);
}
-static inline void debug_hrtimer_free(struct hrtimer *timer)
-{
- debug_object_free(timer, &hrtimer_debug_descr);
-}
-
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode);
@@ -1289,7 +1284,7 @@ int hrtimer_cancel(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
- * hrtimer_get_remaining - get remaining time for the timer
+ * __hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index ca4e6d57d68b..00629e658ca1 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -172,10 +172,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
u64 oval, nval, ointerval, ninterval;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
- /*
- * Use the to_ktime conversion because that clamps the maximum
- * value to KTIME_MAX and avoid multiplication overflows.
- */
nval = timespec64_to_ns(&value->it_value);
ninterval = timespec64_to_ns(&value->it_interval);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index eddcf4970444..a5cffe2a1770 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -59,7 +59,8 @@ static struct clocksource clocksource_jiffies = {
};
__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
-__cacheline_aligned_in_smp seqcount_t jiffies_seq;
+__cacheline_aligned_in_smp seqcount_raw_spinlock_t jiffies_seq =
+ SEQCNT_RAW_SPINLOCK_ZERO(jiffies_seq, &jiffies_lock);
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c
index afc65e6be33e..12eab0d2ae28 100644
--- a/kernel/time/namespace.c
+++ b/kernel/time/namespace.c
@@ -92,7 +92,7 @@ static struct time_namespace *clone_time_ns(struct user_namespace *user_ns,
if (!ns)
goto fail_dec;
- kref_init(&ns->kref);
+ refcount_set(&ns->ns.count, 1);
ns->vvar_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!ns->vvar_page)
@@ -226,11 +226,8 @@ out:
mutex_unlock(&offset_lock);
}
-void free_time_ns(struct kref *kref)
+void free_time_ns(struct time_namespace *ns)
{
- struct time_namespace *ns;
-
- ns = container_of(kref, struct time_namespace, kref);
dec_time_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
@@ -308,22 +305,20 @@ static int timens_install(struct nsset *nsset, struct ns_common *new)
return 0;
}
-int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk)
+void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk)
{
struct ns_common *nsc = &nsproxy->time_ns_for_children->ns;
struct time_namespace *ns = to_time_ns(nsc);
/* create_new_namespaces() already incremented the ref counter */
if (nsproxy->time_ns == nsproxy->time_ns_for_children)
- return 0;
+ return;
get_time_ns(ns);
put_time_ns(nsproxy->time_ns);
nsproxy->time_ns = ns;
timens_commit(tsk, ns);
-
- return 0;
}
static struct user_namespace *timens_owner(struct ns_common *ns)
@@ -464,15 +459,9 @@ const struct proc_ns_operations timens_for_children_operations = {
};
struct time_namespace init_time_ns = {
- .kref = KREF_INIT(3),
+ .ns.count = REFCOUNT_INIT(3),
.user_ns = &init_user_ns,
.ns.inum = PROC_TIME_INIT_INO,
.ns.ops = &timens_operations,
.frozen_offsets = true,
};
-
-static int __init time_ns_init(void)
-{
- return 0;
-}
-subsys_initcall(time_ns_init);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 069ca78fb0bf..5247afd7f345 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -494,65 +494,74 @@ out:
return leap;
}
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
-static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);
-
-static void sched_sync_hw_clock(struct timespec64 now,
- unsigned long target_nsec, bool fail)
+static DECLARE_WORK(sync_work, sync_hw_clock);
+static struct hrtimer sync_hrtimer;
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
+static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
- struct timespec64 next;
-
- ktime_get_real_ts64(&next);
- if (!fail)
- next.tv_sec = 659;
- else {
- /*
- * Try again as soon as possible. Delaying long periods
- * decreases the accuracy of the work queue timer. Due to this
- * the algorithm is very likely to require a short-sleep retry
- * after the above long sleep to synchronize ts_nsec.
- */
- next.tv_sec = 0;
- }
-
- /* Compute the needed delay that will get to tv_nsec == target_nsec */
- next.tv_nsec = target_nsec - next.tv_nsec;
- if (next.tv_nsec <= 0)
- next.tv_nsec += NSEC_PER_SEC;
- if (next.tv_nsec >= NSEC_PER_SEC) {
- next.tv_sec++;
- next.tv_nsec -= NSEC_PER_SEC;
- }
+ queue_work(system_freezable_power_efficient_wq, &sync_work);
- queue_delayed_work(system_power_efficient_wq, &sync_work,
- timespec64_to_jiffies(&next));
+ return HRTIMER_NORESTART;
}
-static void sync_rtc_clock(void)
+static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
- unsigned long target_nsec;
- struct timespec64 adjust, now;
- int rc;
+ ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
- if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
- return;
+ if (retry)
+ exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
+ else
+ exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
- ktime_get_real_ts64(&now);
+ hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
+}
- adjust = now;
- if (persistent_clock_is_local)
- adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+/*
+ * Check whether @now is correct versus the required time to update the RTC
+ * and calculate the value which needs to be written to the RTC so that the
+ * next seconds increment of the RTC after the write is aligned with the next
+ * seconds increment of clock REALTIME.
+ *
+ * tsched     t1    write(t2.tv_sec - 1sec)    t2 RTC increments seconds
+ *
+ * t2.tv_nsec == 0
+ * tsched = t2 - set_offset_nsec
+ * newval = t2 - NSEC_PER_SEC
+ *
+ * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ *
+ * As the execution of this code is not guaranteed to happen exactly at
+ * tsched, this allows it to happen within a fuzzy region:
+ *
+ * abs(now - tsched) < FUZZ
+ *
+ * If @now is not inside the allowed window the function returns false.
+ */
+static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
+ struct timespec64 *to_set,
+ const struct timespec64 *now)
+{
+ /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
+ const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
+ struct timespec64 delay = {.tv_sec = -1,
+ .tv_nsec = set_offset_nsec};
- /*
- * The current RTC in use will provide the target_nsec it wants to be
- * called at, and does rtc_tv_nsec_ok internally.
- */
- rc = rtc_set_ntp_time(adjust, &target_nsec);
- if (rc == -ENODEV)
- return;
+ *to_set = timespec64_add(*now, delay);
+
+ if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
+ to_set->tv_nsec = 0;
+ return true;
+ }
- sched_sync_hw_clock(now, target_nsec, rc);
+ if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
+ to_set->tv_sec++;
+ to_set->tv_nsec = 0;
+ return true;
+ }
+ return false;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
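rtc_tv_nsec_ok() above decides whether @now falls inside the window in which writing 'to_set' to the RTC makes its next seconds increment line up with CLOCK_REALTIME. The userspace sketch below reproduces that window check; the 10 ms fuzz and the helper names are assumptions made for the example (the kernel uses 5 jiffies).

/* Userspace sketch of the RTC "write window" check.
 * Assumes a 10 ms fuzz instead of 5 jiffies; purely illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static struct timespec ts_add(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

	if (r.tv_nsec >= NSEC_PER_SEC) {	/* normalize */
		r.tv_sec++;
		r.tv_nsec -= NSEC_PER_SEC;
	}
	return r;
}

static bool rtc_window_ok(long set_offset_nsec, const struct timespec *now,
			  struct timespec *to_set)
{
	const long FUZZ = 10 * 1000 * 1000;	/* 10 ms, assumed */
	struct timespec delay = { .tv_sec = -1, .tv_nsec = set_offset_nsec };

	*to_set = ts_add(*now, delay);

	if (to_set->tv_nsec < FUZZ) {			/* just past a second boundary */
		to_set->tv_nsec = 0;
		return true;
	}
	if (to_set->tv_nsec > NSEC_PER_SEC - FUZZ) {	/* just before the next one */
		to_set->tv_sec++;
		to_set->tv_nsec = 0;
		return true;
	}
	return false;					/* outside the window, retry later */
}

int main(void)
{
	struct timespec now, to_set;

	clock_gettime(CLOCK_REALTIME, &now);
	printf("in window: %d\n", rtc_window_ok(NSEC_PER_SEC / 2, &now, &to_set));
	return 0;
}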
@@ -560,48 +569,47 @@ int __weak update_persistent_clock64(struct timespec64 now64)
{
return -ENODEV;
}
+#else
+static inline int update_persistent_clock64(struct timespec64 now64)
+{
+ return -ENODEV;
+}
#endif
-static bool sync_cmos_clock(void)
+#ifdef CONFIG_RTC_SYSTOHC
+/* Save NTP synchronized time to the RTC */
+static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
- static bool no_cmos;
- struct timespec64 now;
- struct timespec64 adjust;
- int rc = -EPROTO;
- long target_nsec = NSEC_PER_SEC / 2;
+ struct rtc_device *rtc;
+ struct rtc_time tm;
+ int err = -ENODEV;
- if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
- return false;
+ rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
+ if (!rtc)
+ return -ENODEV;
- if (no_cmos)
- return false;
+ if (!rtc->ops || !rtc->ops->set_time)
+ goto out_close;
- /*
- * Historically update_persistent_clock64() has followed x86
- * semantics, which match the MC146818A/etc RTC. This RTC will store
- * 'adjust' and then in .5s it will advance once second.
- *
- * Architectures are strongly encouraged to use rtclib and not
- * implement this legacy API.
- */
- ktime_get_real_ts64(&now);
- if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
- if (persistent_clock_is_local)
- adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
- rc = update_persistent_clock64(adjust);
- /*
- * The machine does not support update_persistent_clock64 even
- * though it defines CONFIG_GENERIC_CMOS_UPDATE.
- */
- if (rc == -ENODEV) {
- no_cmos = true;
- return false;
- }
+ /* First call might not have the correct offset */
+ if (*offset_nsec == rtc->set_offset_nsec) {
+ rtc_time64_to_tm(to_set->tv_sec, &tm);
+ err = rtc_set_time(rtc, &tm);
+ } else {
+ /* Store the update offset and let the caller try again */
+ *offset_nsec = rtc->set_offset_nsec;
+ err = -EAGAIN;
}
-
- sched_sync_hw_clock(now, target_nsec, rc);
- return true;
+out_close:
+ rtc_class_close(rtc);
+ return err;
+}
+#else
+static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
+{
+ return -ENODEV;
}
+#endif
/*
* If we have an externally synchronized Linux clock, then update RTC clock
@@ -613,24 +621,64 @@ static bool sync_cmos_clock(void)
*/
static void sync_hw_clock(struct work_struct *work)
{
- if (!ntp_synced())
- return;
+ /*
+ * The default synchronization offset is 500ms for the deprecated
+ * update_persistent_clock64() under the assumption that it uses
+ * the infamous CMOS clock (MC146818).
+ */
+ static unsigned long offset_nsec = NSEC_PER_SEC / 2;
+ struct timespec64 now, to_set;
+ int res = -EAGAIN;
- if (sync_cmos_clock())
+ /*
+ * Don't update if STA_UNSYNC is set, or if ntp_notify_cmos_timer()
+ * managed to schedule the work between the timer firing and the work
+ * being able to rearm the timer. In that case, wait for the timer to expire.
+ */
+ if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
return;
- sync_rtc_clock();
+ ktime_get_real_ts64(&now);
+ /* If @now is not in the allowed window, try again */
+ if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
+ goto rearm;
+
+ /* Take timezone adjusted RTCs into account */
+ if (persistent_clock_is_local)
+ to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);
+
+ /* Try the legacy RTC first. */
+ res = update_persistent_clock64(to_set);
+ if (res != -ENODEV)
+ goto rearm;
+
+ /* Try the RTC class */
+ res = update_rtc(&to_set, &offset_nsec);
+ if (res == -ENODEV)
+ return;
+rearm:
+ sched_sync_hw_clock(offset_nsec, res != 0);
}
void ntp_notify_cmos_timer(void)
{
- if (!ntp_synced())
- return;
+ /*
+ * If the work is currently being executed but has not yet rearmed
+ * the timer, this queues the work again immediately. Not a big
+ * issue, just a pointless extra work item.
+ */
+ if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
+ queue_work(system_freezable_power_efficient_wq, &sync_work);
+}
- if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
- IS_ENABLED(CONFIG_RTC_SYSTOHC))
- queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
+static void __init ntp_init_cmos_sync(void)
+{
+ hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ sync_hrtimer.function = sync_timer_callback;
}
+#else /* !defined(CONFIG_GENERIC_CMOS_UPDATE) && !defined(CONFIG_RTC_SYSTOHC) */
+static inline void __init ntp_init_cmos_sync(void) { }
+#endif /* !defined(CONFIG_GENERIC_CMOS_UPDATE) && !defined(CONFIG_RTC_SYSTOHC) */
/*
* Propagate a new txc->status value into the NTP state:
@@ -1044,4 +1092,5 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
void __init ntp_init(void)
{
ntp_clear();
+ ntp_init_cmos_sync();
}
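The reworked flow schedules sync_hw_clock() via an absolute-time hrtimer: the next attempt is aligned to a whole second minus the RTC's write offset, roughly every 11 minutes, or about 2 seconds later when the previous attempt has to be retried. A minimal userspace sketch of that expiry calculation follows; the helper name is invented.

/* Userspace sketch of the expiry math in sched_sync_hw_clock(): align the
 * next attempt to a whole second minus the RTC's write offset, using an
 * 11 minute period (or ~2 s for a retry). Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000ULL
#define SYNC_PERIOD_NS	(11ULL * 60 * NSEC_PER_SEC)

static uint64_t next_sync_expiry_ns(uint64_t now_real_sec,
				    uint64_t offset_nsec, bool retry)
{
	uint64_t exp = now_real_sec * NSEC_PER_SEC;	/* whole-second base */

	if (retry)
		exp += 2 * NSEC_PER_SEC - offset_nsec;	/* quick retry */
	else
		exp += SYNC_PERIOD_NS - offset_nsec;	/* regular period */
	return exp;
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	printf("next attempt at %llu ns (CLOCK_REALTIME)\n",
	       (unsigned long long)next_sync_expiry_ns(now.tv_sec,
						       NSEC_PER_SEC / 2, false));
	return 0;
}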
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 908ecaa65fc3..23d1b74c3065 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -12,4 +12,11 @@ extern int __do_adjtimex(struct __kernel_timex *txc,
const struct timespec64 *ts,
s32 *time_tai, struct audit_ntp_data *ad);
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
+
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
+extern void ntp_notify_cmos_timer(void);
+#else
+static inline void ntp_notify_cmos_timer(void) { }
+#endif
+
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0642013dace4..b1b9b12899f5 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -68,13 +68,13 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
return (cyc * mult) >> shift;
}
-struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
*seq = raw_read_seqcount_latch(&cd.seq);
return cd.read_data + (*seq & 1);
}
-int sched_clock_read_retry(unsigned int seq)
+notrace int sched_clock_read_retry(unsigned int seq)
{
return read_seqcount_latch_retry(&cd.seq, seq);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 36d7464c8962..5a23829372c7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
clockevents_program_event(dev, next, true);
}
@@ -877,6 +877,22 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
}
}
+static inline ktime_t tick_get_next_period(void)
+{
+ ktime_t next;
+
+ /*
+ * Protect against concurrent updates (store/load tearing on
+ * 32bit). It does not matter if the time is already in the
+ * past. The broadcast device which is about to be programmed will
+ * fire in any case.
+ */
+ raw_spin_lock(&jiffies_lock);
+ next = tick_next_period;
+ raw_spin_unlock(&jiffies_lock);
+ return next;
+}
+
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
@@ -905,10 +921,11 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
tick_broadcast_oneshot_mask, tmpmask);
if (was_periodic && !cpumask_empty(tmpmask)) {
+ ktime_t nextevt = tick_get_next_period();
+
clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
- tick_broadcast_init_next_event(tmpmask,
- tick_next_period);
- tick_broadcast_set_event(bc, cpu, tick_next_period);
+ tick_broadcast_init_next_event(tmpmask, nextevt);
+ tick_broadcast_set_event(bc, cpu, nextevt);
} else
bc->next_event = KTIME_MAX;
} else {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 6c9c342dd0e5..9d3a22510bab 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -27,10 +27,11 @@
*/
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
- * Tick next event: keeps track of the tick time
+ * Tick next event: keeps track of the tick time. It's updated by the
+ * CPU which handles the tick and is protected by jiffies_lock. There is
+ * no requirement to hold the jiffies seqcount write side for it.
*/
ktime_t tick_next_period;
-ktime_t tick_period;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -88,7 +89,7 @@ static void tick_periodic(int cpu)
write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
+ tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
write_seqcount_end(&jiffies_seq);
@@ -127,7 +128,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
@@ -173,7 +174,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -220,7 +221,6 @@ static void tick_setup_device(struct tick_device *td,
tick_do_timer_cpu = cpu;
tick_next_period = ktime_get();
- tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
@@ -407,17 +407,13 @@ EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
/*
* Transfer the do_timer job away from a dying cpu.
*
- * Called with interrupts disabled. Not locking required. If
+ * Called with interrupts disabled. No locking required. If
* tick_do_timer_cpu is owned by this cpu, nothing can change it.
*/
void tick_handover_do_timer(void)
{
- if (tick_do_timer_cpu == smp_processor_id()) {
- int cpu = cpumask_first(cpu_online_mask);
-
- tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
- TICK_DO_TIMER_NONE;
- }
+ if (tick_do_timer_cpu == smp_processor_id())
+ tick_do_timer_cpu = cpumask_first(cpu_online_mask);
}
/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7b2496136729..7a981c9e87a4 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,7 +15,6 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
diff --git a/kernel/time/tick-legacy.c b/kernel/time/tick-legacy.c
new file mode 100644
index 000000000000..af225b32f5b3
--- /dev/null
+++ b/kernel/time/tick-legacy.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer tick function for architectures that lack generic clockevents,
+ * consolidated here from m68k/ia64/parisc/arm.
+ */
+
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/timekeeper_internal.h>
+
+#include "tick-internal.h"
+
+/**
+ * legacy_timer_tick() - advances the timekeeping infrastructure
+ * @ticks: number of ticks that have elapsed since the last call.
+ *
+ * This is used by platforms that have not been converted to
+ * generic clockevents.
+ *
+ * If 'ticks' is zero, the CPU is not handling timekeeping, so
+ * only perform process accounting and profiling.
+ *
+ * Must be called with interrupts disabled.
+ */
+void legacy_timer_tick(unsigned long ticks)
+{
+ if (ticks) {
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
+ do_timer(ticks);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
+}
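For context, an architecture that still lacks generic clockevents would call legacy_timer_tick() from its timer interrupt. The handler below is a hypothetical sketch, not part of this patch: the platform name, the IRQ wiring and the chosen headers are assumptions, and real code would also acknowledge its timer hardware.

/* Hypothetical legacy platform timer interrupt, illustrative only. */
#include <linux/interrupt.h>
#include <linux/timekeeping.h>

static irqreturn_t toyplat_timer_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the (imaginary) timer hardware here. */

	/*
	 * One period of HZ has elapsed; charge one tick. A platform that
	 * is not responsible for timekeeping on this CPU would pass 0.
	 */
	legacy_timer_tick(1);

	return IRQ_HANDLED;
}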
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 81632cd5e3b7..e10a4af88737 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
+#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
@@ -44,7 +45,9 @@ struct tick_sched *tick_get_tick_sched(int cpu)
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
- * The time, when the last jiffy update happened. Protected by jiffies_lock.
+ * The time when the last jiffy update happened. Write access must hold
+ * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
+ * consistent view of jiffies and last_jiffies_update.
*/
static ktime_t last_jiffies_update;
@@ -53,50 +56,97 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
- unsigned long ticks = 0;
- ktime_t delta;
+ unsigned long ticks = 1;
+ ktime_t delta, nextp;
/*
- * Do a quick check without holding jiffies_lock:
- * The READ_ONCE() pairs with two updates done later in this function.
+ * 64bit can do a quick check without holding jiffies lock and
+ * without looking at the sequence count. The smp_load_acquire()
+ * pairs with the update done later in this function.
+ *
+ * 32bit cannot do that because the store of tick_next_period
+ * consists of two 32bit stores and the first store could move it
+ * to a random point in the future.
*/
- delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
- if (delta < tick_period)
- return;
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ if (ktime_before(now, smp_load_acquire(&tick_next_period)))
+ return;
+ } else {
+ unsigned int seq;
+
+ /*
+ * Avoid contention on jiffies_lock and protect the quick
+ * check with the sequence count.
+ */
+ do {
+ seq = read_seqcount_begin(&jiffies_seq);
+ nextp = tick_next_period;
+ } while (read_seqcount_retry(&jiffies_seq, seq));
+
+ if (ktime_before(now, nextp))
+ return;
+ }
- /* Reevaluate with jiffies_lock held */
+ /* Quick check failed, i.e. update is required. */
raw_spin_lock(&jiffies_lock);
+ /*
+ * Reevaluate with the lock held. Another CPU might have done the
+ * update already.
+ */
+ if (ktime_before(now, tick_next_period)) {
+ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
+
write_seqcount_begin(&jiffies_seq);
- delta = ktime_sub(now, last_jiffies_update);
- if (delta >= tick_period) {
+ delta = ktime_sub(now, tick_next_period);
+ if (unlikely(delta >= TICK_NSEC)) {
+ /* Slow path for long idle sleep times */
+ s64 incr = TICK_NSEC;
- delta = ktime_sub(delta, tick_period);
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add(last_jiffies_update, tick_period));
+ ticks += ktime_divns(delta, incr);
- /* Slow path for long timeouts */
- if (unlikely(delta >= tick_period)) {
- s64 incr = ktime_to_ns(tick_period);
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ incr * ticks);
+ } else {
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ TICK_NSEC);
+ }
- ticks = ktime_divns(delta, incr);
+ /* Advance jiffies to complete the jiffies_seq protected job */
+ jiffies_64 += ticks;
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add_ns(last_jiffies_update,
- incr * ticks));
- }
- do_timer(++ticks);
+ /*
+ * Keep the tick_next_period variable up to date.
+ */
+ nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /*
+ * Pairs with smp_load_acquire() in the lockless quick
+ * check above and ensures that the update to jiffies_64 is
+ * not reordered vs. the store to tick_next_period, neither
+ * by the compiler nor by the CPU.
+ */
+ smp_store_release(&tick_next_period, nextp);
} else {
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
- return;
+ /*
+ * A plain store is good enough on 32bit as the quick check
+ * above is protected by the sequence count.
+ */
+ tick_next_period = nextp;
}
+
+ /*
+ * Release the sequence count. calc_global_load() below is not
+ * protected by it, but jiffies_lock needs to be held to prevent
+ * concurrent invocations.
+ */
write_seqcount_end(&jiffies_seq);
+
+ calc_global_load();
+
raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
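The rewritten tick_do_update_jiffies64() starts from ticks = 1 and folds any additional full tick periods into a single update after a long idle period. The userspace sketch below shows just that arithmetic; HZ, the state layout and the function name are assumptions for the example.

/* Userspace sketch of the jiffies accumulation math: given the current
 * time and the pending tick boundary, compute how many ticks to add and
 * where the new boundary lands. HZ == 250 is assumed for the example.
 */
#include <inttypes.h>
#include <stdio.h>

#define TICK_NSEC 4000000ULL		/* 1/HZ in ns, HZ == 250 assumed */

struct jiffies_state {
	uint64_t jiffies64;
	uint64_t last_update_ns;	/* last_jiffies_update */
	uint64_t next_period_ns;	/* tick_next_period */
};

static void do_update_jiffies(struct jiffies_state *st, uint64_t now_ns)
{
	uint64_t ticks = 1;
	uint64_t delta;

	if (now_ns < st->next_period_ns)	/* quick check: nothing to do */
		return;

	delta = now_ns - st->next_period_ns;
	if (delta >= TICK_NSEC)			/* long idle: fold in the extra periods */
		ticks += delta / TICK_NSEC;

	st->last_update_ns += ticks * TICK_NSEC;
	st->jiffies64 += ticks;
	st->next_period_ns = st->last_update_ns + TICK_NSEC;
}

int main(void)
{
	struct jiffies_state st = { 1000, 0, TICK_NSEC };

	do_update_jiffies(&st, 10 * TICK_NSEC + 123);	/* slept ~10 ticks */
	printf("jiffies64=%" PRIu64 " next=%" PRIu64 "\n",
	       st.jiffies64, st.next_period_ns);
	return 0;
}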
@@ -243,10 +293,8 @@ static void nohz_full_kick_func(struct irq_work *work)
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}
-static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
- .func = nohz_full_kick_func,
- .flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
-};
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
+ IRQ_WORK_INIT_HARD(nohz_full_kick_func);
/*
* Kick this CPU if it's full dynticks in order to force it to
@@ -661,7 +709,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -941,13 +989,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
*/
if (tick_do_timer_cpu == cpu)
return false;
- /*
- * Boot safety: make sure the timekeeping duty has been
- * assigned before entering dyntick-idle mode,
- * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
- */
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
- return false;
/* Should not happen for nohz-full */
if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
@@ -1230,7 +1271,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
@@ -1267,7 +1308,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1333,7 +1374,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
- hrtimer_forward(timer, now, tick_period);
+ hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
@@ -1367,13 +1408,13 @@ void tick_setup_sched_timer(void)
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
- u64 offset = ktime_to_ns(tick_period) >> 1;
+ u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 589e0a552129..62e3b46717a6 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -70,10 +70,10 @@ static const unsigned short __mon_yday[2][13] = {
/**
* time64_to_tm - converts the calendar time to local broken-down time
*
- * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
* Coordinated Universal Time (UTC).
- * @offset offset seconds adding to totalsecs.
- * @result pointer to struct tm variable to receive broken-down time
+ * @offset: offset seconds adding to totalsecs.
+ * @result: pointer to struct tm variable to receive broken-down time
*/
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6858a31364b6..6aee5768c86f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -369,13 +369,6 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
/* Timekeeper helper functions. */
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-static u32 default_arch_gettimeoffset(void) { return 0; }
-u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
-#else
-static inline u32 arch_gettimeoffset(void) { return 0; }
-#endif
-
static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
u64 nsec;
@@ -383,8 +376,7 @@ static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 de
nsec = delta * tkr->mult + tkr->xtime_nsec;
nsec >>= tkr->shift;
- /* If arch requires, add in get_arch_timeoffset() */
- return nsec + arch_gettimeoffset();
+ return nsec;
}
static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
@@ -407,6 +399,7 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c
/**
* update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
* @tkr: Timekeeping readout base from which we take the update
+ * @tkf: Pointer to NMI safe timekeeper
*
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
@@ -436,6 +429,27 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
memcpy(base + 1, base, sizeof(*base));
}
+static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+{
+ struct tk_read_base *tkr;
+ unsigned int seq;
+ u64 now;
+
+ do {
+ seq = raw_read_seqcount_latch(&tkf->seq);
+ tkr = tkf->base + (seq & 0x01);
+ now = ktime_to_ns(tkr->base);
+
+ now += timekeeping_delta_to_ns(tkr,
+ clocksource_delta(
+ tk_clock_read(tkr),
+ tkr->cycle_last,
+ tkr->mask));
+ } while (read_seqcount_latch_retry(&tkf->seq, seq));
+
+ return now;
+}
+
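__ktime_get_fast_ns() uses the seqcount latch: two copies of the readout base, readers pick the copy selected by the low bit of the sequence and retry if the sequence moved underneath them. Below is a single-writer userspace model of the same pattern using C11 atomics; the payload accesses are left plain for brevity, which a strict C11 version would also make atomic.

/* Userspace model of the seqcount-latch read/write pattern.
 * Single writer assumed; illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct latch {
	atomic_uint seq;
	uint64_t data[2];
};

static struct latch l;		/* zero-initialized: seq even, both copies 0 */

static void latch_write(uint64_t val)
{
	/* Bump the sequence (odd): new readers use data[1], update data[0]. */
	atomic_fetch_add_explicit(&l.seq, 1, memory_order_release);
	l.data[0] = val;
	/* Bump again (even): new readers use data[0], update data[1]. */
	atomic_fetch_add_explicit(&l.seq, 1, memory_order_release);
	l.data[1] = val;
}

static uint64_t latch_read(void)
{
	unsigned int seq;
	uint64_t val;

	do {
		seq = atomic_load_explicit(&l.seq, memory_order_acquire);
		val = l.data[seq & 1];	/* the copy not currently being written */
		/* Order the data read before the sequence re-check (smp_rmb). */
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&l.seq, memory_order_relaxed) != seq);

	return val;
}

int main(void)
{
	latch_write(42);
	printf("read %llu\n", (unsigned long long)latch_read());
	return 0;
}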
/**
* ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
*
@@ -462,39 +476,24 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
*
* So reader 6 will observe time going backwards versus reader 5.
*
- * While other CPUs are likely to be able observe that, the only way
+ * While other CPUs are likely to be able to observe that, the only way
* for a CPU local observation is when an NMI hits in the middle of
* the update. Timestamps taken from that NMI context might be ahead
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
-static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
-{
- struct tk_read_base *tkr;
- unsigned int seq;
- u64 now;
-
- do {
- seq = raw_read_seqcount_latch(&tkf->seq);
- tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base);
-
- now += timekeeping_delta_to_ns(tkr,
- clocksource_delta(
- tk_clock_read(tkr),
- tkr->cycle_last,
- tkr->mask));
- } while (read_seqcount_latch_retry(&tkf->seq, seq));
-
- return now;
-}
-
u64 ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
+/**
+ * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
+ *
+ * Contrary to ktime_get_mono_fast_ns() this is always correct because the
+ * conversion factor is not affected by NTP/PTP correction.
+ */
u64 ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
@@ -521,6 +520,9 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
+ *
+ * The caveats about timestamp ordering documented for
+ * ktime_get_mono_fast_ns() apply as well.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
@@ -530,9 +532,6 @@ u64 notrace ktime_get_boot_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
-/*
- * See comment for __ktime_get_fast_ns() vs. timestamp ordering
- */
static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
{
struct tk_read_base *tkr;
@@ -557,6 +556,8 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
/**
* ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ *
+ * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
*/
u64 ktime_get_real_fast_ns(void)
{
@@ -654,6 +655,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
/**
* pvclock_gtod_register_notifier - register a pvclock timedata update listener
+ * @nb: Pointer to the notifier block to register
*/
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
@@ -673,6 +675,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
* pvclock_gtod_unregister_notifier - unregister a pvclock
* timedata update listener
+ * @nb: Pointer to the notifier block to unregister
*/
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
@@ -763,6 +766,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
/**
* timekeeping_forward_now - update clock to the current time
+ * @tk: Pointer to the timekeeper to update
*
* Forward the current clock to update its state since the last call to
* update_wall_time(). This is useful before significant clock changes,
@@ -778,16 +782,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
tk->tkr_raw.cycle_last = cycle_now;
tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-
- /* If arch requires, add in get_arch_timeoffset() */
- tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
-
-
tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
- /* If arch requires, add in get_arch_timeoffset() */
- tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
-
tk_normalize_xtime(tk);
}
@@ -995,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
/**
* ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
*
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
*
* For 64bit systems the fast access to tk->xtime_sec is preserved. On
* 32bit systems the access must be protected with the sequence
@@ -1339,7 +1334,7 @@ EXPORT_SYMBOL(do_settimeofday64);
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
- * @tv: pointer to the timespec variable containing the offset
+ * @ts: Pointer to the timespec variable containing the offset
*
* Adds or subtracts an offset value from the current time.
*/
@@ -1415,9 +1410,8 @@ void timekeeping_warp_clock(void)
}
}
-/**
+/*
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
- *
*/
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
@@ -1425,7 +1419,7 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
-/**
+/*
* change_clocksource - Swaps clocksources if a new one is available
*
* Accumulates current time interval and initializes new clocksource
@@ -1548,6 +1542,7 @@ u64 timekeeping_max_deferment(void)
/**
* read_persistent_clock64 - Return time from the persistent clock.
+ * @ts: Pointer to the storage for the readout value
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
@@ -1566,8 +1561,9 @@ void __weak read_persistent_clock64(struct timespec64 *ts)
* from the boot.
*
* Weak dummy function for arches that do not yet support it.
- * wall_time - current time as returned by persistent clock
- * boot_offset - offset that is defined as wall_time - boot_time
+ * @wall_time: current time as returned by persistent clock
+ * @boot_offset: offset that is defined as wall_time - boot_time
+ *
* The default function calculates offset based on the current value of
* local_clock(). This way architectures that support sched_clock() but don't
* support dedicated boot time clock will provide the best estimate of the
@@ -1652,7 +1648,8 @@ static struct timespec64 timekeeping_suspend_time;
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
- * @delta: pointer to a timespec delta value
+ * @tk: Pointer to the timekeeper to be updated
+ * @delta: Pointer to the delta value in timespec64 format
*
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
@@ -2023,13 +2020,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
}
-/**
+/*
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
* Helper function that accumulates the nsecs greater than a second
* from the xtime_nsec field to the xtime_secs field.
* It also calls into the NTP code to handle leapsecond processing.
- *
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
@@ -2071,7 +2067,7 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
return clock_set;
}
-/**
+/*
* logarithmic_accumulation - shifted accumulation of cycles
*
* This functions accumulates a shifted interval of cycles into
@@ -2133,19 +2129,12 @@ static void timekeeping_advance(enum timekeeping_adv_mode mode)
if (unlikely(timekeeping_suspended))
goto out;
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- offset = real_tk->cycle_interval;
-
- if (mode != TK_ADV_TICK)
- goto out;
-#else
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
/* Check if there's really nothing to do */
if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
goto out;
-#endif
/* Do some additional sanity checking */
timekeeping_check_update(tk, offset);
@@ -2314,7 +2303,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
return base;
}
-/**
+/*
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
static int timekeeping_validate_timex(const struct __kernel_timex *txc)
@@ -2461,19 +2450,3 @@ void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
}
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */
-
-/**
- * xtime_update() - advances the timekeeping infrastructure
- * @ticks: number of ticks, that have elapsed since the last call.
- *
- * Must be called with interrupts disabled.
- */
-void xtime_update(unsigned long ticks)
-{
- raw_spin_lock(&jiffies_lock);
- write_seqcount_begin(&jiffies_seq);
- do_timer(ticks);
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
- update_wall_time();
-}
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 099737f6f10c..543beba096c7 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -22,11 +22,12 @@ static inline int sched_clock_suspend(void) { return 0; }
static inline void sched_clock_resume(void) { }
#endif
+extern void update_process_times(int user);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
extern raw_spinlock_t jiffies_lock;
-extern seqcount_t jiffies_seq;
+extern seqcount_raw_spinlock_t jiffies_seq;
#define CS_NAME_LEN 32
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index de37e33a868d..f475f1a027c8 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -732,11 +732,6 @@ static inline void debug_timer_deactivate(struct timer_list *timer)
debug_object_deactivate(timer, &timer_debug_descr);
}
-static inline void debug_timer_free(struct timer_list *timer)
-{
- debug_object_free(timer, &timer_debug_descr);
-}
-
static inline void debug_timer_assert_init(struct timer_list *timer)
{
debug_object_assert_init(timer, &timer_debug_descr);
@@ -1242,6 +1237,20 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);
+bool timer_curr_running(struct timer_list *timer)
+{
+ int i;
+
+ for (i = 0; i < NR_BASES; i++) {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[i]);
+
+ if (base->running_timer == timer)
+ return true;
+ }
+
+ return false;
+}
+
#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
@@ -1288,7 +1297,7 @@ static void del_timer_wait_running(struct timer_list *timer)
u32 tf;
tf = READ_ONCE(timer->flags);
- if (!(tf & TIMER_MIGRATING)) {
+ if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
struct timer_base *base = get_timer_base(tf);
/*
@@ -1372,6 +1381,13 @@ int del_timer_sync(struct timer_list *timer)
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
+ /*
+ * Must be able to sleep on PREEMPT_RT because of the slowpath in
+ * del_timer_wait_running().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
+ lockdep_assert_preemption_enabled();
+
do {
ret = try_to_del_timer_sync(timer);
@@ -1698,29 +1714,6 @@ void timer_clear_idle(void)
}
#endif
-/*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
- */
-void update_process_times(int user_tick)
-{
- struct task_struct *p = current;
-
- PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
-
- /* Note: this timer irq context must be accounted for as well. */
- account_process_tick(p, user_tick);
- run_local_timers();
- rcu_sched_clock_irq(user_tick);
-#ifdef CONFIG_IRQ_WORK
- if (in_irq())
- irq_work_tick();
-#endif
- scheduler_tick();
- if (IS_ENABLED(CONFIG_POSIX_TIMERS))
- run_posix_cpu_timers();
-}
-
/**
* __run_timers - run all expired timers (if any) on this CPU.
* @base: the timer vector to be processed.
@@ -1770,7 +1763,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
/*
* Called by the local, per-CPU timer interrupt on SMP.
*/
-void run_local_timers(void)
+static void run_local_timers(void)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
@@ -1788,6 +1781,29 @@ void run_local_timers(void)
}
/*
+ * Called from the timer interrupt handler to charge one tick to the current
+ * process. user_tick is 1 if the tick is user time, 0 for system.
+ */
+void update_process_times(int user_tick)
+{
+ struct task_struct *p = current;
+
+ PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
+
+ /* Note: this timer irq context must be accounted for as well. */
+ account_process_tick(p, user_tick);
+ run_local_timers();
+ rcu_sched_clock_irq(user_tick);
+#ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_tick();
+#endif
+ scheduler_tick();
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ run_posix_cpu_timers();
+}
+
+/*
* Since schedule_timeout()'s timer is defined on the stack, it must store
* the target task on the stack as well.
*/
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index acb326f5f50a..6939140ab7c5 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -42,24 +42,11 @@ static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
va_end(args);
}
-static void print_name_offset(struct seq_file *m, void *sym)
-{
- char symname[KSYM_NAME_LEN];
-
- if (lookup_symbol_name((unsigned long)sym, symname) < 0)
- SEQ_printf(m, "<%pK>", sym);
- else
- SEQ_printf(m, "%s", symname);
-}
-
static void
print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
int idx, u64 now)
{
- SEQ_printf(m, " #%d: ", idx);
- print_name_offset(m, taddr);
- SEQ_printf(m, ", ");
- print_name_offset(m, timer->function);
+ SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function);
SEQ_printf(m, ", S:%02x", timer->state);
SEQ_printf(m, "\n");
SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
@@ -116,9 +103,7 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution);
- SEQ_printf(m, " .get_time: ");
- print_name_offset(m, base->get_time);
- SEQ_printf(m, "\n");
+ SEQ_printf(m, " .get_time: %ps\n", base->get_time);
#ifdef CONFIG_HIGH_RES_TIMERS
SEQ_printf(m, " .offset: %Lu nsecs\n",
(unsigned long long) ktime_to_ns(base->offset));
@@ -218,42 +203,29 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));
- SEQ_printf(m, " set_next_event: ");
- print_name_offset(m, dev->set_next_event);
- SEQ_printf(m, "\n");
+ SEQ_printf(m, " set_next_event: %ps\n", dev->set_next_event);
- if (dev->set_state_shutdown) {
- SEQ_printf(m, " shutdown: ");
- print_name_offset(m, dev->set_state_shutdown);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_shutdown)
+ SEQ_printf(m, " shutdown: %ps\n",
+ dev->set_state_shutdown);
- if (dev->set_state_periodic) {
- SEQ_printf(m, " periodic: ");
- print_name_offset(m, dev->set_state_periodic);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_periodic)
+ SEQ_printf(m, " periodic: %ps\n",
+ dev->set_state_periodic);
- if (dev->set_state_oneshot) {
- SEQ_printf(m, " oneshot: ");
- print_name_offset(m, dev->set_state_oneshot);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot)
+ SEQ_printf(m, " oneshot: %ps\n",
+ dev->set_state_oneshot);
- if (dev->set_state_oneshot_stopped) {
- SEQ_printf(m, " oneshot stopped: ");
- print_name_offset(m, dev->set_state_oneshot_stopped);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot_stopped)
+ SEQ_printf(m, " oneshot stopped: %ps\n",
+ dev->set_state_oneshot_stopped);
- if (dev->tick_resume) {
- SEQ_printf(m, " resume: ");
- print_name_offset(m, dev->tick_resume);
- SEQ_printf(m, "\n");
- }
+ if (dev->tick_resume)
+ SEQ_printf(m, " resume: %ps\n",
+ dev->tick_resume);
- SEQ_printf(m, " event_handler: ");
- print_name_offset(m, dev->event_handler);
+ SEQ_printf(m, " event_handler: %ps\n", dev->event_handler);
SEQ_printf(m, "\n");
SEQ_printf(m, " retries: %lu\n", dev->retries);
SEQ_printf(m, "\n");
diff --git a/kernel/torture.c b/kernel/torture.c
index 1061492f14bd..01e336f1e5b2 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -48,6 +48,12 @@ module_param(disable_onoff_at_boot, bool, 0444);
static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);
+static int verbose_sleep_frequency;
+module_param(verbose_sleep_frequency, int, 0444);
+
+static int verbose_sleep_duration = 1;
+module_param(verbose_sleep_duration, int, 0444);
+
static char *torture_type;
static int verbose;
@@ -58,6 +64,95 @@ static int verbose;
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
+static atomic_t verbose_sleep_counter;
+
+/*
+ * Sleep if needed from VERBOSE_TOROUT*().
+ */
+void verbose_torout_sleep(void)
+{
+ if (verbose_sleep_frequency > 0 &&
+ verbose_sleep_duration > 0 &&
+ !(atomic_inc_return(&verbose_sleep_counter) % verbose_sleep_frequency))
+ schedule_timeout_uninterruptible(verbose_sleep_duration);
+}
+EXPORT_SYMBOL_GPL(verbose_torout_sleep);
+
+/*
+ * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit
+ * nanosecond random fuzz. This function and its friends desynchronize
+ * testing from the timer wheel.
+ */
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
+{
+ ktime_t hto = baset_ns;
+
+ if (trsp)
+ hto += (torture_random(trsp) >> 3) % fuzzt_ns;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
+}
+EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
+
+/*
+ * Schedule a high-resolution-timer sleep in microseconds, with a 32-bit
+ * nanosecond (not microsecond!) random fuzz.
+ */
+int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp)
+{
+ ktime_t baset_ns = baset_us * NSEC_PER_USEC;
+
+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
+}
+EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
+
+/*
+ * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit
+ * microsecond (not millisecond!) random fuzz.
+ */
+int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp)
+{
+ ktime_t baset_ns = baset_ms * NSEC_PER_MSEC;
+ u32 fuzzt_ns;
+
+ if ((u32)~0U / NSEC_PER_USEC < fuzzt_us)
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
+}
+EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
+
+/*
+ * Schedule a high-resolution-timer sleep in jiffies, with an
+ * implied one-jiffy random fuzz. This is intended to replace calls to
+ * schedule_timeout_interruptible() and friends.
+ */
+int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
+{
+ ktime_t baset_ns = jiffies_to_nsecs(baset_j);
+
+ return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
+}
+EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
+
+/*
+ * Schedule a high-resolution-timer sleep in seconds, with a 32-bit
+ * millisecond (not second!) random fuzz.
+ */
+int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp)
+{
+ ktime_t baset_ns = baset_s * NSEC_PER_SEC;
+ u32 fuzzt_ns;
+
+ if ((u32)~0U / NSEC_PER_MSEC < fuzzt_ms)
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
+}
+EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
+
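torture_hrtimeout_ms() and torture_hrtimeout_s() clamp the fuzz conversion so that scaling a 32-bit value to nanoseconds saturates at U32_MAX instead of wrapping. A userspace sketch of that saturating conversion follows; the helper name is invented.

/* Userspace sketch of the saturating unit conversion: scale a 32-bit
 * fuzz value to nanoseconds and clamp at UINT32_MAX instead of wrapping.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000U
#define NSEC_PER_MSEC 1000000U

static uint32_t scale_u32_saturating(uint32_t val, uint32_t mult)
{
	/* Overflow check without doing the multiplication first. */
	if (val > UINT32_MAX / mult)
		return UINT32_MAX;
	return val * mult;
}

int main(void)
{
	printf("%u\n", scale_u32_saturating(5, NSEC_PER_MSEC));	   /* 5000000 */
	printf("%u\n", scale_u32_saturating(5000, NSEC_PER_MSEC)); /* clamps to UINT32_MAX */
	return 0;
}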
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -80,6 +175,19 @@ static unsigned long sum_online;
static int min_online = -1;
static int max_online;
+static int torture_online_cpus = NR_CPUS;
+
+/*
+ * Some torture testing leverages confusion as to the number of online
+ * CPUs. This function returns the torture-testing view of this number,
+ * which allows torture tests to load-balance appropriately.
+ */
+int torture_num_online_cpus(void)
+{
+ return READ_ONCE(torture_online_cpus);
+}
+EXPORT_SYMBOL_GPL(torture_num_online_cpus);
+
/*
* Attempt to take a CPU offline. Return false if the CPU is already
* offline or if it is not subject to CPU-hotplug operations. The
@@ -134,6 +242,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
*min_offl = delta;
if (*max_offl < delta)
*max_offl = delta;
+ WRITE_ONCE(torture_online_cpus, torture_online_cpus - 1);
+ WARN_ON_ONCE(torture_online_cpus <= 0);
}
return true;
@@ -190,6 +300,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
*min_onl = delta;
if (*max_onl < delta)
*max_onl = delta;
+ WRITE_ONCE(torture_online_cpus, torture_online_cpus + 1);
}
return true;
@@ -197,6 +308,26 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
EXPORT_SYMBOL_GPL(torture_online);
/*
+ * Get everything online at the beginning and ends of tests.
+ */
+static void torture_online_all(char *phase)
+{
+ int cpu;
+ int ret;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ ret = add_cpu(cpu);
+ if (ret && verbose) {
+ pr_alert("%s" TORTURE_FLAG
+ "%s: %s online %d: errno %d\n",
+ __func__, phase, torture_type, cpu, ret);
+ }
+ }
+}
+
+/*
* Execute random CPU-hotplug operations at the interval specified
* by the onoff_interval.
*/
@@ -206,25 +337,12 @@ torture_onoff(void *arg)
int cpu;
int maxcpu = -1;
DEFINE_TORTURE_RANDOM(rand);
- int ret;
VERBOSE_TOROUT_STRING("torture_onoff task started");
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
- if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
- for_each_possible_cpu(cpu) {
- if (cpu_online(cpu))
- continue;
- ret = add_cpu(cpu);
- if (ret && verbose) {
- pr_alert("%s" TORTURE_FLAG
- "%s: Initial online %d: errno %d\n",
- __func__, torture_type, cpu, ret);
- }
- }
- }
-
+ torture_online_all("Initial");
if (maxcpu == 0) {
VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
goto stop;
@@ -252,6 +370,7 @@ torture_onoff(void *arg)
stop:
torture_kthread_stopping("torture_onoff");
+ torture_online_all("Final");
return 0;
}
@@ -602,18 +721,25 @@ static int stutter_gap;
*/
bool stutter_wait(const char *title)
{
- int spt;
+ unsigned int i = 0;
bool ret = false;
+ int spt;
cond_resched_tasks_rcu_qs();
spt = READ_ONCE(stutter_pause_test);
for (; spt; spt = READ_ONCE(stutter_pause_test)) {
- ret = true;
+ if (!ret) {
+ sched_set_normal(current, MAX_NICE);
+ ret = true;
+ }
if (spt == 1) {
schedule_timeout_interruptible(1);
} else if (spt == 2) {
- while (READ_ONCE(stutter_pause_test))
+ while (READ_ONCE(stutter_pause_test)) {
+ if (!(i++ & 0xffff))
+ torture_hrtimeout_us(10, 0, NULL);
cond_resched();
+ }
} else {
schedule_timeout_interruptible(round_jiffies_relative(HZ));
}
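The spt == 2 branch above now sleeps for roughly 10 microseconds every 65536 iterations so a spinning waiter cannot monopolize a CPU while stuttering is in effect. A userspace sketch of the same throttled polling loop, with an invented flag and plain usleep() standing in for the hrtimer sleep:

/* Userspace sketch of a throttled busy-wait: poll a flag, but yield
 * ~10 us every 65536 iterations so the loop cannot hog a CPU.
 */
#include <stdatomic.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pause_flag = 2;	/* stands in for stutter_pause_test */

static void throttled_wait(void)
{
	unsigned int i = 0;

	while (atomic_load(&pause_flag) == 2) {
		if (!(i++ & 0xffff))
			usleep(10);	/* brief hrtimer-style sleep */
		sched_yield();		/* stands in for cond_resched() */
	}
}

int main(void)
{
	/* Single-threaded demo: clear the flag up front so the loop exits. */
	atomic_store(&pause_flag, 0);
	throttled_wait();
	printf("done\n");
	return 0;
}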
@@ -629,24 +755,25 @@ EXPORT_SYMBOL_GPL(stutter_wait);
*/
static int torture_stutter(void *arg)
{
+ DEFINE_TORTURE_RANDOM(rand);
int wtime;
VERBOSE_TOROUT_STRING("torture_stutter task started");
do {
if (!torture_must_stop() && stutter > 1) {
wtime = stutter;
- if (stutter > HZ + 1) {
+ if (stutter > 2) {
WRITE_ONCE(stutter_pause_test, 1);
- wtime = stutter - HZ - 1;
- schedule_timeout_interruptible(wtime);
- wtime = HZ + 1;
+ wtime = stutter - 3;
+ torture_hrtimeout_jiffies(wtime, &rand);
+ wtime = 2;
}
WRITE_ONCE(stutter_pause_test, 2);
- schedule_timeout_interruptible(wtime);
+ torture_hrtimeout_jiffies(wtime, NULL);
}
WRITE_ONCE(stutter_pause_test, 0);
if (!torture_must_stop())
- schedule_timeout_interruptible(stutter_gap);
+ torture_hrtimeout_jiffies(stutter_gap, NULL);
torture_shutdown_absorb("torture_stutter");
} while (!torture_must_stop());
torture_kthread_stopping("torture_stutter");
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a4020c0b4508..c1a62ae7e812 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -31,6 +31,15 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
bool
+config HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ bool
+ help
+ If this is set, then arguments and stack can be found from
+ the pt_regs passed into the function callback regs parameter
+ by default, even without setting the REGS flag in the ftrace_ops.
+ This allows for use of regs_get_kernel_argument() and
+ kernel_stack_pointer().
+
config HAVE_FTRACE_MCOUNT_RECORD
bool
help
@@ -202,7 +211,7 @@ config DYNAMIC_FTRACE_WITH_REGS
config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
def_bool y
- depends on DYNAMIC_FTRACE
+ depends on DYNAMIC_FTRACE_WITH_REGS
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
config FUNCTION_PROFILER
@@ -253,7 +262,6 @@ config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
depends on TRACE_IRQFLAGS_SUPPORT
- depends on !ARCH_USES_GETTIMEOFFSET
select TRACE_IRQFLAGS
select GENERIC_TRACER
select TRACER_MAX_TRACE
@@ -277,7 +285,6 @@ config IRQSOFF_TRACER
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
- depends on !ARCH_USES_GETTIMEOFFSET
depends on PREEMPTION
select GENERIC_TRACER
select TRACER_MAX_TRACE
@@ -531,7 +538,7 @@ config KPROBE_EVENTS
config KPROBE_EVENTS_ON_NOTRACE
bool "Do NOT protect notrace function from kprobe events"
depends on KPROBE_EVENTS
- depends on KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE
default n
help
This is only for the developers who want to debug ftrace itself
@@ -727,6 +734,45 @@ config TRACE_EVAL_MAP_FILE
If unsure, say N.
+config FTRACE_RECORD_RECURSION
+ bool "Record functions that recurse in function tracing"
+ depends on FUNCTION_TRACER
+ help
+ All callbacks that attach to the function tracer have some sort
+ of protection against recursion. Even though the protection exists,
+ it adds overhead. This option will create a file in the tracefs
+ file system called "recursed_functions" that lists the functions
+ that triggered a recursion.
+
+ This will add more overhead to cases that have recursion.
+
+ If unsure, say N.
+
+config FTRACE_RECORD_RECURSION_SIZE
+ int "Max number of recursed functions to record"
+ default 128
+ depends on FTRACE_RECORD_RECURSION
+ help
+ This defines the maximum number of functions that can be
+ listed in the "recursed_functions" file, which lists all
+ the functions that caused a recursion to happen.
+ The file can be reset, but the limit cannot be changed
+ at runtime.
+
+config RING_BUFFER_RECORD_RECURSION
+ bool "Record functions that recurse in the ring buffer"
+ depends on FTRACE_RECORD_RECURSION
+ # default y, because it is coupled with FTRACE_RECORD_RECURSION
+ default y
+ help
+ The ring buffer has its own internal recursion. Although recursion
+ does not cause harm because of the protection, it does add unwanted
+ overhead. Enabling this option will record the places where ring
+ buffer recursion was detected in the ftrace "recursed_functions"
+ file.
+
+ This will add more overhead to cases that have recursion.
+
config GCOV_PROFILE_FTRACE
bool "Enable GCOV profiling on ftrace subsystem"
depends on GCOV_KERNEL
@@ -797,6 +843,26 @@ config RING_BUFFER_STARTUP_TEST
If unsure, say N
+config RING_BUFFER_VALIDATE_TIME_DELTAS
+ bool "Verify ring buffer time stamp deltas"
+ depends on RING_BUFFER
+ help
+ This will audit the time stamps on the ring buffer sub
+ buffers to make sure that all the time deltas for the
+ events on a sub buffer match the current time stamp.
+ This audit is performed for every event that is not
+ interrupted by, or interrupting, another event. A check
+ is also made when traversing sub buffers to make sure
+ that all the deltas on the previous sub buffer do not
+ add up to more than the current time stamp.
+
+ NOTE: This adds significant overhead to recording of events,
+ and should only be used to test the logic of the ring buffer.
+ Do not use it on production systems.
+
+ Only say Y if you understand what this does, and you
+ still want it enabled. Otherwise, say N.
+
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e153be351548..7e44cea89fdc 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
+obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f1022945e346..9e9ee4945043 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -449,7 +449,7 @@ static struct dentry *blk_create_buf_file_callback(const char *filename,
&relay_file_operations);
}
-static struct rchan_callbacks blk_relay_callbacks = {
+static const struct rchan_callbacks blk_relay_callbacks = {
.subbuf_start = blk_subbuf_start_callback,
.create_buf_file = blk_create_buf_file_callback,
.remove_buf_file = blk_remove_buf_file_callback,
@@ -458,14 +458,9 @@ static struct rchan_callbacks blk_relay_callbacks = {
static void blk_trace_setup_lba(struct blk_trace *bt,
struct block_device *bdev)
{
- struct hd_struct *part = NULL;
-
- if (bdev)
- part = bdev->bd_part;
-
- if (part) {
- bt->start_lba = part->start_sect;
- bt->end_lba = part->start_sect + part->nr_sects;
+ if (bdev) {
+ bt->start_lba = bdev->bd_start_sect;
+ bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
} else {
bt->start_lba = 0;
bt->end_lba = -1ULL;
@@ -800,12 +795,12 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
#endif
static u64
-blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
+blk_trace_request_get_cgid(struct request *rq)
{
if (!rq->bio)
return 0;
/* Use the first bio */
- return blk_trace_bio_get_cgid(q, rq->bio);
+ return blk_trace_bio_get_cgid(rq->q, rq->bio);
}
/*
@@ -846,40 +841,35 @@ static void blk_add_trace_rq(struct request *rq, int error,
rcu_read_unlock();
}
-static void blk_add_trace_rq_insert(void *ignore,
- struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
-static void blk_add_trace_rq_issue(void *ignore,
- struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
-static void blk_add_trace_rq_merge(void *ignore,
- struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
-static void blk_add_trace_rq_requeue(void *ignore,
- struct request_queue *q,
- struct request *rq)
+static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
int error, unsigned int nr_bytes)
{
blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
- blk_trace_request_get_cgid(rq->q, rq));
+ blk_trace_request_get_cgid(rq));
}
/**
@@ -911,10 +901,9 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
rcu_read_unlock();
}
-static void blk_add_trace_bio_bounce(void *ignore,
- struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}
static void blk_add_trace_bio_complete(void *ignore,
@@ -924,63 +913,26 @@ static void blk_add_trace_bio_complete(void *ignore,
blk_status_to_errno(bio->bi_status));
}
-static void blk_add_trace_bio_backmerge(void *ignore,
- struct request_queue *q,
- struct request *rq,
- struct bio *bio)
+static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
+ 0);
}
-static void blk_add_trace_bio_frontmerge(void *ignore,
- struct request_queue *q,
- struct request *rq,
- struct bio *bio)
+static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
+ 0);
}
-static void blk_add_trace_bio_queue(void *ignore,
- struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}
-static void blk_add_trace_getrq(void *ignore,
- struct request_queue *q,
- struct bio *bio, int rw)
+static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
- if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
- else {
- struct blk_trace *bt;
-
- rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
- if (bt)
- __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
- NULL, 0);
- rcu_read_unlock();
- }
-}
-
-
-static void blk_add_trace_sleeprq(void *ignore,
- struct request_queue *q,
- struct bio *bio, int rw)
-{
- if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
- else {
- struct blk_trace *bt;
-
- rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
- if (bt)
- __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
- 0, 0, NULL, 0);
- rcu_read_unlock();
- }
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
@@ -1015,10 +967,9 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
rcu_read_unlock();
}
-static void blk_add_trace_split(void *ignore,
- struct request_queue *q, struct bio *bio,
- unsigned int pdu)
+static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
rcu_read_lock();
@@ -1039,20 +990,16 @@ static void blk_add_trace_split(void *ignore,
/**
* blk_add_trace_bio_remap - Add a trace for a bio-remap operation
* @ignore: trace callback data parameter (not used)
- * @q: queue the io is for
* @bio: the source bio
- * @dev: target device
+ * @dev: source device
* @from: source sector
*
- * Description:
- * Device mapper or raid target sometimes need to split a bio because
- * it spans a stripe (or similar). Add a trace for that action.
- *
+ * Called after a bio is remapped to a different device and/or sector.
**/
-static void blk_add_trace_bio_remap(void *ignore,
- struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
+ sector_t from)
{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
struct blk_io_trace_remap r;
@@ -1077,7 +1024,6 @@ static void blk_add_trace_bio_remap(void *ignore,
/**
* blk_add_trace_rq_remap - Add a trace for a request-remap operation
* @ignore: trace callback data parameter (not used)
- * @q: queue the io is for
* @rq: the source request
* @dev: target device
* @from: source sector
@@ -1087,16 +1033,14 @@ static void blk_add_trace_bio_remap(void *ignore,
* Add a trace for that action.
*
**/
-static void blk_add_trace_rq_remap(void *ignore,
- struct request_queue *q,
- struct request *rq, dev_t dev,
+static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
sector_t from)
{
struct blk_trace *bt;
struct blk_io_trace_remap r;
rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
+ bt = rcu_dereference(rq->q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
@@ -1108,13 +1052,12 @@ static void blk_add_trace_rq_remap(void *ignore,
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
- sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+ sizeof(r), &r, blk_trace_request_get_cgid(rq));
rcu_read_unlock();
}
/**
* blk_add_driver_data - Add binary message with driver-specific data
- * @q: queue the io is for
* @rq: io request
* @data: driver-specific data
* @len: length of driver-specific data
@@ -1123,14 +1066,12 @@ static void blk_add_trace_rq_remap(void *ignore,
* Some drivers might want to write driver-specific data per request.
*
**/
-void blk_add_driver_data(struct request_queue *q,
- struct request *rq,
- void *data, size_t len)
+void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
struct blk_trace *bt;
rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
+ bt = rcu_dereference(rq->q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
@@ -1138,7 +1079,7 @@ void blk_add_driver_data(struct request_queue *q,
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, 0, len, data,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1169,8 +1110,6 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
WARN_ON(ret);
- ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
- WARN_ON(ret);
ret = register_trace_block_plug(blk_add_trace_plug, NULL);
WARN_ON(ret);
ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
@@ -1190,7 +1129,6 @@ static void blk_unregister_tracepoints(void)
unregister_trace_block_split(blk_add_trace_split, NULL);
unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
unregister_trace_block_plug(blk_add_trace_plug, NULL);
- unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
@@ -1343,7 +1281,7 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
* ones now use the 64bit ino as the whole ID and
* no longer use generation.
*
- * Regarldess of the content, always output
+ * Regardless of the content, always output
* "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
* be mapped back to @id on both 64 and 32bit ino
* setups. See __kernfs_fh_to_dentry().
@@ -1385,7 +1323,7 @@ static void blk_log_dump_pdu(struct trace_seq *s,
i == 0 ? "" : " ", pdu_buf[i]);
/*
- * stop when the rest is just zeroes and indicate so
+ * stop when the rest is just zeros and indicate so
* with a ".." appended
*/
if (i == end && end != pdu_len - 1) {
@@ -1815,30 +1753,15 @@ static ssize_t blk_trace_mask2str(char *buf, int mask)
return p - buf;
}
-static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
-{
- if (bdev->bd_disk == NULL)
- return NULL;
-
- return bdev_get_queue(bdev);
-}
-
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct block_device *bdev = bdget_part(dev_to_part(dev));
- struct request_queue *q;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
ssize_t ret = -ENXIO;
- if (bdev == NULL)
- goto out;
-
- q = blk_trace_get_queue(bdev);
- if (q == NULL)
- goto out_bdput;
-
mutex_lock(&q->debugfs_mutex);
bt = rcu_dereference_protected(q->blk_trace,
@@ -1861,9 +1784,6 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
out_unlock_bdev:
mutex_unlock(&q->debugfs_mutex);
-out_bdput:
- bdput(bdev);
-out:
return ret;
}
@@ -1871,8 +1791,8 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct block_device *bdev;
- struct request_queue *q;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
u64 value;
ssize_t ret = -EINVAL;
@@ -1888,17 +1808,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
goto out;
value = ret;
}
- } else if (kstrtoull(buf, 0, &value))
- goto out;
-
- ret = -ENXIO;
- bdev = bdget_part(dev_to_part(dev));
- if (bdev == NULL)
- goto out;
-
- q = blk_trace_get_queue(bdev);
- if (q == NULL)
- goto out_bdput;
+ } else {
+ if (kstrtoull(buf, 0, &value))
+ goto out;
+ }
mutex_lock(&q->debugfs_mutex);
@@ -1936,8 +1849,6 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
out_unlock_bdev:
mutex_unlock(&q->debugfs_mutex);
-out_bdput:
- bdput(bdev);
out:
return ret ? ret : count;
}
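The blktrace conversion above drops the explicit request_queue argument from every tracepoint handler and derives the queue from the object being traced instead. A minimal sketch of that lookup, using only the fields the hunks themselves rely on (rq->q and bio->bi_bdev->bd_disk->queue); the helper names are illustrative and not part of the patch:

#include <linux/blkdev.h>

/* hypothetical helpers showing where the queue now comes from */
static inline struct request_queue *trace_rq_queue(struct request *rq)
{
	return rq->q;				/* a request always points back at its queue */
}

static inline struct request_queue *trace_bio_queue(struct bio *bio)
{
	return bio->bi_bdev->bd_disk->queue;	/* bio -> bdev -> disk -> queue */
}

This is also why blk_trace_request_get_cgid() and blk_add_driver_data() lose their queue parameter: the request already carries everything they need.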
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4517c8b66518..b0c45d923f0f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -16,6 +16,9 @@
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
+#include <linux/bpf_lsm.h>
+
+#include <net/bpf_sk_storage.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>
@@ -93,9 +96,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
unsigned int ret;
- if (in_nmi()) /* not supported yet */
- return 1;
-
cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
@@ -113,7 +113,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
* to all call sites, we did a bpf_prog_array_valid() there to check
* whether call->prog_array is empty or not, which is
- * a heurisitc to speed up execution.
+ * a heuristic to speed up execution.
*
* If bpf_prog_array_valid() fetched prog_array was
* non-NULL, we go into trace_call_bpf() and do the actual
@@ -181,6 +181,16 @@ bpf_probe_read_user_str_common(void *dst, u32 size,
{
int ret;
+ /*
+ * NB: We rely on strncpy_from_user() not copying junk past the NUL
+ * terminator into `dst`.
+ *
+ * strncpy_from_user() does long-sized strides in the fast path. If the
+ * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
+ * then there could be junk after the NUL in `dst`. If user takes `dst`
+ * and keys a hash map with it, then semantically identical strings can
+ * occupy multiple entries in the map.
+ */
ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
@@ -1022,6 +1032,20 @@ const struct bpf_func_proto bpf_get_current_task_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_0(bpf_get_current_task_btf)
+{
+ return (unsigned long) current;
+}
+
+BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
+
+static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
+ .func = bpf_get_current_task_btf,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID,
+ .ret_btf_id = &bpf_get_current_btf_ids[0],
+};
+
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1086,7 +1110,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
return -EINVAL;
work = this_cpu_ptr(&send_signal_work);
- if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+ if (irq_work_is_busy(&work->irq_work))
return -EBUSY;
/* Add the current task, which is the target of sending signal,
@@ -1164,7 +1188,15 @@ BTF_SET_END(btf_allowlist_d_path)
static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
- return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_ITER)
+ return true;
+
+ if (prog->type == BPF_PROG_TYPE_LSM)
+ return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
+
+ return btf_id_set_contains(&btf_allowlist_d_path,
+ prog->aux->attach_btf_id);
}
BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
@@ -1198,7 +1230,7 @@ static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
*btf = bpf_get_btf_vmlinux();
if (IS_ERR_OR_NULL(*btf))
- return PTR_ERR(*btf);
+ return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
if (ptr->type_id > 0)
*btf_id = ptr->type_id;
@@ -1259,12 +1291,16 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_get_current_pid_tgid:
return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_get_current_task:
return &bpf_get_current_task_proto;
+ case BPF_FUNC_get_current_task_btf:
+ return &bpf_get_current_task_btf_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_current_comm:
@@ -1327,9 +1363,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
case BPF_FUNC_snprintf_btf:
return &bpf_snprintf_btf_proto;
- case BPF_FUNC_bpf_per_cpu_ptr:
+ case BPF_FUNC_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
- case BPF_FUNC_bpf_this_cpu_ptr:
+ case BPF_FUNC_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
return NULL;
@@ -1719,6 +1755,14 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_to_tcp_request_sock_proto;
case BPF_FUNC_skc_to_udp6_sock:
return &bpf_skc_to_udp6_sock_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_tracing_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_tracing_proto;
+ case BPF_FUNC_sock_from_file:
+ return &bpf_sock_from_file_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_ptr_cookie_proto;
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?
@@ -2031,10 +2075,12 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
- struct module *mod = __module_address((unsigned long)btp);
+ struct module *mod;
- if (mod)
- module_put(mod);
+ preempt_disable();
+ mod = __module_address((unsigned long)btp);
+ module_put(mod);
+ preempt_enable();
}
static __always_inline
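The new bpf_get_current_task_btf() helper above returns current as a BTF-typed pointer (RET_PTR_TO_BTF_ID), so tracing and LSM programs can read task fields directly under verifier supervision. A hypothetical libbpf-style program sketch; the section name, includes and field access are assumptions for illustration, not taken from this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";	/* the helper is gpl_only */

SEC("fentry/do_nanosleep")
int BPF_PROG(report_sleeper)
{
	struct task_struct *task = bpf_get_current_task_btf();

	/* BTF-typed pointer: direct field reads, no bpf_probe_read() needed */
	bpf_printk("sleeping pid=%d", task->pid);
	return 0;
}

Compared with bpf_get_current_task(), which returns a plain integer that must be cast and probed, the BTF-typed variant lets the verifier track the pointer type across dereferences.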
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 5658f13037b3..29a6ebeebc9e 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -334,8 +334,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
static struct ftrace_ops graph_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID |
FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
@@ -395,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
t->curr_ret_depth = -1;
@@ -490,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
/* make curr_ret_stack visible before we add the ret_stack */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8185f7240095..4d8e35575549 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -80,7 +80,7 @@ enum {
struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+ .flags = FTRACE_OPS_FL_STUB,
INIT_OPS_HASH(ftrace_list_end)
};
@@ -121,7 +121,7 @@ struct ftrace_ops global_ops;
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
@@ -140,7 +140,7 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
int pid;
@@ -154,7 +154,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
return;
}
- op->saved_func(ip, parent_ip, op, regs);
+ op->saved_func(ip, parent_ip, op, fregs);
}
static void ftrace_sync_ipi(void *data)
@@ -754,7 +754,7 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct ftrace_profile_stat *stat;
struct ftrace_profile *rec;
@@ -866,7 +866,7 @@ static void unregister_ftrace_profiler(void)
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ .flags = FTRACE_OPS_FL_INITIALIZED,
INIT_OPS_HASH(ftrace_profile_ops)
};
@@ -1040,8 +1040,7 @@ struct ftrace_ops global_ops = {
.local_hash.notrace_hash = EMPTY_HASH,
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID,
};
@@ -1629,6 +1628,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
@@ -1778,7 +1779,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
* to it.
*/
if (ftrace_rec_count(rec) == 1 &&
- ftrace_find_tramp_ops_any(rec))
+ ftrace_find_tramp_ops_any_other(rec, ops))
rec->flags |= FTRACE_FL_TRAMP;
else
rec->flags &= ~FTRACE_FL_TRAMP;
@@ -2144,6 +2145,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
else
rec->flags &= ~FTRACE_FL_TRAMP_EN;
}
+
if (flag & FTRACE_FL_DIRECT) {
/*
* If there's only one user (direct_ops helper)
@@ -2245,6 +2247,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
}
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
+{
+ struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+ if (op == op_exclude || !op->trampoline)
+ continue;
+
+ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+ } while_for_each_ftrace_op(op);
+
+ return NULL;
+}
+
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
struct ftrace_ops *op)
{
@@ -2369,8 +2389,9 @@ unsigned long ftrace_find_rec_direct(unsigned long ip)
}
static void call_direct_funcs(unsigned long ip, unsigned long pip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
unsigned long addr;
addr = ftrace_find_rec_direct(ip);
@@ -2382,7 +2403,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
struct ftrace_ops direct_ops = {
.func = call_direct_funcs,
- .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
+ .flags = FTRACE_OPS_FL_IPMODIFY
| FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
| FTRACE_OPS_FL_PERMANENT,
/*
@@ -4163,7 +4184,6 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
struct ftrace_hash **orig_hash, *new_hash;
LIST_HEAD(process_mods);
char *func;
- int ret;
mutex_lock(&ops->func_hash->regex_lock);
@@ -4216,7 +4236,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
+ ftrace_hash_move_and_update_ops(ops, orig_hash,
new_hash, enable);
mutex_unlock(&ftrace_lock);
@@ -4294,7 +4314,7 @@ static int __init ftrace_mod_cmd_init(void)
core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct ftrace_probe_ops *probe_ops;
struct ftrace_func_probe *probe;
@@ -5568,7 +5588,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
struct ftrace_hash **orig_hash;
struct trace_parser *parser;
int filter_hash;
- int ret;
if (file->f_mode & FMODE_READ) {
iter = m->private;
@@ -5596,7 +5615,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
orig_hash = &iter->ops->func_hash->notrace_hash;
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
+ ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
iter->hash, filter_hash);
mutex_unlock(&ftrace_lock);
} else {
@@ -6864,8 +6883,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID,
};
@@ -6915,12 +6933,13 @@ void ftrace_reset_array_ops(struct trace_array *tr)
static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ignored, struct pt_regs *regs)
+ struct ftrace_ops *ignored, struct ftrace_regs *fregs)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
struct ftrace_ops *op;
int bit;
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
if (bit < 0)
return;
@@ -6949,7 +6968,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
pr_warn("op=%p %pS\n", op, op);
goto out;
}
- op->func(ip, parent_ip, op, regs);
+ op->func(ip, parent_ip, op, fregs);
}
} while_for_each_ftrace_op(op);
out:
@@ -6972,9 +6991,9 @@ out:
*/
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+ __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
}
NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
@@ -6991,18 +7010,18 @@ NOKPROBE_SYMBOL(ftrace_ops_no_ops);
* this function will be called by the mcount trampoline.
*/
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
int bit;
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
if (bit < 0)
return;
preempt_disable_notrace();
if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
- op->func(ip, parent_ip, op, regs);
+ op->func(ip, parent_ip, op, fregs);
preempt_enable_notrace();
trace_clear_recursion(bit);
@@ -7023,11 +7042,11 @@ NOKPROBE_SYMBOL(ftrace_ops_assist_func);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
/*
- * If the function does not handle recursion, needs to be RCU safe,
- * or does per cpu logic, then we need to call the assist handler.
+ * If the function does not handle recursion or needs to be RCU safe,
+ * then we need to call the assist handler.
*/
- if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
- ops->flags & FTRACE_OPS_FL_RCU)
+ if (ops->flags & (FTRACE_OPS_FL_RECURSION |
+ FTRACE_OPS_FL_RCU))
return ftrace_ops_assist_func;
return ops->func;
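Two related API changes run through this file: ftrace callbacks now take struct ftrace_regs * instead of struct pt_regs *, and FTRACE_OPS_FL_RECURSION_SAFE is gone, with recursion protection becoming opt-in via FTRACE_OPS_FL_RECURSION. A hedged sketch of a callback written against the new convention (the callback and ops names are illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/trace_recursion.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs;
	int bit;

	/* the callback protects itself now that RECURSION_SAFE is gone */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);	/* NULL unless full regs were saved */
	if (regs)
		pr_debug("traced %pS, regs ip=%lx\n",
			 (void *)ip, instruction_pointer(regs));

	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
	/*
	 * FTRACE_OPS_FL_RECURSION is left clear because the callback guards
	 * itself; setting it would make ftrace route the call through
	 * ftrace_ops_assist_func() instead, as the hunk above shows.
	 */
};

Registering it with register_ftrace_function(&my_ops) is unchanged; only the fourth callback argument and the recursion responsibility moved.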
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f45fd9d5a45..ec08f948dd80 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
+#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
@@ -129,7 +130,16 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
-#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT)
+
+#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
+#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
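With RB_FORCE_8BYTE_ALIGNMENT set, rb_calculate_event_length() later in this diff rounds event sizes to RB_ARCH_ALIGNMENT (8 bytes) instead of 4, and data events keep their length in array[0] rather than in type_len. A small user-space sketch, not kernel code, of what that rounding does to a few payload sizes:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int len;

	for (len = 1; len <= 16; len += 3)
		printf("payload %2u -> 4-byte align: %2u, 8-byte align: %2u\n",
		       len, ALIGN(len, 4U), ALIGN(len, 8U));
	return 0;
}

The cost is a little padding per event; the benefit is that 64-bit fields inside events never straddle an alignment boundary on CONFIG_HAVE_64BIT_ALIGNED_ACCESS architectures.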
@@ -438,14 +448,16 @@ enum {
};
/*
* Used for which event context the event is in.
- * NMI = 0
- * IRQ = 1
- * SOFTIRQ = 2
- * NORMAL = 3
+ * TRANSITION = 0
+ * NMI = 1
+ * IRQ = 2
+ * SOFTIRQ = 3
+ * NORMAL = 4
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
+ RB_CTX_TRANSITION,
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
@@ -1420,7 +1432,8 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
return 0;
}
-static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ long nr_pages, struct list_head *pages)
{
struct buffer_page *bpage, *tmp;
bool user_thread = current->mm != NULL;
@@ -1460,13 +1473,15 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
struct page *page;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
- mflags, cpu_to_node(cpu));
+ mflags, cpu_to_node(cpu_buffer->cpu));
if (!bpage)
goto free_pages;
+ rb_check_bpage(cpu_buffer, bpage);
+
list_add(&bpage->list, pages);
- page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
if (!page)
goto free_pages;
bpage->page = page_address(page);
@@ -1498,7 +1513,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
WARN_ON(!nr_pages);
- if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
+ if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
return -ENOMEM;
/*
@@ -1971,8 +1986,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
if (nr_pages < 2)
nr_pages = 2;
- size = nr_pages * BUF_PAGE_SIZE;
-
/* prevent another thread from changing buffer sizes */
mutex_lock(&buffer->mutex);
@@ -2007,8 +2020,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
* allocated without receiving ENOMEM
*/
INIT_LIST_HEAD(&cpu_buffer->new_pages);
- if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
- &cpu_buffer->new_pages, cpu)) {
+ if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
+ &cpu_buffer->new_pages)) {
/* not enough memory for new pages */
err = -ENOMEM;
goto out_err;
@@ -2073,8 +2086,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
INIT_LIST_HEAD(&cpu_buffer->new_pages);
if (cpu_buffer->nr_pages_to_update > 0 &&
- __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
- &cpu_buffer->new_pages, cpu_id)) {
+ __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
+ &cpu_buffer->new_pages)) {
err = -ENOMEM;
goto out_err;
}
@@ -2626,9 +2639,6 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
return skip_time_extend(event);
}
-static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event);
-
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline bool sched_clock_stable(void)
{
@@ -2717,7 +2727,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
event->time_delta = delta;
length -= RB_EVNT_HDR_SIZE;
- if (length > RB_MAX_SMALL_DATA) {
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
event->type_len = 0;
event->array[0] = length;
} else
@@ -2732,11 +2742,11 @@ static unsigned rb_calculate_event_length(unsigned length)
if (!length)
length++;
- if (length > RB_MAX_SMALL_DATA)
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
length += sizeof(event.array[0]);
length += RB_EVNT_HDR_SIZE;
- length = ALIGN(length, RB_ALIGNMENT);
+ length = ALIGN(length, RB_ARCH_ALIGNMENT);
/*
* In case the time delta is larger than the 27 bits for it
@@ -2756,20 +2766,6 @@ static unsigned rb_calculate_event_length(unsigned length)
return length;
}
-static __always_inline bool
-rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
-{
- unsigned long addr = (unsigned long)event;
- unsigned long index;
-
- index = rb_event_index(event);
- addr &= PAGE_MASK;
-
- return cpu_buffer->commit_page->page == (void *)addr &&
- rb_commit_index(cpu_buffer) == index;
-}
-
static u64 rb_time_delta(struct ring_buffer_event *event)
{
switch (event->type_len) {
@@ -3004,6 +3000,13 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
irq_work_queue(&cpu_buffer->irq_work.work);
}
+#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
+# define do_ring_buffer_record_recursion() \
+ do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
+#else
+# define do_ring_buffer_record_recursion() do { } while (0)
+#endif
+
/*
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
@@ -3014,10 +3017,10 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
*
- * bit 0 = NMI context
- * bit 1 = IRQ context
- * bit 2 = SoftIRQ context
- * bit 3 = normal context.
+ * bit 1 = NMI context
+ * bit 2 = IRQ context
+ * bit 3 = SoftIRQ context
+ * bit 4 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -3040,6 +3043,30 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
+ *
+ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
+ * is set when a recursion is detected at the current context, and if
+ * the TRANSITION bit is already set, it will fail the recursion.
+ * This is needed because there's a lag between the changing of
+ * interrupt context and updating the preempt count. In this case,
+ * a false positive will be found. To handle this, one extra recursion
+ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
+ * bit is already set, then it is considered a recursion and the function
+ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
+ *
+ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
+ * to be cleared. Even if it wasn't the context that set it. That is,
+ * if an interrupt comes in while NORMAL bit is set and the ring buffer
+ * is called before preempt_count() is updated, since the check will
+ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
+ * NMI then comes in, it will set the NMI bit, but when the NMI code
+ * does the trace_recursive_unlock(), it will clear the TRANSITION bit
+ * and leave the NMI bit set. But this is fine, because the interrupt
+ * code that set the TRANSITION bit will then clear the NMI bit when it
+ * calls trace_recursive_unlock(). If another NMI comes in, it will
+ * set the TRANSITION bit and continue.
+ *
+ * Note: The TRANSITION bit only handles a single transition between contexts.
*/
static __always_inline int
@@ -3055,8 +3082,18 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
- if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
- return 1;
+ if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
+ /*
+ * It is possible that this was called by transitioning
+ * between interrupt context, and preempt_count() has not
+ * been updated yet. In this case, use the TRANSITION bit.
+ */
+ bit = RB_CTX_TRANSITION;
+ if (val & (1 << (bit + cpu_buffer->nest))) {
+ do_ring_buffer_record_recursion();
+ return 1;
+ }
+ }
val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;
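The comment block above describes the per-context recursion bits plus the extra TRANSITION bit that absorbs the window where an interrupt has arrived but preempt_count() has not been updated yet. A toy user-space model of that check, with the cpu_buffer->nest offset left out for brevity (an illustration, not the kernel implementation):

enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

/* returns the bit that was taken, or -1 if this really is recursion */
static int toy_recursive_lock(unsigned int *ctx, int bit)
{
	if (*ctx & (1 << bit)) {
		/* context bit already held: allow one extra level, once */
		bit = CTX_TRANSITION;
		if (*ctx & (1 << bit))
			return -1;
	}
	*ctx |= 1 << bit;
	return bit;
}

static void toy_recursive_unlock(unsigned int *ctx, int bit)
{
	*ctx &= ~(1 << bit);
}

The kernel version clears the lowest set bit on unlock rather than the exact bit it took, which is why the comment notes that the TRANSITION bit is always the first one released.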
@@ -3071,8 +3108,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->current_context - (1 << cpu_buffer->nest);
}
-/* The recursive locking above uses 4 bits */
-#define NESTED_BITS 4
+/* The recursive locking above uses 5 bits */
+#define NESTED_BITS 5
/**
* ring_buffer_nest_start - Allow to trace while nested
@@ -3149,6 +3186,153 @@ int ring_buffer_unlock_commit(struct trace_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
+/* Special value to validate all deltas on a page. */
+#define CHECK_FULL_PAGE 1L
+
+#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
+static void dump_buffer_page(struct buffer_data_page *bpage,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+ struct ring_buffer_event *event;
+ u64 ts, delta;
+ int e;
+
+ ts = bpage->time_stamp;
+ pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(bpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = ring_buffer_event_time_stamp(event);
+ ts += delta;
+ pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta);
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = ring_buffer_event_time_stamp(event);
+ ts = delta;
+ pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta);
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ ts += event->time_delta;
+ pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta);
+ break;
+
+ case RINGBUF_TYPE_DATA:
+ ts += event->time_delta;
+ pr_warn(" [%lld] delta:%d\n", ts, event->time_delta);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static DEFINE_PER_CPU(atomic_t, checking);
+static atomic_t ts_dump;
+
+/*
+ * Check if the current event time stamp matches the deltas on
+ * the buffer page.
+ */
+static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+ struct ring_buffer_event *event;
+ struct buffer_data_page *bpage;
+ u64 ts, delta;
+ bool full = false;
+ int e;
+
+ bpage = info->tail_page->page;
+
+ if (tail == CHECK_FULL_PAGE) {
+ full = true;
+ tail = local_read(&bpage->commit);
+ } else if (info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
+ /* Ignore events with absolute time stamps */
+ return;
+ }
+
+ /*
+ * Do not check the first event (skip possible extends too).
+ * Also do not check if previous events have not been committed.
+ */
+ if (tail <= 8 || tail > local_read(&bpage->commit))
+ return;
+
+ /*
+ * If this interrupted another event that is being checked, skip it.
+ */
+ if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+ goto out;
+
+ ts = bpage->time_stamp;
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(bpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = ring_buffer_event_time_stamp(event);
+ ts += delta;
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = ring_buffer_event_time_stamp(event);
+ ts = delta;
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ if (event->time_delta == 1)
+ break;
+ /* fall through */
+ case RINGBUF_TYPE_DATA:
+ ts += event->time_delta;
+ break;
+
+ default:
+ RB_WARN_ON(cpu_buffer, 1);
+ }
+ }
+ if ((full && ts > info->ts) ||
+ (!full && ts + info->delta != info->ts)) {
+ /* If another report is happening, ignore this one */
+ if (atomic_inc_return(&ts_dump) != 1) {
+ atomic_dec(&ts_dump);
+ goto out;
+ }
+ atomic_inc(&cpu_buffer->record_disabled);
+ pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld after:%lld\n",
+ cpu_buffer->cpu,
+ ts + info->delta, info->ts, info->delta, info->after);
+ dump_buffer_page(bpage, info, tail);
+ atomic_dec(&ts_dump);
+ /* Do not re-enable checking */
+ return;
+ }
+out:
+ atomic_dec(this_cpu_ptr(&checking));
+}
+#else
+static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+}
+#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
+
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info)
@@ -3200,14 +3384,14 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* See if we shot pass the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE)) {
- if (tail != w) {
- /* before and after may now different, fix it up*/
- b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
- if (a_ok && b_ok && info->before != info->after)
- (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
- info->before, info->after);
- }
+ /* before and after may now be different, fix it up */
+ b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+ if (a_ok && b_ok && info->before != info->after)
+ (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+ info->before, info->after);
+ if (a_ok && b_ok)
+ check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
return rb_move_tail(cpu_buffer, tail, info);
}
@@ -3225,9 +3409,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* This did not interrupt any time update */
info->delta = info->ts - info->after;
else
- /* Just use full timestamp for inerrupting event */
+ /* Just use full timestamp for interrupting event */
info->delta = info->ts;
barrier();
+ check_buffer(cpu_buffer, info, tail);
if (unlikely(info->ts != save_before)) {
/* SLOW PATH - Interrupted between C and E */
@@ -3253,15 +3438,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
ts = rb_time_stamp(cpu_buffer->buffer);
barrier();
/*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
- info->after < ts) {
+ info->after < ts &&
+ rb_time_cmpxchg(&cpu_buffer->write_stamp,
+ info->after, ts)) {
/* Nothing came after this event between C and E */
info->delta = ts - info->after;
- (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
- info->after, info->ts);
info->ts = ts;
} else {
/*
- * Interrupted beween C and E:
+ * Interrupted between C and E:
* Lost the previous events time stamp. Just set the
* delta to zero, and this will be the same time as
* the event this event interrupted. And the events that
@@ -3468,7 +3653,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
}
/**
- * ring_buffer_commit_discard - discard an event that has not been committed
+ * ring_buffer_discard_commit - discard an event that has not been committed
* @buffer: the ring buffer
* @event: non committed event to discard
*
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index edd912cd14aa..a4b4bbf8c3bf 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -307,7 +307,7 @@ static int __init test_create_synth_event(void)
return ret;
delete:
/* We got an error after creating the event, delete it */
- ret = synth_event_delete("create_synth_test");
+ synth_event_delete("create_synth_test");
goto out;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 528971714fc6..b5815a022ecc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -68,10 +68,21 @@ bool ring_buffer_expanded;
static bool __read_mostly tracing_selftest_running;
/*
- * If a tracer is running, we do not want to run SELFTEST.
+ * If boot-time tracing including tracers/events via kernel cmdline
+ * is running, we do not want to run SELFTEST.
*/
bool __read_mostly tracing_selftest_disabled;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+void __init disable_tracing_selftest(const char *reason)
+{
+ if (!tracing_selftest_disabled) {
+ tracing_selftest_disabled = true;
+ pr_info("Ftrace startup test is disabled due to %s\n", reason);
+ }
+}
+#endif
+
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
@@ -163,7 +174,8 @@ static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
@@ -2112,11 +2124,7 @@ int __init register_tracer(struct tracer *type)
apply_trace_boot_options();
/* disable other selftests, since this will break it. */
- tracing_selftest_disabled = true;
-#ifdef CONFIG_FTRACE_STARTUP_TEST
- printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
- type->name);
-#endif
+ disable_tracing_selftest("running a tracer");
out_unlock:
return ret;
@@ -2737,7 +2745,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if (val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
trace_event_setup(entry, type, flags, pc);
entry->array[0] = len;
return entry;
@@ -2750,7 +2758,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
- * to store the trace event for the tigger to use. It's recusive
+ * to store the trace event for the trigger to use. It's recursion
* safe and will not be recorded anywhere.
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
@@ -2870,7 +2878,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
* two. They are not that meaningful.
*/
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
- ftrace_trace_userstack(buffer, flags, pc);
+ ftrace_trace_userstack(tr, buffer, flags, pc);
}
/*
@@ -2952,7 +2960,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
/* This should never happen. If it does, yell once and skip */
- if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
+ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
goto out;
/*
@@ -3056,13 +3064,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
static DEFINE_PER_CPU(int, user_stack_count);
static void
-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
@@ -3101,7 +3110,8 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
unsigned long flags, int pc)
{
}
@@ -3118,7 +3128,7 @@ struct trace_buffer_struct {
static struct trace_buffer_struct *trace_percpu_buffer;
/*
- * Thise allows for lockless recording. If we're nested too deeply, then
+ * This allows for lockless recording. If we're nested too deeply, then
* this returns NULL.
*/
static char *get_trace_buf(void)
@@ -3132,7 +3142,7 @@ static char *get_trace_buf(void)
/* Interrupts must see nesting incremented before we use the buffer */
barrier();
- return &buffer->buffer[buffer->nesting][0];
+ return &buffer->buffer[buffer->nesting - 1][0];
}
static void put_trace_buf(void)
@@ -3534,7 +3544,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
}
#define STATIC_TEMP_BUF_SIZE 128
-static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
@@ -9059,7 +9069,10 @@ int tracing_init_dentry(void)
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];
-static void __init trace_eval_init(void)
+static struct workqueue_struct *eval_map_wq __initdata;
+static struct work_struct eval_map_work __initdata;
+
+static void __init eval_map_work_func(struct work_struct *work)
{
int len;
@@ -9067,6 +9080,33 @@ static void __init trace_eval_init(void)
trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
+static int __init trace_eval_init(void)
+{
+ INIT_WORK(&eval_map_work, eval_map_work_func);
+
+ eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
+ if (!eval_map_wq) {
+ pr_err("Unable to allocate eval_map_wq\n");
+ /* Do work here */
+ eval_map_work_func(&eval_map_work);
+ return -ENOMEM;
+ }
+
+ queue_work(eval_map_wq, &eval_map_work);
+ return 0;
+}
+
+static int __init trace_eval_sync(void)
+{
+ /* Make sure the eval map updates are finished */
+ if (eval_map_wq)
+ destroy_workqueue(eval_map_wq);
+ return 0;
+}
+
+late_initcall_sync(trace_eval_sync);
+
+
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
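trace_eval_init() above moves the eval-map insertion onto an unbound workqueue so it no longer serializes boot, and trace_eval_sync() makes sure that work has finished by destroying the queue at late_initcall_sync time. The same defer-then-sync pattern in isolation (names are illustrative; a sketch, not the kernel code):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *slow_init_wq __initdata;
static struct work_struct slow_init_work __initdata;

static void __init slow_init_func(struct work_struct *work)
{
	/* expensive one-shot setup runs here, off the critical boot path */
}

/* called from the subsystem's own __init path */
static int __init slow_init(void)
{
	INIT_WORK(&slow_init_work, slow_init_func);

	slow_init_wq = alloc_workqueue("slow_init_wq", WQ_UNBOUND, 0);
	if (!slow_init_wq) {
		slow_init_func(&slow_init_work);	/* fall back to synchronous */
		return -ENOMEM;
	}

	queue_work(slow_init_wq, &slow_init_work);
	return 0;
}

static int __init slow_init_sync(void)
{
	/* destroy_workqueue() waits for the queued work to finish */
	if (slow_init_wq)
		destroy_workqueue(slow_init_wq);
	return 0;
}
late_initcall_sync(slow_init_sync);

Waiting at late_initcall_sync also keeps the __init work function and its data from being freed while the work is still pending.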
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f3f5e77123ad..e448d2da0b99 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -558,163 +558,6 @@ struct tracer {
bool noboot;
};
-
-/* Only current can touch trace_recursion */
-
-/*
- * For function tracing recursion:
- * The order of these bits are important.
- *
- * When function tracing occurs, the following steps are made:
- * If arch does not support a ftrace feature:
- * call internal function (uses INTERNAL bits) which calls...
- * If callback is registered to the "global" list, the list
- * function is called and recursion checks the GLOBAL bits.
- * then this function calls...
- * The function callback, which can use the FTRACE bits to
- * check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
- */
-enum {
- /* Function recursion bits */
- TRACE_FTRACE_BIT,
- TRACE_FTRACE_NMI_BIT,
- TRACE_FTRACE_IRQ_BIT,
- TRACE_FTRACE_SIRQ_BIT,
-
- /* INTERNAL_BITs must be greater than FTRACE_BITs */
- TRACE_INTERNAL_BIT,
- TRACE_INTERNAL_NMI_BIT,
- TRACE_INTERNAL_IRQ_BIT,
- TRACE_INTERNAL_SIRQ_BIT,
-
- TRACE_BRANCH_BIT,
-/*
- * Abuse of the trace_recursion.
- * As we need a way to maintain state if we are tracing the function
- * graph in irq because we want to trace a particular function that
- * was called in irq context but we have irq tracing off. Since this
- * can only be modified by current, we can reuse trace_recursion.
- */
- TRACE_IRQ_BIT,
-
- /* Set if the function is in the set_graph_function file */
- TRACE_GRAPH_BIT,
-
- /*
- * In the very unlikely case that an interrupt came in
- * at a start of graph tracing, and we want to trace
- * the function in that interrupt, the depth can be greater
- * than zero, because of the preempted start of a previous
- * trace. In an even more unlikely case, depth could be 2
- * if a softirq interrupted the start of graph tracing,
- * followed by an interrupt preempting a start of graph
- * tracing in the softirq, and depth can even be 3
- * if an NMI came in at the start of an interrupt function
- * that preempted a softirq start of a function that
- * preempted normal context!!!! Luckily, it can't be
- * greater than 3, so the next two bits are a mask
- * of what the depth is when we set TRACE_GRAPH_BIT
- */
-
- TRACE_GRAPH_DEPTH_START_BIT,
- TRACE_GRAPH_DEPTH_END_BIT,
-
- /*
- * To implement set_graph_notrace, if this bit is set, we ignore
- * function graph tracing of called functions, until the return
- * function is called to clear it.
- */
- TRACE_GRAPH_NOTRACE_BIT,
-};
-
-#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
-#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
-#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
-
-#define trace_recursion_depth() \
- (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
-#define trace_recursion_set_depth(depth) \
- do { \
- current->trace_recursion &= \
- ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
- current->trace_recursion |= \
- ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
- } while (0)
-
-#define TRACE_CONTEXT_BITS 4
-
-#define TRACE_FTRACE_START TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_LIST_START TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
-
-static __always_inline int trace_get_context_bit(void)
-{
- int bit;
-
- if (in_interrupt()) {
- if (in_nmi())
- bit = 0;
-
- else if (in_irq())
- bit = 1;
- else
- bit = 2;
- } else
- bit = 3;
-
- return bit;
-}
-
-static __always_inline int trace_test_and_set_recursion(int start, int max)
-{
- unsigned int val = current->trace_recursion;
- int bit;
-
- /* A previous recursion check was made */
- if ((val & TRACE_CONTEXT_MASK) > max)
- return 0;
-
- bit = trace_get_context_bit() + start;
- if (unlikely(val & (1 << bit)))
- return -1;
-
- val |= 1 << bit;
- current->trace_recursion = val;
- barrier();
-
- return bit;
-}
-
-static __always_inline void trace_clear_recursion(int bit)
-{
- unsigned int val = current->trace_recursion;
-
- if (!bit)
- return;
-
- bit = 1 << bit;
- val &= ~bit;
-
- barrier();
- current->trace_recursion = val;
-}
-
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
@@ -876,6 +719,8 @@ extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
+extern void __init disable_tracing_selftest(const char *reason);
+
extern int trace_selftest_startup_function(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
@@ -899,6 +744,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
*/
#define __tracer_data __refdata
#else
+static inline void __init disable_tracing_selftest(const char *reason)
+{
+}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 2e9a4746ea85..801c2a7f7605 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -31,7 +31,7 @@ static bool ok_to_run;
* it simply writes "START". As the first write is cold cache and
* the rest is hot, we save off that time in bm_first and it is
* reported as "first", which is shown in the second write to the
- * tracepoint. The "first" field is writen within the statics from
+ * tracepoint. The "first" field is written within the statics from
* then on but never changes.
*/
static void trace_do_benchmark(void)
@@ -112,7 +112,7 @@ static void trace_do_benchmark(void)
int i = 0;
/*
* stddev is the square of standard deviation but
- * we want the actualy number. Use the average
+ * we want the actual number. Use the average
* as our seed to find the std.
*
* The next try is:
@@ -155,7 +155,7 @@ static int benchmark_event_kthread(void *arg)
/*
* We don't go to sleep, but let others run as well.
- * This is bascially a "yield()" to let any task that
+ * This is basically a "yield()" to let any task that
* wants to run, schedule in, but if the CPU is idle,
* we'll keep burning cycles.
*
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index c22a152ef0b4..a82f03f385f8 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -344,6 +344,8 @@ static int __init trace_boot_init(void)
trace_boot_init_one_instance(tr, trace_node);
trace_boot_init_instances(trace_node);
+ disable_tracing_selftest("running boot-time tracing");
+
return 0;
}
/*
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 5fa49cfd2bb6..4f967d5cd917 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -276,7 +276,7 @@ int dynevent_arg_add(struct dynevent_cmd *cmd,
* arguments of the form 'type variable_name;' or 'x+y'.
*
* The lhs argument string will be appended to the current cmd string,
- * followed by an operator, if applicable, followd by the rhs string,
+ * followed by an operator, if applicable, followed by the rhs string,
* followed finally by a separator, if applicable. Before the
* argument is added, the @check_arg function, if present, will be
* used to check the sanity of the current arg strings.
diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
index d6857a254ede..d6f72dcb7269 100644
--- a/kernel/trace/trace_dynevent.h
+++ b/kernel/trace/trace_dynevent.h
@@ -29,10 +29,10 @@ struct dyn_event;
* @show: Showing method. This is invoked when user reads the event definitions
* via dynamic_events interface.
* @is_busy: Check whether given event is busy so that it can not be deleted.
- * Return true if it is busy, otherwides false.
- * @free: Delete the given event. Return 0 if success, otherwides error.
+ * Return true if it is busy, otherwise false.
+ * @free: Delete the given event. Return 0 if success, otherwise error.
* @match: Check whether given event and system name match this event. The argc
- * and argv is used for exact match. Return true if it matches, otherwides
+ * and argv is used for exact match. Return true if it matches, otherwise
* false.
*
* Except for @create, these methods are called under holding event_mutex.
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 18c4a58aff79..4547ac59da61 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -32,7 +32,7 @@
* to be deciphered for the format file. Although these macros
* may become out of sync with the internal structure, they
* will create a compile error if it happens. Since the
- * internel structures are just tracing helpers, this is not
+ * internal structures are just tracing helpers, this is not
* an issue.
*
* When an internal structure is used, it should use:
@@ -93,10 +93,10 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ret, ret )
__field_packed( unsigned long, ret, func )
- __field_packed( unsigned long, ret, overrun )
+ __field_packed( int, ret, depth )
+ __field_packed( unsigned int, ret, overrun )
__field_packed( unsigned long long, ret, calltime)
__field_packed( unsigned long long, ret, rettime )
- __field_packed( int, ret, depth )
),
F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 643e0b19920d..a71181655958 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -432,17 +432,25 @@ NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *pt_regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct ftrace_entry *entry;
struct perf_event *event;
struct hlist_head head;
struct pt_regs regs;
int rctx;
+ int bit;
+
+ if (!rcu_is_watching())
+ return;
if ((unsigned long)ops->private != smp_processor_id())
return;
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
event = container_of(ops, struct perf_event, ftrace_ops);
/*
@@ -463,13 +471,15 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
if (!entry)
- return;
+ goto out;
entry->ip = ip;
entry->parent_ip = parent_ip;
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
1, &regs, &head, NULL);
+out:
+ ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}
@@ -477,7 +487,6 @@ static int perf_ftrace_function_register(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
- ops->flags = FTRACE_OPS_FL_RCU;
ops->func = perf_ftrace_function_call;
ops->private = (void *)(unsigned long)nr_cpu_ids;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 47a71f96e5bc..d387b774ceeb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1212,7 +1212,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!trace_event_name(call) || !call->class || !call->class->reg)
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
@@ -2436,7 +2437,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
/*
* Since calls are grouped by systems, the likelyhood that the
* next call in the iteration belongs to the same system as the
- * previous call is high. As an optimization, we skip seaching
+ * previous call is high. As an optimization, we skip searching
* for a map[] that matches the call's system if the last call
* was from the same system. That's what last_i is for. If the
* call has the same system as the previous call, then last_i
@@ -3201,7 +3202,7 @@ static __init int setup_trace_event(char *str)
{
strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
ring_buffer_expanded = true;
- tracing_selftest_disabled = true;
+ disable_tracing_selftest("running event tracing");
return 1;
}
@@ -3271,7 +3272,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
*
* When a new instance is created, it needs to set up its events
* directory, as well as other files associated with events. It also
- * creates the event hierachry in the @parent/events directory.
+ * creates the event hierarchy in the @parent/events directory.
*
* Returns 0 on success.
*
@@ -3428,10 +3429,10 @@ static __init int event_trace_enable(void)
* initialize events and perhaps start any events that are on the
* command line. Unfortunately, there are some events that will not
* start this early, like the system call tracepoints that need
- * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
- * is called before pid 1 starts, and this flag is never set, making
- * the syscall tracepoint never get reached, but the event is enabled
- * regardless (and not doing anything).
+ * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
+ * event_trace_enable() is called before pid 1 starts, and this flag
+ * is never set, making the syscall tracepoint never get reached, but
+ * the event is enabled regardless (and not doing anything).
*/
static __init int event_trace_enable_again(void)
{
@@ -3673,7 +3674,7 @@ static struct trace_event_file event_trace_file __initdata;
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *regs)
{
struct trace_buffer *buffer;
struct ring_buffer_event *event;
@@ -3712,7 +3713,6 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __initdata =
{
.func = function_test_events_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static __init void event_trace_self_test_with_function(void)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 78a678eeb140..e91259f6a722 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1561,27 +1561,6 @@ static inline void event_clear_filter(struct trace_event_file *file)
RCU_INIT_POINTER(file->filter, NULL);
}
-static inline void
-event_set_no_set_filter_flag(struct trace_event_file *file)
-{
- file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
-}
-
-static inline void
-event_clear_no_set_filter_flag(struct trace_event_file *file)
-{
- file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
-}
-
-static inline bool
-event_no_set_filter_flag(struct trace_event_file *file)
-{
- if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
- return true;
-
- return false;
-}
-
struct filter_list {
struct list_head list;
struct event_filter *filter;
@@ -1950,7 +1929,7 @@ static int __ftrace_function_set_filter(int filter, char *buf, int len,
/*
* The 'ip' field could have multiple filters set, separated
* either by space or comma. We first cut the filter and apply
- * all pieces separatelly.
+ * all pieces separately.
*/
re = ftrace_function_filter_re(buf, len, &re_cnt);
if (!re)
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 96c3f86b81c5..39ebe1826fc3 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3355,7 +3355,7 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
} else {
field_var = NULL;
/*
- * If no explicit system.event is specfied, default to
+ * If no explicit system.event is specified, default to
* looking for fields on the onmatch(system.event.xxx)
* event.
*/
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 84b7cab55291..5a8bc0b421f1 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -584,7 +584,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
{
struct synth_field *field;
const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
- int len, ret = 0;
+ int len, ret = -ENOMEM;
struct seq_buf s;
ssize_t size;
@@ -617,10 +617,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
len--;
field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
- if (!field->name) {
- ret = -ENOMEM;
+ if (!field->name)
goto free;
- }
+
if (!is_good_name(field->name)) {
synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
ret = -EINVAL;
@@ -638,10 +637,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
len += strlen(prefix);
field->type = kzalloc(len, GFP_KERNEL);
- if (!field->type) {
- ret = -ENOMEM;
+ if (!field->type)
goto free;
- }
+
seq_buf_init(&s, field->type, len);
if (prefix)
seq_buf_puts(&s, prefix);
@@ -653,6 +651,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
}
if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
goto free;
+
s.buffer[s.len] = '\0';
size = synth_field_size(field->type);
@@ -666,10 +665,8 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
len = sizeof("__data_loc ") + strlen(field->type) + 1;
type = kzalloc(len, GFP_KERNEL);
- if (!type) {
- ret = -ENOMEM;
+ if (!type)
goto free;
- }
seq_buf_init(&s, type, len);
seq_buf_puts(&s, "__data_loc ");
@@ -1279,7 +1276,7 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
/**
* synth_event_create - Create a new synthetic event
- * @name: The name of the new sythetic event
+ * @name: The name of the new synthetic event
* @fields: An array of type/name field descriptions
* @n_fields: The number of field descriptions contained in the fields array
* @mod: The module creating the event, NULL if not created from a module
@@ -1449,7 +1446,7 @@ __synth_event_trace_init(struct trace_event_file *file,
* this code to be called, etc). Because this is called
* directly by the user, we don't have that but we still need
* to honor not logging when disabled. For the iterated
- * trace case, we save the enabed state upon start and just
+ * trace case, we save the enabled state upon start and just
* ignore the following data calls.
*/
if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 90f81d33fa3f..d960f6b11b5e 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -26,7 +26,7 @@ static int ftrace_event_register(struct trace_event_call *call,
/*
* The FTRACE_ENTRY_REG macro allows ftrace entry to define register
- * function and thus become accesible via perf.
+ * function and thus become accessible via perf.
*/
#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 2c2126e1871d..c5095dd28e20 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -23,10 +23,10 @@ static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
/* Our option */
@@ -48,7 +48,7 @@ int ftrace_allocate_ftrace_ops(struct trace_array *tr)
/* Currently only the non stack version is supported */
ops->func = function_trace_call;
- ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
+ ops->flags = FTRACE_OPS_FL_PID;
tr->ops = ops;
ops->private = tr;
@@ -89,7 +89,6 @@ void ftrace_destroy_function_files(struct trace_array *tr)
static int function_trace_init(struct trace_array *tr)
{
ftrace_func_t func;
-
/*
* Instance trace_arrays get their ops allocated
* at instance creation. Unless it failed
@@ -129,7 +128,7 @@ static void function_trace_start(struct trace_array *tr)
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
struct trace_array_cpu *data;
@@ -141,22 +140,20 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
if (unlikely(!tr->function_enabled))
return;
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
pc = preempt_count();
preempt_disable_notrace();
- bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
- if (bit < 0)
- goto out;
-
cpu = smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (!atomic_read(&data->disabled)) {
local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc);
}
- trace_clear_recursion(bit);
-
- out:
+ ftrace_test_recursion_unlock(bit);
preempt_enable_notrace();
}
@@ -180,7 +177,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 60d66278aa0d..d874dec87131 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -957,7 +957,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
/* Overrun */
if (flags & TRACE_GRAPH_PRINT_OVERRUN)
- trace_seq_printf(s, " (Overruns: %lu)\n",
+ trace_seq_printf(s, " (Overruns: %u)\n",
trace->overrun);
print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index c9ad5c6fbaad..c0df9b97f147 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -368,7 +368,7 @@ static int start_kthread(struct trace_array *tr)
struct task_struct *kthread;
int next_cpu;
- if (WARN_ON(hwlat_kthread))
+ if (hwlat_kthread)
return 0;
/* Just pick the first CPU on first iteration */
@@ -485,11 +485,11 @@ hwlat_width_write(struct file *filp, const char __user *ubuf,
* @ppos: The current position in @file
*
* This function provides a write implementation for the "window" interface
- * to the hardware latency detetector. The window is the total time
+ * to the hardware latency detector. The window is the total time
* in us that will be considered one sample period. Conceptually, windows
* occur back-to-back and contain a sample width period during which
* actual sampling occurs. Can be used to write a new total window size. It
- * is enfoced that any value written must be greater than the sample width
+ * is enforced that any value written must be greater than the sample width
* size, or an error results.
*/
static ssize_t
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 10bbb0f381d5..6756379b661f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -138,7 +138,7 @@ static int func_prolog_dec(struct trace_array *tr,
*/
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+ /* without pause, we will produce garbage if another latency occurs */
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
tr->max_latency = 0;
irqsoff_trace = tr;
@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
{
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
stop_irqsoff_tracer(tr, is_graph(tr));
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b911e9f6d9f5..56c7fbff7bd7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -25,11 +25,12 @@
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
-static bool kprobe_boot_events_enabled __initdata;
static int __init set_kprobe_boot_events(char *str)
{
strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
+ disable_tracing_selftest("running kprobe events");
+
return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
@@ -220,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
@@ -433,7 +434,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
return 0;
}
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
@@ -827,9 +828,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
}
if (is_return)
flags |= TPARG_FL_RETURN;
- if (kprobe_on_func_entry(NULL, symbol, offset))
+ ret = kprobe_on_func_entry(NULL, symbol, offset);
+ if (ret == 0)
flags |= TPARG_FL_FENTRY;
- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+ /* Defer the ENOENT case until the kprobe is registered */
+ if (ret == -EINVAL && is_return) {
trace_probe_log_err(0, BAD_RETPROBE);
goto parse_error;
}
@@ -1731,7 +1734,8 @@ NOKPROBE_SYMBOL(kprobe_dispatcher);
static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
+ struct kretprobe *rp = get_kretprobe(ri);
+ struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
raw_cpu_inc(*tk->nhit);
@@ -1887,8 +1891,6 @@ static __init void setup_boot_kprobe_events(void)
ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
if (ret)
pr_warn("Failed to add event(%d): %s\n", ret, cmd);
- else
- kprobe_boot_events_enabled = true;
cmd = p;
}
@@ -1973,10 +1975,8 @@ static __init int kprobe_trace_self_tests_init(void)
if (tracing_is_disabled())
return -ENODEV;
- if (kprobe_boot_events_enabled) {
- pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
+ if (tracing_selftest_disabled)
return 0;
- }
target = kprobe_trace_selftest_target;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 000e9dc224c6..92b1575ae0ca 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,8 +353,8 @@ static inline const char *kretprobed(const char *name)
}
#endif /* CONFIG_KRETPROBES */
-static void
-seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
+void
+trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
@@ -420,7 +420,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
goto out;
}
- seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
+ trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
if (sym_flags & TRACE_ITER_SYM_ADDR)
trace_seq_printf(s, " <" IP_FMT ">", ip);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 2f742b74e7e6..4c954636caf0 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -16,6 +16,7 @@ extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
+extern void trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset);
extern int trace_print_context(struct trace_iterator *iter);
extern int trace_print_lat_context(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c
new file mode 100644
index 000000000000..b2edac1fe156
--- /dev/null
+++ b/kernel/trace/trace_recursion_record.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+
+struct recursed_functions {
+ unsigned long ip;
+ unsigned long parent_ip;
+};
+
+static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
+static atomic_t nr_records;
+
+/*
+ * Cache the last found function. Yes, updates to this are racy, but
+ * so is memory cache ;-)
+ */
+static unsigned long cached_function;
+
+void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
+{
+ int index = 0;
+ int i;
+ unsigned long old;
+
+ again:
+ /* First check the last one recorded */
+ if (ip == cached_function)
+ return;
+
+ i = atomic_read(&nr_records);
+ /* nr_records is -1 when clearing records */
+ smp_mb__after_atomic();
+ if (i < 0)
+ return;
+
+ /*
+ * If there are two writers and this writer comes in second,
+ * the cmpxchg() below to update the ip will fail. Then this
+ * writer will try again. It is possible that index will now
+ * be greater than nr_records. This is because the writer
+ * that succeeded has not updated the nr_records yet.
+ * This writer could keep trying again until the other writer
+ * updates nr_records. But if the other writer takes an
+ * interrupt, and that interrupt locks up that CPU, we do
+ * not want this CPU to lock up due to the recursion protection,
+ * and have a bug report showing this CPU as the cause of
+ * locking up the computer. To not lose this record, this
+ * writer will simply use the next position to update the
+ * recursed_functions, and it will update the nr_records
+ * accordingly.
+ */
+ if (index < i)
+ index = i;
+ if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
+ return;
+
+ for (i = index - 1; i >= 0; i--) {
+ if (recursed_functions[i].ip == ip) {
+ cached_function = ip;
+ return;
+ }
+ }
+
+ cached_function = ip;
+
+ /*
+ * We only want to add a function if it hasn't been added before.
+ * Add to the current location before incrementing the count.
+ * If it fails to add, then increment the index (save in i)
+ * and try again.
+ */
+ old = cmpxchg(&recursed_functions[index].ip, 0, ip);
+ if (old != 0) {
+ /* Did something else already add this for us? */
+ if (old == ip)
+ return;
+ /* Try the next location (use i for the next index) */
+ index++;
+ goto again;
+ }
+
+ recursed_functions[index].parent_ip = parent_ip;
+
+ /*
+ * It's still possible that we could race with the clearing
+ * CPU0 CPU1
+ * ---- ----
+ * ip = func
+ * nr_records = -1;
+ * recursed_functions[0] = 0;
+ * i = -1
+ * if (i < 0)
+ * nr_records = 0;
+ * (new recursion detected)
+ * recursed_functions[0] = func
+ * cmpxchg(recursed_functions[0],
+ * func, 0)
+ *
+ * But the worst that could happen is that we get a zero in
+ * the recursed_functions array, and it's likely that "func" will
+ * be recorded again.
+ */
+ i = atomic_read(&nr_records);
+ smp_mb__after_atomic();
+ if (i < 0)
+ cmpxchg(&recursed_functions[index].ip, ip, 0);
+ else if (i <= index)
+ atomic_cmpxchg(&nr_records, i, index + 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_record_recursion);
+
+static DEFINE_MUTEX(recursed_function_lock);
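+/* Scratch buffer used to render one record at a time; allocated in
+ * recursed_function_seq_start() and freed in _stop(), serialized by
+ * recursed_function_lock. */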
+static struct trace_seq *tseq;
+
+static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
+{
+ void *ret = NULL;
+ int index;
+
+ mutex_lock(&recursed_function_lock);
+ index = atomic_read(&nr_records);
+ if (*pos < index) {
+ ret = &recursed_functions[*pos];
+ }
+
+ tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
+ if (!tseq)
+ return ERR_PTR(-ENOMEM);
+
+ trace_seq_init(tseq);
+
+ return ret;
+}
+
+static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ int index;
+ int p;
+
+ index = atomic_read(&nr_records);
+ p = ++(*pos);
+
+ return p < index ? &recursed_functions[p] : NULL;
+}
+
+static void recursed_function_seq_stop(struct seq_file *m, void *v)
+{
+ kfree(tseq);
+ mutex_unlock(&recursed_function_lock);
+}
+
+static int recursed_function_seq_show(struct seq_file *m, void *v)
+{
+ struct recursed_functions *record = v;
+ int ret = 0;
+
+ if (record) {
+ trace_seq_print_sym(tseq, record->parent_ip, true);
+ trace_seq_puts(tseq, ":\t");
+ trace_seq_print_sym(tseq, record->ip, true);
+ trace_seq_putc(tseq, '\n');
+ ret = trace_print_seq(m, tseq);
+ }
+
+ return ret;
+}
+
+static const struct seq_operations recursed_function_seq_ops = {
+ .start = recursed_function_seq_start,
+ .next = recursed_function_seq_next,
+ .stop = recursed_function_seq_stop,
+ .show = recursed_function_seq_show
+};
+
+static int recursed_function_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ mutex_lock(&recursed_function_lock);
+ /* If this file was opened for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ /* disable updating records */
+ atomic_set(&nr_records, -1);
+ smp_mb__after_atomic();
+ memset(recursed_functions, 0, sizeof(recursed_functions));
+ smp_wmb();
+ /* enable them again */
+ atomic_set(&nr_records, 0);
+ }
+ if (file->f_mode & FMODE_READ)
+ ret = seq_open(file, &recursed_function_seq_ops);
+ mutex_unlock(&recursed_function_lock);
+
+ return ret;
+}
+
+static ssize_t recursed_function_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static int recursed_function_release(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ seq_release(inode, file);
+ return 0;
+}
+
+static const struct file_operations recursed_functions_fops = {
+ .open = recursed_function_open,
+ .write = recursed_function_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = recursed_function_release,
+};
+
+__init static int create_recursed_functions(void)
+{
+ struct dentry *dentry;
+
+ dentry = trace_create_file("recursed_functions", 0644, NULL, NULL,
+ &recursed_functions_fops);
+ if (!dentry)
+ pr_warn("WARNING: Failed to create recursed_functions\n");
+ return 0;
+}
+
+fs_initcall(create_recursed_functions);
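
The new trace_recursion_record.c above records each recursing function at most once without taking a lock: a writer claims a slot with cmpxchg() and only then publishes the new count, retrying on the next slot if another writer got there first. A stripped-down user-space sketch of that claim-then-publish idea, using C11 atomics instead of the kernel primitives (names, sizes and the simplified duplicate check are made up):

#include <stdatomic.h>
#include <stdbool.h>

#define NR_SLOTS 128

static _Atomic unsigned long slots[NR_SLOTS];	/* 0 marks an empty slot */
static atomic_int nr_records;

/* val must be nonzero, since 0 means "empty" */
static bool record_value(unsigned long val)
{
	int index = atomic_load(&nr_records);
	int seen;

	while (index < NR_SLOTS) {
		unsigned long expected = 0;

		/* Claim an empty slot; on failure 'expected' holds the current value */
		if (atomic_compare_exchange_strong(&slots[index], &expected, val))
			break;
		if (expected == val)
			return true;	/* another writer already recorded this value */
		index++;		/* slot taken by something else: try the next one */
	}
	if (index >= NR_SLOTS)
		return false;		/* table full */

	/* Publish: move nr_records forward past our slot, never backwards */
	seen = atomic_load(&nr_records);
	while (seen <= index &&
	       !atomic_compare_exchange_weak(&nr_records, &seen, index + 1))
		;
	return true;
}
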
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 97b10bb31a1f..c0181066dbe9 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -212,7 +212,7 @@ static void wakeup_print_header(struct seq_file *s)
*/
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b5e3496cf803..73ef12092250 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -107,7 +107,7 @@ static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe1_cnt++;
}
@@ -116,7 +116,7 @@ static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe2_cnt++;
}
@@ -125,7 +125,7 @@ static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe3_cnt++;
}
@@ -134,7 +134,7 @@ static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_global_cnt++;
}
@@ -143,24 +143,21 @@ static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static void print_counts(void)
@@ -417,7 +414,7 @@ static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
/*
* This function is registered without the recursion safe flag.
@@ -432,7 +429,7 @@ static void trace_selftest_test_recursion_func(unsigned long ip,
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
/*
* We said we would provide our own recursion. By calling
@@ -448,11 +445,11 @@ static void trace_selftest_test_recursion_safe_func(unsigned long ip,
static struct ftrace_ops test_rec_probe = {
.func = trace_selftest_test_recursion_func,
+ .flags = FTRACE_OPS_FL_RECURSION,
};
static struct ftrace_ops test_recsafe_probe = {
.func = trace_selftest_test_recursion_safe_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int
@@ -492,8 +489,13 @@ trace_selftest_function_recursion(void)
unregister_ftrace_function(&test_rec_probe);
ret = -1;
- if (trace_selftest_recursion_cnt != 1) {
- pr_cont("*callback not called once (%d)* ",
+ /*
+ * Recursion allows for transitions between contexts,
+ * and may call the callback twice.
+ */
+ if (trace_selftest_recursion_cnt != 1 &&
+ trace_selftest_recursion_cnt != 2) {
+ pr_cont("*callback not called once (or twice) (%d)* ",
trace_selftest_recursion_cnt);
goto out;
}
@@ -546,9 +548,11 @@ static enum {
static void trace_selftest_test_regs_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
- if (pt_regs)
+ struct pt_regs *regs = ftrace_get_regs(fregs);
+
+ if (regs)
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
else
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
@@ -556,7 +560,7 @@ static void trace_selftest_test_regs_func(unsigned long ip,
static struct ftrace_ops test_regs_probe = {
.func = trace_selftest_test_regs_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
+ .flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int
@@ -782,7 +786,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
/* Have we just recovered from a hang? */
if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
- tracing_selftest_disabled = true;
+ disable_tracing_selftest("recovering from a hang");
ret = -1;
goto out;
}
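
trace_selftest_test_regs_func() above shows the new way to reach pt_regs from a callback: ftrace_get_regs() returns the full register set only when it was actually saved, and NULL otherwise. A hedged sketch of that idiom outside the selftest, with my_regs_callback and my_regs_ops as invented names:

#include <linux/ftrace.h>

static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)
		return;		/* full registers were not saved for this call */

	/* ... inspect regs here, e.g. via instruction_pointer(regs) ... */
}

static struct ftrace_ops my_regs_ops = {
	.func	= my_regs_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,	/* request a complete pt_regs */
};
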
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c408423e5d65..63c285042051 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -290,7 +290,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long stack;
@@ -318,7 +318,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static ssize_t
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 4b50fc0cb12c..d6bddb157ef2 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -609,7 +609,7 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
* signal that state. There are two user-visible tracing_map
* variables, 'hits' and 'drops', which are updated by this function.
* Every time an element is either successfully inserted or retrieved,
- * the 'hits' value is incrememented. Every time an element insertion
+ * the 'hits' value is incremented. Every time an element insertion
* fails, the 'drops' value is incremented.
*
* This is a lock-free tracing map insertion function implementing a
@@ -642,9 +642,9 @@ struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
* tracing_map_elt. This is a lock-free lookup; see
* tracing_map_insert() for details on tracing_map and how it works.
* Every time an element is retrieved, the 'hits' value is
- * incrememented. There is one user-visible tracing_map variable,
+ * incremented. There is one user-visible tracing_map variable,
* 'hits', which is updated by this function. Every time an element
- * is successfully retrieved, the 'hits' value is incrememented. The
+ * is successfully retrieved, the 'hits' value is incremented. The
* 'drops' value is never updated by this function.
*
* Return: the tracing_map_elt pointer val associated with the key.
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
index a6de61fc22de..2c765ee2a4d4 100644
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
@@ -50,7 +50,7 @@ typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
* an instance of tracing_map_elt, where 'elt' in the latter part of
* that variable name is short for 'element'. The purpose of a
* tracing_map_elt is to hold values specific to the particular
- * 32-bit hashed key it's assocated with. Things such as the unique
+ * 32-bit hashed key it's associated with. Things such as the unique
* set of aggregated sums associated with the 32-bit hashed key, along
* with a copy of the full key associated with the entry, and which
* was used to produce the 32-bit hashed key.
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 26efd22f0633..7261fa0f5e3c 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -50,7 +50,7 @@ static bool ok_to_free_tracepoints;
*/
struct tp_probes {
struct rcu_head rcu;
- struct tracepoint_func probes[0];
+ struct tracepoint_func probes[];
};
static inline void *allocate_probes(int count)
@@ -594,7 +594,7 @@ int syscall_regfunc(void)
if (!sys_tracepoint_refcount) {
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
- set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+ set_task_syscall_work(t, SYSCALL_TRACEPOINT);
}
read_unlock(&tasklist_lock);
}
@@ -611,7 +611,7 @@ void syscall_unregfunc(void)
if (!sys_tracepoint_refcount) {
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
- clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+ clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
}
read_unlock(&tasklist_lock);
}
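
The tracepoint.c change from probes[0] to probes[] is the usual conversion from a GCC zero-length array to a C99 flexible array member; allocations of such structures are typically sized with struct_size() so the arithmetic is overflow-checked. A small sketch under that assumption (tp_probes_sketch and allocate_probes_sketch are invented names that only mirror the struct above):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/tracepoint.h>

struct tp_probes_sketch {
	struct rcu_head rcu;
	struct tracepoint_func probes[];	/* flexible array member */
};

static void *allocate_probes_sketch(int count)
{
	/* struct_size() is roughly sizeof(*p) + count * sizeof(p->probes[0]),
	 * saturating instead of wrapping on overflow. */
	struct tp_probes_sketch *p = kmalloc(struct_size(p, probes, count),
					     GFP_KERNEL);

	return p ? p->probes : NULL;
}
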
diff --git a/kernel/user.c b/kernel/user.c
index b1635d94a1f2..a2478cddf536 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -55,7 +55,7 @@ struct user_namespace init_user_ns = {
},
},
},
- .count = ATOMIC_INIT(3),
+ .ns.count = REFCOUNT_INIT(3),
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.ns.inum = PROC_USER_INIT_INO,
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e703d5d9cbe8..af612945a4d0 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -111,7 +111,7 @@ int create_user_ns(struct cred *new)
goto fail_free;
ns->ns.ops = &userns_operations;
- atomic_set(&ns->count, 1);
+ refcount_set(&ns->ns.count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->level = parent_ns->level + 1;
@@ -197,7 +197,7 @@ static void free_user_ns(struct work_struct *work)
kmem_cache_free(user_ns_cachep, ns);
dec_user_namespaces(ucounts);
ns = parent;
- } while (atomic_dec_and_test(&parent->count));
+ } while (refcount_dec_and_test(&parent->ns.count));
}
void __put_user_ns(struct user_namespace *ns)
diff --git a/kernel/utsname.c b/kernel/utsname.c
index e488d0e2ab45..b1ac3ca870f2 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -33,7 +33,7 @@ static struct uts_namespace *create_uts_ns(void)
uts_ns = kmem_cache_alloc(uts_ns_cache, GFP_KERNEL);
if (uts_ns)
- kref_init(&uts_ns->kref);
+ refcount_set(&uts_ns->ns.count, 1);
return uts_ns;
}
@@ -103,11 +103,8 @@ struct uts_namespace *copy_utsname(unsigned long flags,
return new_ns;
}
-void free_uts_ns(struct kref *kref)
+void free_uts_ns(struct uts_namespace *ns)
{
- struct uts_namespace *ns;
-
- ns = container_of(kref, struct uts_namespace, kref);
dec_uts_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
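
kernel/user.c, kernel/user_namespace.c and kernel/utsname.c above are all part of one conversion: the per-namespace atomic_t/kref counters are replaced by the refcount_t that now lives in the shared ns_common. After such a conversion the get/put helpers usually look roughly like the sketch below (my_namespace, my_ns_get and my_ns_put are invented names; the destructor is simplified to kfree()):

#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_namespace {
	struct ns_common ns;	/* ns.count is the refcount_t, set up with
				 * REFCOUNT_INIT() or refcount_set() */
	/* ... namespace-specific state ... */
};

static void my_ns_get(struct my_namespace *my_ns)
{
	refcount_inc(&my_ns->ns.count);	/* warns and saturates instead of overflowing */
}

static void my_ns_put(struct my_namespace *my_ns)
{
	if (refcount_dec_and_test(&my_ns->ns.count))
		kfree(my_ns);		/* a real namespace runs its full teardown here */
}
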
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5abb5b22ad13..71109065bd8e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -44,8 +44,6 @@ int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;
-static struct cpumask watchdog_allowed_mask __read_mostly;
-
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -162,6 +160,8 @@ static void lockup_detector_update_enable(void)
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif
+static struct cpumask watchdog_allowed_mask __read_mostly;
+
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 437935e7a199..894bb885b40b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1327,6 +1327,9 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
{
struct worker_pool *pool = pwq->pool;
+ /* record the work call stack in order to print it in KASAN reports */
+ kasan_record_aux_stack(work);
+
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head);
@@ -1846,18 +1849,17 @@ static void worker_attach_to_pool(struct worker *worker,
mutex_lock(&wq_pool_attach_mutex);
/*
- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
- * online CPUs. It'll be re-applied when any of the CPUs come up.
- */
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
- /*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
* definition for details.
*/
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
+ else
+ kthread_set_per_cpu(worker->task, pool->cpu);
+
+ if (worker->rescue_wq)
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
@@ -1880,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
mutex_lock(&wq_pool_attach_mutex);
+ kthread_set_per_cpu(worker->task, -1);
list_del(&worker->node);
worker->pool = NULL;
@@ -3728,17 +3731,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
* is updated and visible.
*/
if (!freezable || !workqueue_freezing) {
+ bool kick = false;
+
pwq->max_active = wq->saved_max_active;
while (!list_empty(&pwq->delayed_works) &&
- pwq->nr_active < pwq->max_active)
+ pwq->nr_active < pwq->max_active) {
pwq_activate_first_delayed(pwq);
+ kick = true;
+ }
/*
* Need to kick a worker after thawed or an unbound wq's
- * max_active is bumped. It's a slow path. Do it always.
+ * max_active is bumped. In realtime scenarios, always kicking a
+ * worker will cause interference on the isolated cpu cores, so
+ * let's kick iff work items were activated.
*/
- wake_up_worker(pwq->pool);
+ if (kick)
+ wake_up_worker(pwq->pool);
} else {
pwq->max_active = 0;
}
@@ -4908,6 +4918,12 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
raw_spin_unlock_irq(&pool->lock);
+
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, -1);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ }
+
mutex_unlock(&wq_pool_attach_mutex);
/*
@@ -4958,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
* of all workers first and then clear UNBOUND. As we're called
* from CPU_ONLINE, the following shouldn't fail.
*/
- for_each_pool_worker(worker, pool)
+ for_each_pool_worker(worker, pool) {
+ kthread_set_per_cpu(worker->task, pool->cpu);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
+ }
raw_spin_lock_irq(&pool->lock);