Diffstat (limited to 'kernel')
-rw-r--r-- kernel/Makefile | 3
-rw-r--r-- kernel/cpu.c | 5
-rw-r--r-- kernel/exit.c | 4
-rw-r--r-- kernel/fork.c | 5
-rw-r--r-- kernel/futex.c | 10
-rw-r--r-- kernel/hrtimer.c | 120
-rw-r--r-- kernel/hw_breakpoint.c | 146
-rw-r--r-- kernel/irq/manage.c | 2
-rw-r--r-- kernel/irq/spurious.c | 2
-rw-r--r-- kernel/itimer.c | 7
-rw-r--r-- kernel/kgdb.c | 56
-rw-r--r-- kernel/lockdep.c | 16
-rw-r--r-- kernel/perf_event.c | 79
-rw-r--r-- kernel/pm_qos_params.c | 20
-rw-r--r-- kernel/posix-cpu-timers.c | 5
-rw-r--r-- kernel/power/Makefile | 2
-rw-r--r-- kernel/power/hibernate.c | 30
-rw-r--r-- kernel/power/main.c | 1
-rw-r--r-- kernel/power/process.c | 14
-rw-r--r-- kernel/power/swap.c | 107
-rw-r--r-- kernel/power/swsusp.c | 130
-rw-r--r-- kernel/resource.c | 26
-rw-r--r-- kernel/sched.c | 5
-rw-r--r-- kernel/slow-work.c | 7
-rw-r--r-- kernel/sys.c | 14
-rw-r--r-- kernel/sys_ni.c | 3
-rw-r--r-- kernel/sysctl.c | 895
-rw-r--r-- kernel/sysctl_binary.c | 1507
-rw-r--r-- kernel/sysctl_check.c | 1376
-rw-r--r-- kernel/time.c | 1
-rw-r--r-- kernel/time/clockevents.c | 13
-rw-r--r-- kernel/time/clocksource.c | 105
-rw-r--r-- kernel/time/tick-oneshot.c | 4
-rw-r--r-- kernel/time/tick-sched.c | 141
-rw-r--r-- kernel/time/timecompare.c | 6
-rw-r--r-- kernel/time/timekeeping.c | 125
-rw-r--r-- kernel/time/timer_list.c | 15
-rw-r--r-- kernel/trace/trace.c | 57
-rw-r--r-- kernel/trace/trace.h | 2
-rw-r--r-- kernel/trace/trace_functions_graph.c | 165
-rw-r--r-- kernel/trace/trace_kprobe.c | 41
-rw-r--r-- kernel/trace/trace_ksym.c | 5
-rw-r--r-- kernel/trace/trace_output.c | 75
-rw-r--r-- kernel/user-return-notifier.c | 44
-rw-r--r-- kernel/utsname_sysctl.c | 31
-rw-r--r-- kernel/workqueue.c | 131
46 files changed, 2854 insertions, 2704 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 982c50e2ce53..864ff75d65f2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -4,7 +4,7 @@
obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
- sysctl.o capability.o ptrace.o timer.o user.o \
+ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
@@ -99,6 +99,7 @@ obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b21688640377..291ac586f37f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -400,10 +400,9 @@ int disable_nonboot_cpus(void)
if (cpu == first_cpu)
continue;
error = _cpu_down(cpu, 1);
- if (!error) {
+ if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
- printk("CPU%d is down\n", cpu);
- } else {
+ else {
printk(KERN_ERR "Error taking CPU%d down: %d\n",
cpu, error);
break;
diff --git a/kernel/exit.c b/kernel/exit.c
index 80ae941cfd2e..6f50ef55a6f3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -971,7 +971,7 @@ NORET_TYPE void do_exit(long code)
exit_thread();
cgroup_exit(tsk, 1);
- if (group_dead && tsk->signal->leader)
+ if (group_dead)
disassociate_ctty(1);
module_put(task_thread_info(tsk)->exec_domain->module);
@@ -1009,7 +1009,7 @@ NORET_TYPE void do_exit(long code)
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
- exit_io_context();
+ exit_io_context(tsk);
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3d6f121bbe8a..1415dc4598ae 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -64,6 +64,7 @@
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
+#include <linux/user-return-notifier.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -249,6 +250,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
goto out;
setup_thread_stack(tsk, orig);
+ clear_user_return_notifier(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
@@ -1315,7 +1317,8 @@ bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
- put_io_context(p->io_context);
+ if (p->io_context)
+ exit_io_context(p);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
diff --git a/kernel/futex.c b/kernel/futex.c
index fb65e822fc41..d73ef1f3e55d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -304,8 +304,14 @@ void put_futex_key(int fshared, union futex_key *key)
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
- int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
- 1, 1, 0, NULL, NULL);
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, (unsigned long)uaddr,
+ 1, 1, 0, NULL, NULL);
+ up_read(&mm->mmap_sem);
+
return ret < 0 ? ret : 0;
}
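
The fix here is purely about get_user_pages()'s locking contract: the caller must hold mm->mmap_sem for read, and the old code called it unlocked. A minimal userspace sketch of the same "fault it in under the address-space lock" shape, with illustrative names rather than kernel API:

#include <pthread.h>
#include <stdint.h>

static pthread_rwlock_t as_lock = PTHREAD_RWLOCK_INITIALIZER; /* stands in for mm->mmap_sem */

static int fault_in_writeable(volatile uint32_t *uaddr)
{
	pthread_rwlock_rdlock(&as_lock);	/* down_read(&mm->mmap_sem) */
	*uaddr = *uaddr;			/* a write access faults the page in */
	pthread_rwlock_unlock(&as_lock);	/* up_read(&mm->mmap_sem) */
	return 0;
}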
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 3e1c36e7998f..d2f9239dc6ba 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
- ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
int res;
@@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
if (expires.tv64 < 0)
return -ETIME;
- if (expires.tv64 >= expires_next->tv64)
+ if (expires.tv64 >= cpu_base->expires_next.tv64)
+ return 0;
+
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+ if (cpu_base->hang_detected)
return 0;
/*
@@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
*/
res = tick_program_event(expires, 0);
if (!IS_ERR_VALUE(res))
- *expires_next = expires;
+ cpu_base->expires_next = expires;
return res;
}
@@ -747,17 +756,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+#ifdef CONFIG_TIMER_STATS
if (timer->start_site)
return;
-
- timer->start_site = addr;
+ timer->start_site = __builtin_return_address(0);
memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
timer->start_pid = current->pid;
+#endif
}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ timer->start_site = NULL;
+#endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ if (likely(!timer_stats_active))
+ return;
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm, 0);
#endif
+}
/*
* Counterpart to lock_hrtimer_base above:
@@ -1217,29 +1242,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
#ifdef CONFIG_HIGH_RES_TIMERS
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
- ktime_t try_time)
-{
- force_clock_reprogram = 1;
- dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
- printk(KERN_WARNING "hrtimer: interrupt too slow, "
- "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
-}
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1248,21 +1250,15 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- ktime_t expires_next, now;
- int nr_retries = 0;
- int i;
+ ktime_t expires_next, now, entry_time, delta;
+ int i, retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;
- retry:
- /* 5 retries is enough to notice a hang */
- if (!(++nr_retries % 5))
- hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
- now = ktime_get();
-
+ entry_time = now = ktime_get();
+retry:
expires_next.tv64 = KTIME_MAX;
spin_lock(&cpu_base->lock);
@@ -1324,10 +1320,48 @@ void hrtimer_interrupt(struct clock_event_device *dev)
spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
- if (expires_next.tv64 != KTIME_MAX) {
- if (tick_program_event(expires_next, force_clock_reprogram))
- goto retry;
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
+ return;
}
+
+ /*
+ * The next timer was already expired due to:
+ * - tracing
+ * - long lasting callbacks
+ * - being scheduled away when running in a VM
+ *
+ * We need to make sure that we do not loop forever in the
+ * hrtimer interrupt routine. We give it 3 attempts to avoid
+ * overreacting to some spurious event.
+ */
+ now = ktime_get();
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+ /*
+ * Give the system a chance to do something else than looping
+ * here. We stored the entry time, so we know exactly how long
+ * we spent here. We schedule the next event this amount of
+ * time away.
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+ /*
+ * Limit it to a sensible value as we enforce a longer
+ * delay. Give the CPU at least 100ms to catch up.
+ */
+ if (delta.tv64 > 100 * NSEC_PER_MSEC)
+ expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+ else
+ expires_next = ktime_add(now, delta);
+ tick_program_event(expires_next, 1);
+ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+ ktime_to_ns(delta));
}
/*
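
The hang handling above replaces the old global force_clock_reprogram: the interrupt gets three passes through the expiry loop, and only then is the CPU declared hanging, with the next event pushed out by however long was already spent, capped at 100ms. A self-contained sketch of that retry-then-bounded-backoff shape, assuming CLOCK_MONOTONIC timestamps (illustrative, not the kernel's code):

#include <stdint.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000LL

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Give the expiry loop 3 attempts; then schedule the next event as far
 * away as the time already burned, but no more than 100ms out. */
static int64_t next_event_after_hang(int64_t entry_time, int *hang_detected)
{
	int64_t now, delta;
	int retries = 0;

	do {
		now = now_ns();
		/* ... expire timers, try to program the next event ... */
	} while (++retries < 3);

	*hang_detected = 1;
	delta = now - entry_time;
	if (delta > 100 * NSEC_PER_MSEC)
		delta = 100 * NSEC_PER_MSEC;
	return now + delta;
}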
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee1628411..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
/* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
static unsigned int max_task_bp_pinned(int cpu)
{
int i;
- unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+ unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
for (i = HBP_NUM -1; i >= 0; i--) {
if (tsk_pinned[i] > 0)
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
return 0;
}
+static int task_bp_pinned(struct task_struct *tsk)
+{
+ struct perf_event_context *ctx = tsk->perf_event_ctxp;
+ struct list_head *list;
+ struct perf_event *bp;
+ unsigned long flags;
+ int count = 0;
+
+ if (WARN_ONCE(!ctx, "No perf context for this task"))
+ return 0;
+
+ list = &ctx->event_list;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ /*
+ * The current breakpoint counter is not included in the list
+ * at the open() callback time
+ */
+ list_for_each_entry(bp, list, event_entry) {
+ if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+ count++;
+ }
+
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return count;
+}
+
/*
* Report the number of pinned/un-pinned breakpoints we have in
* a given cpu (cpu > -1) or in all of them (cpu = -1).
*/
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
+
if (cpu >= 0) {
slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
- slots->pinned += max_task_bp_pinned(cpu);
+ if (!tsk)
+ slots->pinned += max_task_bp_pinned(cpu);
+ else
+ slots->pinned += task_bp_pinned(tsk);
slots->flexible = per_cpu(nr_bp_flexible, cpu);
return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
unsigned int nr;
nr = per_cpu(nr_cpu_bp_pinned, cpu);
- nr += max_task_bp_pinned(cpu);
+ if (!tsk)
+ nr += max_task_bp_pinned(cpu);
+ else
+ nr += task_bp_pinned(tsk);
if (nr > slots->pinned)
slots->pinned = nr;
@@ -118,35 +157,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
*/
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
- int count = 0;
- struct perf_event *bp;
- struct perf_event_context *ctx = tsk->perf_event_ctxp;
unsigned int *tsk_pinned;
- struct list_head *list;
- unsigned long flags;
-
- if (WARN_ONCE(!ctx, "No perf context for this task"))
- return;
-
- list = &ctx->event_list;
-
- spin_lock_irqsave(&ctx->lock, flags);
-
- /*
- * The current breakpoint counter is not included in the list
- * at the open() callback time
- */
- list_for_each_entry(bp, list, event_entry) {
- if (bp->attr.type == PERF_TYPE_BREAKPOINT)
- count++;
- }
+ int count = 0;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ count = task_bp_pinned(tsk);
- if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
- return;
-
- tsk_pinned = per_cpu(task_bp_pinned, cpu);
+ tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
if (enable) {
tsk_pinned[count]++;
if (count > 0)
@@ -193,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to a single cpu, check:
*
* (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
*
* -> If there are already non-pinned counters in this cpu, it means
* there is already a free slot for them.
@@ -204,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to every cpus, check:
*
* (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
*
* -> This is roughly the same, except we check the number of per cpu
* bp for every cpu and we keep the max one. Same for the per tasks
@@ -216,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to a single cpu, check:
*
* ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
*
* -> Same checks as before. But now the nr_bp_flexible, if any, must keep
* one register at least (or they will never be fed).
@@ -224,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to every cpus, check:
*
* ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
*/
int reserve_bp_slot(struct perf_event *bp)
{
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
mutex_lock(&nr_bp_mutex);
- fetch_bp_busy_slots(&slots, bp->cpu);
+ fetch_bp_busy_slots(&slots, bp);
/* Flexible counters need to keep at least one slot */
if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
@@ -259,7 +275,7 @@ void release_bp_slot(struct perf_event *bp)
}
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int register_perf_hw_breakpoint(struct perf_event *bp)
{
int ret;
@@ -276,19 +292,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp)
* This is a quick hack that will be removed soon, once we remove
* the tmp breakpoints from ptrace
*/
- if (!bp->attr.disabled || bp->callback == perf_bp_event)
+ if (!bp->attr.disabled || !bp->overflow_handler)
ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
return ret;
}
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
- bp->callback = perf_bp_event;
-
- return __register_perf_hw_breakpoint(bp);
-}
-
/**
* register_user_hw_breakpoint - register a hardware breakpoint for user space
* @attr: breakpoint attributes
@@ -297,7 +306,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
*/
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
@@ -311,19 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
* @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- /*
- * FIXME: do it without unregistering
- * - We don't want to lose our slot
- * - If the new bp is incorrect, don't lose the older one
- */
- unregister_hw_breakpoint(bp);
+ u64 old_addr = bp->attr.bp_addr;
+ int old_type = bp->attr.bp_type;
+ int old_len = bp->attr.bp_len;
+ int err = 0;
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+ perf_event_disable(bp);
+
+ bp->attr.bp_addr = attr->bp_addr;
+ bp->attr.bp_type = attr->bp_type;
+ bp->attr.bp_len = attr->bp_len;
+
+ if (attr->disabled)
+ goto end;
+
+ err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ if (!err)
+ perf_event_enable(bp);
+
+ if (err) {
+ bp->attr.bp_addr = old_addr;
+ bp->attr.bp_type = old_type;
+ bp->attr.bp_len = old_len;
+ if (!bp->attr.disabled)
+ perf_event_enable(bp);
+
+ return err;
+ }
+
+end:
+ bp->attr.disabled = attr->disabled;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
@@ -348,7 +378,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
*/
struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered)
+ perf_overflow_handler_t triggered)
{
struct perf_event **cpu_events, **pevent, *bp;
long err;
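
modify_user_hw_breakpoint() now edits the breakpoint in place instead of unregister-and-recreate, which could lose both the slot and the old breakpoint when the new settings were bad. The shape is save, swap in, validate, roll back on failure; a generic sketch with a hypothetical validator standing in for arch_validate_hwbkpt_settings():

#include <errno.h>

struct bp_attr { unsigned long addr; int type; int len; };

/* Hypothetical stand-in for arch_validate_hwbkpt_settings(). */
static int validate(const struct bp_attr *a)
{
	return (a->len == 1 || a->len == 2 || a->len == 4) ? 0 : -EINVAL;
}

/* Modify in place; restore the saved attributes on validation failure
 * so neither the slot nor the old breakpoint is ever lost. */
static int modify_bp(struct bp_attr *cur, const struct bp_attr *new)
{
	struct bp_attr old = *cur;		/* save for rollback */
	int err;

	*cur = *new;
	err = validate(cur);
	if (err)
		*cur = old;			/* roll back */
	return err;
}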
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bde4c667d24d..7305b297d1eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1067,7 +1067,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
kfree(action);
#ifdef CONFIG_DEBUG_SHIRQ
- if (irqflags & IRQF_SHARED) {
+ if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 22b0a6eedf24..e49ea1c5232d 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -220,7 +220,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
/*
* If we are seeing only the odd spurious IRQ caused by
* bus asynchronicity then don't eventually trigger an error,
- * otherwise the couter becomes a doomsday timer for otherwise
+ * otherwise the counter becomes a doomsday timer for otherwise
* working systems
*/
if (time_after(jiffies, desc->last_unhandled + HZ/10))
diff --git a/kernel/itimer.c b/kernel/itimer.c
index b03451ede528..d802883153da 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -146,6 +146,7 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
{
cputime_t cval, nval, cinterval, ninterval;
s64 ns_ninterval, ns_nval;
+ u32 error, incr_error;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
nval = timeval_to_cputime(&value->it_value);
@@ -153,8 +154,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
ninterval = timeval_to_cputime(&value->it_interval);
ns_ninterval = timeval_to_ns(&value->it_interval);
- it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
- it->error = cputime_sub_ns(nval, ns_nval);
+ error = cputime_sub_ns(nval, ns_nval);
+ incr_error = cputime_sub_ns(ninterval, ns_ninterval);
spin_lock_irq(&tsk->sighand->siglock);
@@ -168,6 +169,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
}
it->expires = nval;
it->incr = ninterval;
+ it->error = error;
+ it->incr_error = incr_error;
trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 7d7014634022..2eb517e23514 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -129,6 +129,7 @@ struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;
int kgdb_single_step;
+pid_t kgdb_sstep_pid;
/* Our I/O buffers. */
static char remcom_in_buffer[BUFMAX];
@@ -541,12 +542,17 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
*/
if (tid == 0 || tid == -1)
tid = -atomic_read(&kgdb_active) - 2;
- if (tid < 0) {
+ if (tid < -1 && tid > -NR_CPUS - 2) {
if (kgdb_info[-tid - 2].task)
return kgdb_info[-tid - 2].task;
else
return idle_task(-tid - 2);
}
+ if (tid <= 0) {
+ printk(KERN_ERR "KGDB: Internal thread select error\n");
+ dump_stack();
+ return NULL;
+ }
/*
* find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -619,7 +625,8 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
static int kgdb_activate_sw_breakpoints(void)
{
unsigned long addr;
- int error = 0;
+ int error;
+ int ret = 0;
int i;
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -629,13 +636,16 @@ static int kgdb_activate_sw_breakpoints(void)
addr = kgdb_break[i].bpt_addr;
error = kgdb_arch_set_breakpoint(addr,
kgdb_break[i].saved_instr);
- if (error)
- return error;
+ if (error) {
+ ret = error;
+ printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+ continue;
+ }
kgdb_flush_swbreak_addr(addr);
kgdb_break[i].state = BP_ACTIVE;
}
- return 0;
+ return ret;
}
static int kgdb_set_sw_break(unsigned long addr)
@@ -682,7 +692,8 @@ static int kgdb_set_sw_break(unsigned long addr)
static int kgdb_deactivate_sw_breakpoints(void)
{
unsigned long addr;
- int error = 0;
+ int error;
+ int ret = 0;
int i;
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -691,13 +702,15 @@ static int kgdb_deactivate_sw_breakpoints(void)
addr = kgdb_break[i].bpt_addr;
error = kgdb_arch_remove_breakpoint(addr,
kgdb_break[i].saved_instr);
- if (error)
- return error;
+ if (error) {
+ printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+ ret = error;
+ }
kgdb_flush_swbreak_addr(addr);
kgdb_break[i].state = BP_SET;
}
- return 0;
+ return ret;
}
static int kgdb_remove_sw_break(unsigned long addr)
@@ -1204,8 +1217,10 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
return 1;
} else {
- error_packet(remcom_out_buffer, -EINVAL);
- return 0;
+ kgdb_msg_write("KGDB only knows signal 9 (pass)"
+ " and 15 (pass and disconnect)\n"
+ "Executing a continue without signal passing\n", 0);
+ remcom_in_buffer[0] = 'c';
}
/* Indicate fall through */
@@ -1395,6 +1410,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
struct kgdb_state kgdb_var;
struct kgdb_state *ks = &kgdb_var;
unsigned long flags;
+ int sstep_tries = 100;
int error = 0;
int i, cpu;
@@ -1425,13 +1441,14 @@ acquirelock:
cpu_relax();
/*
- * Do not start the debugger connection on this CPU if the last
- * instance of the exception handler wanted to come into the
- * debugger on a different CPU via a single step
+ * For single stepping, try to only enter on the processor
+ * that was single stepping. To guard against a deadlock, the
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
*/
if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
-
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog();
clocksource_touch_watchdog();
@@ -1524,6 +1541,13 @@ acquirelock:
}
kgdb_restore:
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+ int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+ kgdb_sstep_pid = 0;
+ }
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog();
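
Both breakpoint walkers above switch from fail-fast to continue-on-error: a bad address is logged, its error remembered, and the remaining breakpoints are still processed, with the last error returned at the end. A compact sketch of that accumulate-and-continue loop:

#include <stdio.h>

/* Apply op to every item; keep going past failures and report the
 * last error, as kgdb_activate_sw_breakpoints() now does. */
static int apply_all(int (*op)(unsigned long), const unsigned long *addrs, int n)
{
	int ret = 0, error, i;

	for (i = 0; i < n; i++) {
		error = op(addrs[i]);
		if (error) {
			fprintf(stderr, "op failed at %lx: %d\n", addrs[i], error);
			ret = error;		/* remember, but continue */
		}
	}
	return ret;
}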
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f5dcd36d3151..4f8df01dbe51 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -168,7 +168,7 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
if (time > lt->max)
lt->max = time;
- if (time < lt->min || !lt->min)
+ if (time < lt->min || !lt->nr)
lt->min = time;
lt->total += time;
@@ -177,8 +177,15 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
- dst->min += src->min;
- dst->max += src->max;
+ if (!src->nr)
+ return;
+
+ if (src->max > dst->max)
+ dst->max = src->max;
+
+ if (src->min < dst->min || !dst->nr)
+ dst->min = src->min;
+
dst->total += src->total;
dst->nr += src->nr;
}
@@ -379,7 +386,8 @@ static int save_trace(struct stack_trace *trace)
* complete trace that maxes out the entries provided will be reported
* as incomplete, friggin useless </rant>
*/
- if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+ if (trace->nr_entries != 0 &&
+ trace->entries[trace->nr_entries-1] == ULONG_MAX)
trace->nr_entries--;
trace->max_entries = trace->nr_entries;
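
The lock_time_add() fix is worth spelling out: the old code summed per-cpu minima and maxima, which yields neither a minimum nor a maximum. Aggregates merge by taking extrema and summing only total and nr, and an empty source (nr == 0) must be skipped or its zero min poisons the result. A runnable sketch:

#include <stdio.h>

struct lock_time { long min, max, total; unsigned long nr; };

static void lock_time_add(const struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)				/* nothing sampled: skip */
		return;
	if (src->max > dst->max)
		dst->max = src->max;
	if (src->min < dst->min || !dst->nr)	/* first real sample wins */
		dst->min = src->min;
	dst->total += src->total;
	dst->nr += src->nr;
}

int main(void)
{
	struct lock_time a = { 3, 9, 12, 2 }, b = { 1, 5, 6, 3 };
	struct lock_time sum = { 0, 0, 0, 0 };

	lock_time_add(&a, &sum);
	lock_time_add(&b, &sum);
	printf("min=%ld max=%ld nr=%lu\n", sum.min, sum.max, sum.nr);
	return 0;				/* min=1 max=9 nr=5 */
}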
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 6b7ddba1dd64..e73e53c7582f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -36,7 +36,7 @@
/*
* Each CPU has a list of per CPU events:
*/
-DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
+static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
@@ -476,7 +476,7 @@ static void perf_event_remove_from_context(struct perf_event *event)
if (!task) {
/*
* Per cpu events are removed via an smp call and
- * the removal is always sucessful.
+ * the removal is always successful.
*/
smp_call_function_single(event->cpu,
__perf_event_remove_from_context,
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-static void perf_event_disable(struct perf_event *event)
+void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -845,7 +845,7 @@ perf_install_in_context(struct perf_event_context *ctx,
if (!task) {
/*
* Per cpu events are installed via an smp call and
- * the install is always sucessful.
+ * the install is always successful.
*/
smp_call_function_single(cpu, __perf_install_in_context,
event, 1);
@@ -971,7 +971,7 @@ static void __perf_event_enable(void *info)
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-static void perf_event_enable(struct perf_event *event)
+void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1579,7 +1579,6 @@ static void
__perf_event_init_context(struct perf_event_context *ctx,
struct task_struct *task)
{
- memset(ctx, 0, sizeof(*ctx));
spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
@@ -1654,7 +1653,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
}
if (!ctx) {
- ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+ ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
err = -ENOMEM;
if (!ctx)
goto errout;
@@ -4011,6 +4010,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
event->pmu->read(event);
data.addr = 0;
+ data.raw = NULL;
data.period = event->hw.last_period;
regs = get_irq_regs();
/*
@@ -4080,8 +4080,7 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
u64 now;
now = cpu_clock(cpu);
- prev = atomic64_read(&event->hw.prev_count);
- atomic64_set(&event->hw.prev_count, now);
+ prev = atomic64_xchg(&event->hw.prev_count, now);
atomic64_add(now - prev, &event->count);
}
@@ -4286,15 +4285,8 @@ static void bp_perf_event_destroy(struct perf_event *event)
static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
int err;
- /*
- * The breakpoint is already filled if we haven't created the counter
- * through perf syscall
- * FIXME: manage to get trigerred to NULL if it comes from syscalls
- */
- if (!bp->callback)
- err = register_perf_hw_breakpoint(bp);
- else
- err = __register_perf_hw_breakpoint(bp);
+
+ err = register_perf_hw_breakpoint(bp);
if (err)
return ERR_PTR(err);
@@ -4308,6 +4300,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
struct perf_sample_data sample;
struct pt_regs *regs = data;
+ sample.raw = NULL;
sample.addr = bp->attr.bp_addr;
if (!perf_exclude_event(bp, regs))
@@ -4390,7 +4383,7 @@ perf_event_alloc(struct perf_event_attr *attr,
struct perf_event_context *ctx,
struct perf_event *group_leader,
struct perf_event *parent_event,
- perf_callback_t callback,
+ perf_overflow_handler_t overflow_handler,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -4433,10 +4426,10 @@ perf_event_alloc(struct perf_event_attr *attr,
event->state = PERF_EVENT_STATE_INACTIVE;
- if (!callback && parent_event)
- callback = parent_event->callback;
+ if (!overflow_handler && parent_event)
+ overflow_handler = parent_event->overflow_handler;
- event->callback = callback;
+ event->overflow_handler = overflow_handler;
if (attr->disabled)
event->state = PERF_EVENT_STATE_OFF;
@@ -4776,7 +4769,8 @@ err_put_context:
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
- pid_t pid, perf_callback_t callback)
+ pid_t pid,
+ perf_overflow_handler_t overflow_handler)
{
struct perf_event *event;
struct perf_event_context *ctx;
@@ -4793,7 +4787,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
}
event = perf_event_alloc(attr, cpu, ctx, NULL,
- NULL, callback, GFP_KERNEL);
+ NULL, overflow_handler, GFP_KERNEL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_put_context;
@@ -5090,7 +5084,7 @@ again:
*/
int perf_event_init_task(struct task_struct *child)
{
- struct perf_event_context *child_ctx, *parent_ctx;
+ struct perf_event_context *child_ctx = NULL, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
@@ -5106,20 +5100,6 @@ int perf_event_init_task(struct task_struct *child)
return 0;
/*
- * This is executed from the parent task context, so inherit
- * events that have been marked for cloning.
- * First allocate and initialize a context for the child.
- */
-
- child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
- if (!child_ctx)
- return -ENOMEM;
-
- __perf_event_init_context(child_ctx, child);
- child->perf_event_ctxp = child_ctx;
- get_task_struct(child);
-
- /*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
@@ -5149,6 +5129,26 @@ int perf_event_init_task(struct task_struct *child)
continue;
}
+ if (!child->perf_event_ctxp) {
+ /*
+ * This is executed from the parent task context, so
+ * inherit events that have been marked for cloning.
+ * First allocate and initialize a context for the
+ * child.
+ */
+
+ child_ctx = kzalloc(sizeof(struct perf_event_context),
+ GFP_KERNEL);
+ if (!child_ctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ __perf_event_init_context(child_ctx, child);
+ child->perf_event_ctxp = child_ctx;
+ get_task_struct(child);
+ }
+
ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret) {
@@ -5177,6 +5177,7 @@ int perf_event_init_task(struct task_struct *child)
get_ctx(child_ctx->parent_ctx);
}
+exit:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
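
Among the perf changes, cpu_clock_perf_event_update() replaces a separate atomic64_read()/atomic64_set() pair on hw.prev_count with a single atomic64_xchg(), so a concurrent updater can no longer slip between the read and the set and have an interval counted twice or lost. The same pattern in C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t count;

/* One atomic exchange instead of read-then-set: each caller consumes
 * exactly the interval since the previous caller's timestamp. */
static void clock_event_update(uint64_t now)
{
	uint64_t prev = atomic_exchange(&prev_count, now);

	atomic_fetch_add(&count, now - prev);
}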
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index dfdec524d1b7..3db49b9ca374 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -29,7 +29,6 @@
#include <linux/pm_qos_params.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
@@ -344,37 +343,33 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-#define PID_NAME_LEN sizeof("process_1234567890")
-static char name[PID_NAME_LEN];
+#define PID_NAME_LEN 32
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
int ret;
long pm_qos_class;
+ char name[PID_NAME_LEN];
- lock_kernel();
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
filp->private_data = (void *)pm_qos_class;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
ret = pm_qos_add_requirement(pm_qos_class, name,
PM_QOS_DEFAULT_VALUE);
- if (ret >= 0) {
- unlock_kernel();
+ if (ret >= 0)
return 0;
- }
}
- unlock_kernel();
-
return -EPERM;
}
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
int pm_qos_class;
+ char name[PID_NAME_LEN];
pm_qos_class = (long)filp->private_data;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
pm_qos_remove_requirement(pm_qos_class, name);
return 0;
@@ -385,13 +380,14 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
{
s32 value;
int pm_qos_class;
+ char name[PID_NAME_LEN];
pm_qos_class = (long)filp->private_data;
if (count != sizeof(s32))
return -EINVAL;
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
pm_qos_update_requirement(pm_qos_class, name, value);
return sizeof(s32);
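
All three pm_qos paths stop formatting the requester name into one file-scope static buffer with an unbounded sprintf(); each call now owns a PID_NAME_LEN stack buffer and writes it with snprintf(), so concurrent opens cannot scribble over each other once the BKL is gone. A minimal userspace version of the corrected pattern:

#include <stdio.h>
#include <unistd.h>

#define PID_NAME_LEN 32

int main(void)
{
	char name[PID_NAME_LEN];		/* per-call, not shared */

	snprintf(name, sizeof(name), "process_%d", (int)getpid());
	puts(name);
	return 0;
}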
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 5c9dc228747b..438ff4523513 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -384,7 +384,8 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
- * This is called from sys_timer_create with the new timer already locked.
+ * This is called from sys_timer_create() and do_cpu_nanosleep() with the
+ * new timer already zero-initialized.
*/
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
@@ -396,8 +397,6 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
return -EINVAL;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
- new_timer->it.cpu.incr.sched = 0;
- new_timer->it.cpu.expires.sched = 0;
read_lock(&tasklist_lock);
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c3b81c30e5d5..43191815f874 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04a9e90d248f..bbfe472d7524 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -32,6 +32,7 @@ static int noresume = 0;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
+int in_suspend __nosavedata = 0;
enum {
HIBERNATION_INVALID,
@@ -202,6 +203,35 @@ static void platform_recover(int platform_mode)
}
/**
+ * swsusp_show_speed - print the time elapsed between two events.
+ * @start: Starting event.
+ * @stop: Final event.
+ * @nr_pages: number of pages processed between @start and @stop
+ * @msg: introductory message to print
+ */
+
+void swsusp_show_speed(struct timeval *start, struct timeval *stop,
+ unsigned nr_pages, char *msg)
+{
+ s64 elapsed_centisecs64;
+ int centisecs;
+ int k;
+ int kps;
+
+ elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
+ do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
+ centisecs = elapsed_centisecs64;
+ if (centisecs == 0)
+ centisecs = 1; /* avoid div-by-zero */
+ k = nr_pages * (PAGE_SIZE / 1024);
+ kps = (k * 100) / centisecs;
+ printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
+ msg, k,
+ centisecs / 100, centisecs % 100,
+ kps / 1000, (kps % 1000) / 10);
+}
+
+/**
* create_image - freeze devices that need to be frozen with interrupts
* off, create the hibernation image and thaw those devices. Control
* reappears in this routine after a restore.
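
swsusp_show_speed() does its throughput math entirely in integers: elapsed nanoseconds are divided down to centiseconds (clamped to at least 1), pages become kbytes, and kps = k * 100 / centisecs gives KB/s with two printable decimal places of MB/s. Worked example, assuming 4 KiB pages and 2048 pages written in 2.5 seconds:

#include <stdio.h>

int main(void)
{
	long nr_pages = 2048, page_kib = 4;
	long centisecs = 250;			/* 2.50 s elapsed */
	long k = nr_pages * page_kib;		/* 8192 kbytes */
	long kps = (k * 100) / centisecs;	/* 3276 KB/s */

	printf("PM: Wrote %ld kbytes in %ld.%02ld seconds (%ld.%02ld MB/s)\n",
	       k, centisecs / 100, centisecs % 100,
	       kps / 1000, (kps % 1000) / 10);
	return 0;	/* PM: Wrote 8192 kbytes in 2.50 seconds (3.27 MB/s) */
}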
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 347d2cc88cd0..0998c7139053 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -220,6 +220,7 @@ static struct attribute_group attr_group = {
#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
+EXPORT_SYMBOL_GPL(pm_wq);
static int __init pm_start_workqueue(void)
{
diff --git a/kernel/power/process.c b/kernel/power/process.c
index cc2e55373b68..5ade1bdcf366 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
+#include <linux/delay.h>
/*
* Timeout for stopping processes
@@ -41,7 +42,7 @@ static int try_to_freeze_tasks(bool sig_only)
do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
- do {
+ while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
@@ -62,10 +63,15 @@ static int try_to_freeze_tasks(bool sig_only)
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
- yield(); /* Yield is okay here */
- if (time_after(jiffies, end_time))
+ if (!todo || time_after(jiffies, end_time))
break;
- } while (todo);
+
+ /*
+ * We need to retry, but first give the freezing tasks some
+ * time to enter the refrigerator.
+ */
+ msleep(10);
+ }
do_gettimeofday(&end);
elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
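
The freezer loop above trades a hot yield() spin for a 10ms msleep() between scans of the task list, giving not-yet-frozen tasks actual CPU time to reach the refrigerator. The generic poll-with-sleep shape in userspace C:

#include <stdbool.h>
#include <time.h>

/* Re-check a condition every 10ms until it holds or the budget runs
 * out -- the structure try_to_freeze_tasks() now uses. */
static bool poll_until(bool (*done)(void), int timeout_ms)
{
	struct timespec nap = { 0, 10 * 1000 * 1000 };	/* 10 ms */
	int waited_ms = 0;

	while (true) {
		if (done())
			return true;
		if (waited_ms >= timeout_ms)
			return false;
		nanosleep(&nap, NULL);
		waited_ms += 10;
	}
}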
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 890f6b11b1d3..09b2b0ae9e9d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -38,6 +38,107 @@ struct swsusp_header {
static struct swsusp_header *swsusp_header;
+/**
+ * The following functions are used for tracing the allocated
+ * swap pages, so that they can be freed in case of an error.
+ */
+
+struct swsusp_extent {
+ struct rb_node node;
+ unsigned long start;
+ unsigned long end;
+};
+
+static struct rb_root swsusp_extents = RB_ROOT;
+
+static int swsusp_extents_insert(unsigned long swap_offset)
+{
+ struct rb_node **new = &(swsusp_extents.rb_node);
+ struct rb_node *parent = NULL;
+ struct swsusp_extent *ext;
+
+ /* Figure out where to put the new node */
+ while (*new) {
+ ext = container_of(*new, struct swsusp_extent, node);
+ parent = *new;
+ if (swap_offset < ext->start) {
+ /* Try to merge */
+ if (swap_offset == ext->start - 1) {
+ ext->start--;
+ return 0;
+ }
+ new = &((*new)->rb_left);
+ } else if (swap_offset > ext->end) {
+ /* Try to merge */
+ if (swap_offset == ext->end + 1) {
+ ext->end++;
+ return 0;
+ }
+ new = &((*new)->rb_right);
+ } else {
+ /* It already is in the tree */
+ return -EINVAL;
+ }
+ }
+ /* Add the new node and rebalance the tree. */
+ ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->start = swap_offset;
+ ext->end = swap_offset;
+ rb_link_node(&ext->node, parent, new);
+ rb_insert_color(&ext->node, &swsusp_extents);
+ return 0;
+}
+
+/**
+ * alloc_swapdev_block - allocate a swap page and register that it has
+ * been allocated, so that it can be freed in case of an error.
+ */
+
+sector_t alloc_swapdev_block(int swap)
+{
+ unsigned long offset;
+
+ offset = swp_offset(get_swap_page_of_type(swap));
+ if (offset) {
+ if (swsusp_extents_insert(offset))
+ swap_free(swp_entry(swap, offset));
+ else
+ return swapdev_block(swap, offset);
+ }
+ return 0;
+}
+
+/**
+ * free_all_swap_pages - free swap pages allocated for saving image data.
+ * It also frees the extents used to register which swap entries had been
+ * allocated.
+ */
+
+void free_all_swap_pages(int swap)
+{
+ struct rb_node *node;
+
+ while ((node = swsusp_extents.rb_node)) {
+ struct swsusp_extent *ext;
+ unsigned long offset;
+
+ ext = container_of(node, struct swsusp_extent, node);
+ rb_erase(node, &swsusp_extents);
+ for (offset = ext->start; offset <= ext->end; offset++)
+ swap_free(swp_entry(swap, offset));
+
+ kfree(ext);
+ }
+}
+
+int swsusp_swap_in_use(void)
+{
+ return (swsusp_extents.rb_node != NULL);
+}
+
/*
* General things
*/
@@ -336,7 +437,7 @@ static int save_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
- printk("\b\b\b\b%3d%%", nr_pages / m);
+ printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
nr_pages++;
}
err2 = wait_on_bio_chain(&bio);
@@ -344,9 +445,9 @@ static int save_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret)
- printk("\b\b\b\bdone\n");
+ printk(KERN_CONT "\b\b\b\bdone\n");
else
- printk("\n");
+ printk(KERN_CONT "\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
return ret;
}
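
The extent tracking moved here from swsusp.c stores allocated swap offsets as disjoint [start, end] ranges: an offset adjacent to an existing extent just widens it by one, so sequential allocations cost a single rb-tree node. A simplified sorted-list sketch of the same merge rule (the real code keeps the extents in an rbtree via rb_link_node()/rb_insert_color()):

#include <stdlib.h>

struct extent { unsigned long start, end; struct extent *next; };

/* Insert off into a sorted, disjoint extent list, merging with an
 * adjacent neighbor. Returns 0 on success, -1 if duplicate or OOM. */
static int extent_insert(struct extent **head, unsigned long off)
{
	struct extent **p = head, *e;

	while ((e = *p)) {
		if (off + 1 == e->start) { e->start--; return 0; }
		if (off == e->end + 1)   { e->end++;   return 0; }
		if (off >= e->start && off <= e->end)
			return -1;		/* already tracked */
		if (off < e->start)
			break;			/* insert before e */
		p = &e->next;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->start = e->end = off;
	e->next = *p;
	*p = e;
	return 0;
}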
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 6a07f4dbf2f8..5b3601bd1893 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -56,133 +56,3 @@
#include "power.h"
int in_suspend __nosavedata = 0;
-
-/**
- * The following functions are used for tracing the allocated
- * swap pages, so that they can be freed in case of an error.
- */
-
-struct swsusp_extent {
- struct rb_node node;
- unsigned long start;
- unsigned long end;
-};
-
-static struct rb_root swsusp_extents = RB_ROOT;
-
-static int swsusp_extents_insert(unsigned long swap_offset)
-{
- struct rb_node **new = &(swsusp_extents.rb_node);
- struct rb_node *parent = NULL;
- struct swsusp_extent *ext;
-
- /* Figure out where to put the new node */
- while (*new) {
- ext = container_of(*new, struct swsusp_extent, node);
- parent = *new;
- if (swap_offset < ext->start) {
- /* Try to merge */
- if (swap_offset == ext->start - 1) {
- ext->start--;
- return 0;
- }
- new = &((*new)->rb_left);
- } else if (swap_offset > ext->end) {
- /* Try to merge */
- if (swap_offset == ext->end + 1) {
- ext->end++;
- return 0;
- }
- new = &((*new)->rb_right);
- } else {
- /* It already is in the tree */
- return -EINVAL;
- }
- }
- /* Add the new node and rebalance the tree. */
- ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
- if (!ext)
- return -ENOMEM;
-
- ext->start = swap_offset;
- ext->end = swap_offset;
- rb_link_node(&ext->node, parent, new);
- rb_insert_color(&ext->node, &swsusp_extents);
- return 0;
-}
-
-/**
- * alloc_swapdev_block - allocate a swap page and register that it has
- * been allocated, so that it can be freed in case of an error.
- */
-
-sector_t alloc_swapdev_block(int swap)
-{
- unsigned long offset;
-
- offset = swp_offset(get_swap_page_of_type(swap));
- if (offset) {
- if (swsusp_extents_insert(offset))
- swap_free(swp_entry(swap, offset));
- else
- return swapdev_block(swap, offset);
- }
- return 0;
-}
-
-/**
- * free_all_swap_pages - free swap pages allocated for saving image data.
- * It also frees the extents used to register which swap entres had been
- * allocated.
- */
-
-void free_all_swap_pages(int swap)
-{
- struct rb_node *node;
-
- while ((node = swsusp_extents.rb_node)) {
- struct swsusp_extent *ext;
- unsigned long offset;
-
- ext = container_of(node, struct swsusp_extent, node);
- rb_erase(node, &swsusp_extents);
- for (offset = ext->start; offset <= ext->end; offset++)
- swap_free(swp_entry(swap, offset));
-
- kfree(ext);
- }
-}
-
-int swsusp_swap_in_use(void)
-{
- return (swsusp_extents.rb_node != NULL);
-}
-
-/**
- * swsusp_show_speed - print the time elapsed between two events represented by
- * @start and @stop
- *
- * @nr_pages - number of pages processed between @start and @stop
- * @msg - introductory message to print
- */
-
-void swsusp_show_speed(struct timeval *start, struct timeval *stop,
- unsigned nr_pages, char *msg)
-{
- s64 elapsed_centisecs64;
- int centisecs;
- int k;
- int kps;
-
- elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
- do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
- centisecs = elapsed_centisecs64;
- if (centisecs == 0)
- centisecs = 1; /* avoid div-by-zero */
- k = nr_pages * (PAGE_SIZE / 1024);
- kps = (k * 100) / centisecs;
- printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
- msg, k,
- centisecs / 100, centisecs % 100,
- kps / 1000, (kps % 1000) / 10);
-}
diff --git a/kernel/resource.c b/kernel/resource.c
index fb11a58b9594..dc15686b7a77 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,35 +308,37 @@ static int find_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
struct resource *this = root->child;
+ resource_size_t start, end;
- new->start = root->start;
+ start = root->start;
/*
* Skip past an allocated resource that starts at 0, since the assignment
* of this->start - 1 to new->end below would cause an underflow.
*/
if (this && this->start == 0) {
- new->start = this->end + 1;
+ start = this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
- new->end = this->start - 1;
+ end = this->start - 1;
else
- new->end = root->end;
- if (new->start < min)
- new->start = min;
- if (new->end > max)
- new->end = max;
- new->start = ALIGN(new->start, align);
+ end = root->end;
+ if (start < min)
+ start = min;
+ if (end > max)
+ end = max;
+ start = ALIGN(start, align);
if (alignf)
alignf(alignf_data, new, size, align);
- if (new->start < new->end && new->end - new->start >= size - 1) {
- new->end = new->start + size - 1;
+ if (start < end && end - start >= size - 1) {
+ new->start = start;
+ new->end = start + size - 1;
return 0;
}
if (!this)
break;
- new->start = this->end + 1;
+ start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
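
find_resource() now searches with local start/end temporaries and assigns new->start/new->end only once a window actually fits, so an -EBUSY failure no longer leaves the caller's resource half-overwritten. The compute-then-commit shape in miniature:

#include <errno.h>

struct range { unsigned long start, end; };

/* Work in temporaries; touch the output only on success. */
static int find_fit(struct range *out, unsigned long min, unsigned long max,
		    unsigned long size)
{
	unsigned long start = min, end = max;

	if (start < end && end - start >= size - 1) {
		out->start = start;		/* commit */
		out->end = start + size - 1;
		return 0;
	}
	return -EBUSY;				/* out is untouched */
}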
diff --git a/kernel/sched.c b/kernel/sched.c
index bc68037f3199..ff39cadf621e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7449,17 +7449,16 @@ static struct ctl_table sd_ctl_dir[] = {
.procname = "sched_domain",
.mode = 0555,
},
- {0, },
+ {}
};
static struct ctl_table sd_ctl_root[] = {
{
- .ctl_name = CTL_KERN,
.procname = "kernel",
.mode = 0555,
.child = sd_ctl_dir,
},
- {0, },
+ {}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 00889bd3c590..7494bbf5a270 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -49,7 +49,6 @@ static const int slow_work_max_vslow = 99;
ctl_table slow_work_sysctls[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "min-threads",
.data = &slow_work_min_threads,
.maxlen = sizeof(unsigned),
@@ -59,7 +58,6 @@ ctl_table slow_work_sysctls[] = {
.extra2 = &slow_work_max_threads,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "max-threads",
.data = &slow_work_max_threads,
.maxlen = sizeof(unsigned),
@@ -69,16 +67,15 @@ ctl_table slow_work_sysctls[] = {
.extra2 = (void *) &slow_work_max_max_threads,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "vslow-percentage",
.data = &vslow_work_proportion,
.maxlen = sizeof(unsigned),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = (void *) &slow_work_min_vslow,
.extra2 = (void *) &slow_work_max_vslow,
},
- { .ctl_name = 0 }
+ {}
};
#endif
diff --git a/kernel/sys.c b/kernel/sys.c
index 9968c5fb55b9..585d6cd10040 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
-#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
@@ -349,6 +348,9 @@ void kernel_power_off(void)
machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
+
+static DEFINE_MUTEX(reboot_mutex);
+
/*
* Reboot system call: for obvious reasons only root may call it,
* and even root needs to set up some magic numbers in the registers
@@ -381,7 +383,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
cmd = LINUX_REBOOT_CMD_HALT;
- lock_kernel();
+ mutex_lock(&reboot_mutex);
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
kernel_restart(NULL);
@@ -397,20 +399,18 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
case LINUX_REBOOT_CMD_HALT:
kernel_halt();
- unlock_kernel();
do_exit(0);
panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
kernel_power_off();
- unlock_kernel();
do_exit(0);
break;
case LINUX_REBOOT_CMD_RESTART2:
if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
- unlock_kernel();
- return -EFAULT;
+ ret = -EFAULT;
+ break;
}
buffer[sizeof(buffer) - 1] = '\0';
@@ -433,7 +433,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
ret = -EINVAL;
break;
}
- unlock_kernel();
+ mutex_unlock(&reboot_mutex);
return ret;
}
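
sys_reboot() drops the Big Kernel Lock for a private reboot_mutex, and the RESTART2 error path now breaks to the common exit instead of returning with the lock handling scattered across early returns. One lock site, one unlock site; a userspace sketch of the shape:

#include <pthread.h>

static pthread_mutex_t reboot_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Serialize command handling under one mutex; every path, including
 * errors, funnels through the single unlock at the end. */
static int do_reboot_cmd(int cmd)
{
	int ret = 0;

	pthread_mutex_lock(&reboot_mutex);
	switch (cmd) {
	case 0:
		/* ... restart ... */
		break;
	case 1:
		ret = -14;	/* -EFAULT: break, don't return early */
		break;
	default:
		ret = -22;	/* -EINVAL */
		break;
	}
	pthread_mutex_unlock(&reboot_mutex);
	return ret;
}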
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e06d0b8d1951..695384f12a7d 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -48,8 +48,10 @@ cond_syscall(sys_shutdown);
cond_syscall(sys_sendmsg);
cond_syscall(compat_sys_sendmsg);
cond_syscall(sys_recvmsg);
+cond_syscall(sys_recvmmsg);
cond_syscall(compat_sys_recvmsg);
cond_syscall(compat_sys_recvfrom);
+cond_syscall(compat_sys_recvmmsg);
cond_syscall(sys_socketcall);
cond_syscall(sys_futex);
cond_syscall(compat_sys_futex);
@@ -139,7 +141,6 @@ cond_syscall(sys_pciconfig_read);
cond_syscall(sys_pciconfig_write);
cond_syscall(sys_pciconfig_iobase);
cond_syscall(sys32_ipc);
-cond_syscall(sys32_sysctl);
cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b9e5a45f1e28..554ac4894f0f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,7 +27,6 @@
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/kmemcheck.h>
-#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -61,7 +60,6 @@
#include <asm/io.h>
#endif
-static int deprecated_sysctl_warning(struct __sysctl_args *args);
#if defined(CONFIG_SYSCTL)
@@ -210,31 +208,26 @@ extern int lock_stat;
static struct ctl_table root_table[] = {
{
- .ctl_name = CTL_KERN,
.procname = "kernel",
.mode = 0555,
.child = kern_table,
},
{
- .ctl_name = CTL_VM,
.procname = "vm",
.mode = 0555,
.child = vm_table,
},
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = fs_table,
},
{
- .ctl_name = CTL_DEBUG,
.procname = "debug",
.mode = 0555,
.child = debug_table,
},
{
- .ctl_name = CTL_DEV,
.procname = "dev",
.mode = 0555,
.child = dev_table,
@@ -243,7 +236,7 @@ static struct ctl_table root_table[] = {
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
*/
- { .ctl_name = 0 }
+ { }
};
#ifdef CONFIG_SCHED_DEBUG
@@ -259,198 +252,170 @@ static int max_sched_shares_ratelimit = NSEC_PER_SEC; /* 1 second */
static struct ctl_table kern_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_SCHED_DEBUG
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_min_granularity_ns",
.data = &sysctl_sched_min_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &sched_proc_update_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_granularity_ns,
.extra2 = &max_sched_granularity_ns,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_latency_ns",
.data = &sysctl_sched_latency,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &sched_proc_update_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_granularity_ns,
.extra2 = &max_sched_granularity_ns,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_wakeup_granularity_ns",
.data = &sysctl_sched_wakeup_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &sched_proc_update_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_wakeup_granularity_ns,
.extra2 = &max_wakeup_granularity_ns,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_shares_ratelimit",
.data = &sysctl_sched_shares_ratelimit,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &sched_proc_update_handler,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_shares_ratelimit,
.extra2 = &max_sched_shares_ratelimit,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_tunable_scaling",
.data = &sysctl_sched_tunable_scaling,
.maxlen = sizeof(enum sched_tunable_scaling),
.mode = 0644,
- .proc_handler = &sched_proc_update_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_tunable_scaling,
.extra2 = &max_sched_tunable_scaling,
},
-
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_shares_thresh",
.data = &sysctl_sched_shares_thresh,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_migration_cost",
.data = &sysctl_sched_migration_cost,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_nr_migrate",
.data = &sysctl_sched_nr_migrate,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_time_avg",
.data = &sysctl_sched_time_avg,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "timer_migration",
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &sched_rt_handler,
+ .proc_handler = sched_rt_handler,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_rt_runtime_us",
.data = &sysctl_sched_rt_runtime,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &sched_rt_handler,
+ .proc_handler = sched_rt_handler,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_compat_yield",
.data = &sysctl_sched_compat_yield,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_PROVE_LOCKING
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "prove_locking",
.data = &prove_locking,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_LOCK_STAT
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "lock_stat",
.data = &lock_stat,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = KERN_PANIC,
.procname = "panic",
.data = &panic_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_CORE_USES_PID,
.procname = "core_uses_pid",
.data = &core_uses_pid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_CORE_PATTERN,
.procname = "core_pattern",
.data = core_pattern,
.maxlen = CORENAME_MAX_SIZE,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "core_pipe_limit",
.data = &core_pipe_limit,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
.maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = &proc_taint,
+ .proc_handler = proc_taint,
},
#endif
#ifdef CONFIG_LATENCYTOP
@@ -459,181 +424,160 @@ static struct ctl_table kern_table[] = {
.data = &latencytop_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_BLK_DEV_INITRD
{
- .ctl_name = KERN_REALROOTDEV,
.procname = "real-root-dev",
.data = &real_root_dev,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "print-fatal-signals",
.data = &print_fatal_signals,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_SPARC
{
- .ctl_name = KERN_SPARC_REBOOT,
.procname = "reboot-cmd",
.data = reboot_command,
.maxlen = 256,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
{
- .ctl_name = KERN_SPARC_STOP_A,
.procname = "stop-a",
.data = &stop_a_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_SPARC_SCONS_PWROFF,
.procname = "scons-poweroff",
.data = &scons_pwroff,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_SPARC64
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "tsb-ratio",
.data = &sysctl_tsb_ratio,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef __hppa__
{
- .ctl_name = KERN_HPPA_PWRSW,
.procname = "soft-power",
.data = &pwrsw_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_HPPA_UNALIGNED,
.procname = "unaligned-trap",
.data = &unaligned_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = KERN_CTLALTDEL,
.procname = "ctrl-alt-del",
.data = &C_A_D,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_FUNCTION_TRACER
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "ftrace_enabled",
.data = &ftrace_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &ftrace_enable_sysctl,
+ .proc_handler = ftrace_enable_sysctl,
},
#endif
#ifdef CONFIG_STACK_TRACER
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "stack_tracer_enabled",
.data = &stack_tracer_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &stack_trace_sysctl,
+ .proc_handler = stack_trace_sysctl,
},
#endif
#ifdef CONFIG_TRACING
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "ftrace_dump_on_oops",
.data = &ftrace_dump_on_oops,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_MODULES
{
- .ctl_name = KERN_MODPROBE,
.procname = "modprobe",
.data = &modprobe_path,
.maxlen = KMOD_PATH_LEN,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "modules_disabled",
.data = &modules_disabled,
.maxlen = sizeof(int),
.mode = 0644,
/* only handle a transition from default "0" to "1" */
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &one,
.extra2 = &one,
},
#endif
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
{
- .ctl_name = KERN_HOTPLUG,
.procname = "hotplug",
.data = &uevent_helper,
.maxlen = UEVENT_HELPER_PATH_LEN,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
#endif
#ifdef CONFIG_CHR_DEV_SG
{
- .ctl_name = KERN_SG_BIG_BUFF,
.procname = "sg-big-buff",
.data = &sg_big_buff,
.maxlen = sizeof (int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
{
- .ctl_name = KERN_ACCT,
.procname = "acct",
.data = &acct_parm,
.maxlen = 3*sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_MAGIC_SYSRQ
{
- .ctl_name = KERN_SYSRQ,
.procname = "sysrq",
.data = &__sysrq_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_PROC_SYSCTL
@@ -642,215 +586,188 @@ static struct ctl_table kern_table[] = {
.data = NULL,
.maxlen = sizeof (int),
.mode = 0600,
- .proc_handler = &proc_do_cad_pid,
+ .proc_handler = proc_do_cad_pid,
},
#endif
{
- .ctl_name = KERN_MAX_THREADS,
.procname = "threads-max",
.data = &max_threads,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_RANDOM,
.procname = "random",
.mode = 0555,
.child = random_table,
},
{
- .ctl_name = KERN_OVERFLOWUID,
.procname = "overflowuid",
.data = &overflowuid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
{
- .ctl_name = KERN_OVERFLOWGID,
.procname = "overflowgid",
.data = &overflowgid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
#ifdef CONFIG_S390
#ifdef CONFIG_MATHEMU
{
- .ctl_name = KERN_IEEE_EMULATION_WARNINGS,
.procname = "ieee_emulation_warnings",
.data = &sysctl_ieee_emulation_warnings,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = KERN_S390_USER_DEBUG_LOGGING,
.procname = "userprocess_debug",
.data = &sysctl_userprocess_debug,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = KERN_PIDMAX,
.procname = "pid_max",
.data = &pid_max,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &pid_max_min,
.extra2 = &pid_max_max,
},
{
- .ctl_name = KERN_PANIC_ON_OOPS,
.procname = "panic_on_oops",
.data = &panic_on_oops,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#if defined CONFIG_PRINTK
{
- .ctl_name = KERN_PRINTK,
.procname = "printk",
.data = &console_loglevel,
.maxlen = 4*sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_PRINTK_RATELIMIT,
.procname = "printk_ratelimit",
.data = &printk_ratelimit_state.interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .ctl_name = KERN_PRINTK_RATELIMIT_BURST,
.procname = "printk_ratelimit_burst",
.data = &printk_ratelimit_state.burst,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "printk_delay",
.data = &printk_delay_msec,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &ten_thousand,
},
#endif
{
- .ctl_name = KERN_NGROUPS_MAX,
.procname = "ngroups_max",
.data = &ngroups_max,
.maxlen = sizeof (int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
{
- .ctl_name = KERN_UNKNOWN_NMI_PANIC,
.procname = "unknown_nmi_panic",
.data = &unknown_nmi_panic,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.procname = "nmi_watchdog",
.data = &nmi_watchdog_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_nmi_enabled,
+ .proc_handler = proc_nmi_enabled,
},
#endif
#if defined(CONFIG_X86)
{
- .ctl_name = KERN_PANIC_ON_NMI,
.procname = "panic_on_unrecovered_nmi",
.data = &panic_on_unrecovered_nmi,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "panic_on_io_nmi",
.data = &panic_on_io_nmi,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = KERN_BOOTLOADER_TYPE,
.procname = "bootloader_type",
.data = &bootloader_type,
.maxlen = sizeof (int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "bootloader_version",
.data = &bootloader_version,
.maxlen = sizeof (int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "kstack_depth_to_print",
.data = &kstack_depth_to_print,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "io_delay_type",
.data = &io_delay_type,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#if defined(CONFIG_MMU)
{
- .ctl_name = KERN_RANDOMIZE,
.procname = "randomize_va_space",
.data = &randomize_va_space,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
- .ctl_name = KERN_SPIN_RETRY,
.procname = "spin_retry",
.data = &spin_retry,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
@@ -859,123 +776,104 @@ static struct ctl_table kern_table[] = {
.data = &acpi_realmode_flags,
.maxlen = sizeof (unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
},
#endif
#ifdef CONFIG_IA64
{
- .ctl_name = KERN_IA64_UNALIGNED,
.procname = "ignore-unaligned-usertrap",
.data = &no_unaligned_warning,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "unaligned-dump-stack",
.data = &unaligned_dump_stack,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_DETECT_SOFTLOCKUP
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "softlockup_panic",
.data = &softlockup_panic,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "softlockup_thresh",
.data = &softlockup_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dosoftlockup_thresh,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dosoftlockup_thresh,
.extra1 = &neg_one,
.extra2 = &sixty,
},
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hung_task_panic",
.data = &sysctl_hung_task_panic,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hung_task_check_count",
.data = &sysctl_hung_task_check_count,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_doulongvec_minmax,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hung_task_timeout_secs",
.data = &sysctl_hung_task_timeout_secs,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_dohung_task_timeout_secs,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dohung_task_timeout_secs,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hung_task_warnings",
.data = &sysctl_hung_task_warnings,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_doulongvec_minmax,
},
#endif
#ifdef CONFIG_COMPAT
{
- .ctl_name = KERN_COMPAT_LOG,
.procname = "compat-log",
.data = &compat_log,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_RT_MUTEXES
{
- .ctl_name = KERN_MAX_LOCK_DEPTH,
.procname = "max_lock_depth",
.data = &max_lock_depth,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "poweroff_cmd",
.data = &poweroff_cmd,
.maxlen = POWEROFF_CMD_PATH_LEN,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
#ifdef CONFIG_KEYS
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "keys",
.mode = 0555,
.child = key_sysctls,
@@ -983,17 +881,15 @@ static struct ctl_table kern_table[] = {
#endif
#ifdef CONFIG_RCU_TORTURE_TEST
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "rcutorture_runnable",
.data = &rcutorture_runnable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_SLOW_WORK
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "slow-work",
.mode = 0555,
.child = slow_work_sysctls,
@@ -1001,146 +897,127 @@ static struct ctl_table kern_table[] = {
#endif
#ifdef CONFIG_PERF_EVENTS
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "perf_event_paranoid",
.data = &sysctl_perf_event_paranoid,
.maxlen = sizeof(sysctl_perf_event_paranoid),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "perf_event_mlock_kb",
.data = &sysctl_perf_event_mlock,
.maxlen = sizeof(sysctl_perf_event_mlock),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "perf_event_max_sample_rate",
.data = &sysctl_perf_event_sample_rate,
.maxlen = sizeof(sysctl_perf_event_sample_rate),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_KMEMCHECK
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "kmemcheck",
.data = &kmemcheck_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_BLOCK
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "blk_iopoll",
.data = &blk_iopoll_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
*/
- { .ctl_name = 0 }
+ { }
};
static struct ctl_table vm_table[] = {
{
- .ctl_name = VM_OVERCOMMIT_MEMORY,
.procname = "overcommit_memory",
.data = &sysctl_overcommit_memory,
.maxlen = sizeof(sysctl_overcommit_memory),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_PANIC_ON_OOM,
.procname = "panic_on_oom",
.data = &sysctl_panic_on_oom,
.maxlen = sizeof(sysctl_panic_on_oom),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "oom_kill_allocating_task",
.data = &sysctl_oom_kill_allocating_task,
.maxlen = sizeof(sysctl_oom_kill_allocating_task),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "oom_dump_tasks",
.data = &sysctl_oom_dump_tasks,
.maxlen = sizeof(sysctl_oom_dump_tasks),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
.maxlen = sizeof(sysctl_overcommit_ratio),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_PAGE_CLUSTER,
.procname = "page-cluster",
.data = &page_cluster,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_DIRTY_BACKGROUND,
.procname = "dirty_background_ratio",
.data = &dirty_background_ratio,
.maxlen = sizeof(dirty_background_ratio),
.mode = 0644,
- .proc_handler = &dirty_background_ratio_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = dirty_background_ratio_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "dirty_background_bytes",
.data = &dirty_background_bytes,
.maxlen = sizeof(dirty_background_bytes),
.mode = 0644,
- .proc_handler = &dirty_background_bytes_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = dirty_background_bytes_handler,
.extra1 = &one_ul,
},
{
- .ctl_name = VM_DIRTY_RATIO,
.procname = "dirty_ratio",
.data = &vm_dirty_ratio,
.maxlen = sizeof(vm_dirty_ratio),
.mode = 0644,
- .proc_handler = &dirty_ratio_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = dirty_ratio_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "dirty_bytes",
.data = &vm_dirty_bytes,
.maxlen = sizeof(vm_dirty_bytes),
.mode = 0644,
- .proc_handler = &dirty_bytes_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = dirty_bytes_handler,
.extra1 = &dirty_bytes_min,
},
{
@@ -1148,31 +1025,28 @@ static struct ctl_table vm_table[] = {
.data = &dirty_writeback_interval,
.maxlen = sizeof(dirty_writeback_interval),
.mode = 0644,
- .proc_handler = &dirty_writeback_centisecs_handler,
+ .proc_handler = dirty_writeback_centisecs_handler,
},
{
.procname = "dirty_expire_centisecs",
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_NR_PDFLUSH_THREADS,
.procname = "nr_pdflush_threads",
.data = &nr_pdflush_threads,
.maxlen = sizeof nr_pdflush_threads,
.mode = 0444 /* read-only */,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = VM_SWAPPINESS,
.procname = "swappiness",
.data = &vm_swappiness,
.maxlen = sizeof(vm_swappiness),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one_hundred,
},
@@ -1182,255 +1056,213 @@ static struct ctl_table vm_table[] = {
.data = NULL,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &hugetlb_sysctl_handler,
+ .proc_handler = hugetlb_sysctl_handler,
.extra1 = (void *)&hugetlb_zero,
.extra2 = (void *)&hugetlb_infinity,
},
{
- .ctl_name = VM_HUGETLB_GROUP,
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
.maxlen = sizeof(gid_t),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hugepages_treat_as_movable",
.data = &hugepages_treat_as_movable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &hugetlb_treat_movable_handler,
+ .proc_handler = hugetlb_treat_movable_handler,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nr_overcommit_hugepages",
.data = NULL,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &hugetlb_overcommit_handler,
+ .proc_handler = hugetlb_overcommit_handler,
.extra1 = (void *)&hugetlb_zero,
.extra2 = (void *)&hugetlb_infinity,
},
#endif
{
- .ctl_name = VM_LOWMEM_RESERVE_RATIO,
.procname = "lowmem_reserve_ratio",
.data = &sysctl_lowmem_reserve_ratio,
.maxlen = sizeof(sysctl_lowmem_reserve_ratio),
.mode = 0644,
- .proc_handler = &lowmem_reserve_ratio_sysctl_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = lowmem_reserve_ratio_sysctl_handler,
},
{
- .ctl_name = VM_DROP_PAGECACHE,
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = drop_caches_sysctl_handler,
- .strategy = &sysctl_intvec,
},
{
- .ctl_name = VM_MIN_FREE_KBYTES,
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
.maxlen = sizeof(min_free_kbytes),
.mode = 0644,
- .proc_handler = &min_free_kbytes_sysctl_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = &zero,
},
{
- .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
.maxlen = sizeof(percpu_pagelist_fraction),
.mode = 0644,
- .proc_handler = &percpu_pagelist_fraction_sysctl_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = percpu_pagelist_fraction_sysctl_handler,
.extra1 = &min_percpu_pagelist_fract,
},
#ifdef CONFIG_MMU
{
- .ctl_name = VM_MAX_MAP_COUNT,
.procname = "max_map_count",
.data = &sysctl_max_map_count,
.maxlen = sizeof(sysctl_max_map_count),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#else
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nr_trim_pages",
.data = &sysctl_nr_trim_pages,
.maxlen = sizeof(sysctl_nr_trim_pages),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
#endif
{
- .ctl_name = VM_LAPTOP_MODE,
.procname = "laptop_mode",
.data = &laptop_mode,
.maxlen = sizeof(laptop_mode),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .ctl_name = VM_BLOCK_DUMP,
.procname = "block_dump",
.data = &block_dump,
.maxlen = sizeof(block_dump),
.mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec,
.extra1 = &zero,
},
{
- .ctl_name = VM_VFS_CACHE_PRESSURE,
.procname = "vfs_cache_pressure",
.data = &sysctl_vfs_cache_pressure,
.maxlen = sizeof(sysctl_vfs_cache_pressure),
.mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec,
.extra1 = &zero,
},
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
{
- .ctl_name = VM_LEGACY_VA_LAYOUT,
.procname = "legacy_va_layout",
.data = &sysctl_legacy_va_layout,
.maxlen = sizeof(sysctl_legacy_va_layout),
.mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec,
.extra1 = &zero,
},
#endif
#ifdef CONFIG_NUMA
{
- .ctl_name = VM_ZONE_RECLAIM_MODE,
.procname = "zone_reclaim_mode",
.data = &zone_reclaim_mode,
.maxlen = sizeof(zone_reclaim_mode),
.mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec,
.extra1 = &zero,
},
{
- .ctl_name = VM_MIN_UNMAPPED,
.procname = "min_unmapped_ratio",
.data = &sysctl_min_unmapped_ratio,
.maxlen = sizeof(sysctl_min_unmapped_ratio),
.mode = 0644,
- .proc_handler = &sysctl_min_unmapped_ratio_sysctl_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
- .ctl_name = VM_MIN_SLAB,
.procname = "min_slab_ratio",
.data = &sysctl_min_slab_ratio,
.maxlen = sizeof(sysctl_min_slab_ratio),
.mode = 0644,
- .proc_handler = &sysctl_min_slab_ratio_sysctl_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
},
#endif
#ifdef CONFIG_SMP
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "stat_interval",
.data = &sysctl_stat_interval,
.maxlen = sizeof(sysctl_stat_interval),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "mmap_min_addr",
.data = &dac_mmap_min_addr,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &mmap_min_addr_handler,
+ .proc_handler = mmap_min_addr_handler,
},
#ifdef CONFIG_NUMA
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "numa_zonelist_order",
.data = &numa_zonelist_order,
.maxlen = NUMA_ZONELIST_ORDER_LEN,
.mode = 0644,
- .proc_handler = &numa_zonelist_order_handler,
- .strategy = &sysctl_string,
+ .proc_handler = numa_zonelist_order_handler,
},
#endif
#if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \
(defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
{
- .ctl_name = VM_VDSO_ENABLED,
.procname = "vdso_enabled",
.data = &vdso_enabled,
.maxlen = sizeof(vdso_enabled),
.mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec,
.extra1 = &zero,
},
#endif
#ifdef CONFIG_HIGHMEM
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "highmem_is_dirtyable",
.data = &vm_highmem_is_dirtyable,
.maxlen = sizeof(vm_highmem_is_dirtyable),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "scan_unevictable_pages",
.data = &scan_unevictable_pages,
.maxlen = sizeof(scan_unevictable_pages),
.mode = 0644,
- .proc_handler = &scan_unevictable_handler,
+ .proc_handler = scan_unevictable_handler,
},
#ifdef CONFIG_MEMORY_FAILURE
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "memory_failure_early_kill",
.data = &sysctl_memory_failure_early_kill,
.maxlen = sizeof(sysctl_memory_failure_early_kill),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "memory_failure_recovery",
.data = &sysctl_memory_failure_recovery,
.maxlen = sizeof(sysctl_memory_failure_recovery),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
@@ -1440,116 +1272,104 @@ static struct ctl_table vm_table[] = {
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
*/
- { .ctl_name = 0 }
+ { }
};
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
static struct ctl_table binfmt_misc_table[] = {
- { .ctl_name = 0 }
+ { }
};
#endif
static struct ctl_table fs_table[] = {
{
- .ctl_name = FS_NRINODE,
.procname = "inode-nr",
.data = &inodes_stat,
.maxlen = 2*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_STATINODE,
.procname = "inode-state",
.data = &inodes_stat,
.maxlen = 7*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.procname = "file-nr",
.data = &files_stat,
.maxlen = 3*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_nr_files,
+ .proc_handler = proc_nr_files,
},
{
- .ctl_name = FS_MAXFILE,
.procname = "file-max",
.data = &files_stat.max_files,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nr_open",
.data = &sysctl_nr_open,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &sysctl_nr_open_min,
.extra2 = &sysctl_nr_open_max,
},
{
- .ctl_name = FS_DENTRY,
.procname = "dentry-state",
.data = &dentry_stat,
.maxlen = 6*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_OVERFLOWUID,
.procname = "overflowuid",
.data = &fs_overflowuid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
{
- .ctl_name = FS_OVERFLOWGID,
.procname = "overflowgid",
.data = &fs_overflowgid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
#ifdef CONFIG_FILE_LOCKING
{
- .ctl_name = FS_LEASES,
.procname = "leases-enable",
.data = &leases_enable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_DNOTIFY
{
- .ctl_name = FS_DIR_NOTIFY,
.procname = "dir-notify-enable",
.data = &dir_notify_enable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_MMU
#ifdef CONFIG_FILE_LOCKING
{
- .ctl_name = FS_LEASE_TIME,
.procname = "lease-break-time",
.data = &lease_break_time,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_AIO
@@ -1558,19 +1378,18 @@ static struct ctl_table fs_table[] = {
.data = &aio_nr,
.maxlen = sizeof(aio_nr),
.mode = 0444,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
},
{
.procname = "aio-max-nr",
.data = &aio_max_nr,
.maxlen = sizeof(aio_max_nr),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
},
#endif /* CONFIG_AIO */
#ifdef CONFIG_INOTIFY_USER
{
- .ctl_name = FS_INOTIFY,
.procname = "inotify",
.mode = 0555,
.child = inotify_table,
@@ -1585,19 +1404,16 @@ static struct ctl_table fs_table[] = {
#endif
#endif
{
- .ctl_name = KERN_SETUID_DUMPABLE,
.procname = "suid_dumpable",
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &two,
},
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "binfmt_misc",
.mode = 0555,
.child = binfmt_misc_table,
@@ -1607,13 +1423,12 @@ static struct ctl_table fs_table[] = {
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
*/
- { .ctl_name = 0 }
+ { }
};
static struct ctl_table debug_table[] = {
#if defined(CONFIG_X86) || defined(CONFIG_PPC)
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "exception-trace",
.data = &show_unhandled_signals,
.maxlen = sizeof(int),
@@ -1621,11 +1436,11 @@ static struct ctl_table debug_table[] = {
.proc_handler = proc_dointvec
},
#endif
- { .ctl_name = 0 }
+ { }
};
static struct ctl_table dev_table[] = {
- { .ctl_name = 0 }
+ { }
};
static DEFINE_SPINLOCK(sysctl_lock);
@@ -1779,122 +1594,6 @@ void register_sysctl_root(struct ctl_table_root *root)
spin_unlock(&sysctl_lock);
}
-#ifdef CONFIG_SYSCTL_SYSCALL
-/* Perform the actual read/write of a sysctl table entry. */
-static int do_sysctl_strategy(struct ctl_table_root *root,
- struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- int op = 0, rc;
-
- if (oldval)
- op |= MAY_READ;
- if (newval)
- op |= MAY_WRITE;
- if (sysctl_perm(root, table, op))
- return -EPERM;
-
- if (table->strategy) {
- rc = table->strategy(table, oldval, oldlenp, newval, newlen);
- if (rc < 0)
- return rc;
- if (rc > 0)
- return 0;
- }
-
- /* If there is no strategy routine, or if the strategy returns
- * zero, proceed with automatic r/w */
- if (table->data && table->maxlen) {
- rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
- if (rc < 0)
- return rc;
- }
- return 0;
-}
-
-static int parse_table(int __user *name, int nlen,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen,
- struct ctl_table_root *root,
- struct ctl_table *table)
-{
- int n;
-repeat:
- if (!nlen)
- return -ENOTDIR;
- if (get_user(n, name))
- return -EFAULT;
- for ( ; table->ctl_name || table->procname; table++) {
- if (!table->ctl_name)
- continue;
- if (n == table->ctl_name) {
- int error;
- if (table->child) {
- if (sysctl_perm(root, table, MAY_EXEC))
- return -EPERM;
- name++;
- nlen--;
- table = table->child;
- goto repeat;
- }
- error = do_sysctl_strategy(root, table,
- oldval, oldlenp,
- newval, newlen);
- return error;
- }
- }
- return -ENOTDIR;
-}
-
-int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- struct ctl_table_header *head;
- int error = -ENOTDIR;
-
- if (nlen <= 0 || nlen >= CTL_MAXNAME)
- return -ENOTDIR;
- if (oldval) {
- int old_len;
- if (!oldlenp || get_user(old_len, oldlenp))
- return -EFAULT;
- }
-
- for (head = sysctl_head_next(NULL); head;
- head = sysctl_head_next(head)) {
- error = parse_table(name, nlen, oldval, oldlenp,
- newval, newlen,
- head->root, head->ctl_table);
- if (error != -ENOTDIR) {
- sysctl_head_finish(head);
- break;
- }
- }
- return error;
-}
-
-SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
-{
- struct __sysctl_args tmp;
- int error;
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- error = deprecated_sysctl_warning(&tmp);
- if (error)
- goto out;
-
- lock_kernel();
- error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp,
- tmp.newval, tmp.newlen);
- unlock_kernel();
-out:
- return error;
-}
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
/*
* sysctl_perm does NOT grant the superuser all rights automatically, because
* some sysctl variables are readonly even to root.
@@ -1930,7 +1629,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
{
- for (; table->ctl_name || table->procname; table++) {
+ for (; table->procname; table++) {
table->parent = parent;
if (table->child)
sysctl_set_parent(table, table->child);
@@ -1962,11 +1661,11 @@ static struct ctl_table *is_branch_in(struct ctl_table *branch,
return NULL;
/* ... and nothing else */
- if (branch[1].procname || branch[1].ctl_name)
+ if (branch[1].procname)
return NULL;
/* table should contain subdirectory with the same name */
- for (p = table; p->procname || p->ctl_name; p++) {
+ for (p = table; p->procname; p++) {
if (!p->child)
continue;
if (p->procname && strcmp(p->procname, s) == 0)
@@ -2011,9 +1710,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
*
* The members of the &struct ctl_table structure are used as follows:
*
- * ctl_name - This is the numeric sysctl value used by sysctl(2). The number
- * must be unique within that level of sysctl
- *
* procname - the name of the sysctl file under /proc/sys. Set to %NULL to not
* enter a sysctl file
*
@@ -2028,8 +1724,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
*
* proc_handler - the text handler routine (described below)
*
- * strategy - the strategy routine (described below)
- *
* de - for internal use by the sysctl routines
*
* extra1, extra2 - extra pointers usable by the proc handler routines
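[Editor's sketch, not part of the patch: with the strategy and ctl_name fields gone, a terminal entry is fully described by procname, data, maxlen, mode, proc_handler and the optional extra1/extra2 bounds. A minimal converted entry for an integer knob clamped to 0..1, mirroring entries such as "timer_migration" above (the backing variables here are illustrative):]

#include <linux/sysctl.h>

static int example_enabled;		/* hypothetical backing variable */
static int ex_zero, ex_one = 1;		/* bounds handed to the handler */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_enabled",
		.data		= &example_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ex_zero,	/* minimum accepted value */
		.extra2		= &ex_one,	/* maximum accepted value */
	},
	{ }	/* sentinel: a NULL procname terminates the table walk */
};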
@@ -2042,19 +1736,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
* struct enable minimal validation of the values being written to be
* performed, and the mode field allows minimal authentication.
*
- * More sophisticated management can be enabled by the provision of a
- * strategy routine with the table entry. This will be called before
- * any automatic read or write of the data is performed.
- *
- * The strategy routine may return
- *
- * < 0 - Error occurred (error is passed to user process)
- *
- * 0 - OK - proceed with automatic read or write.
- *
- * > 0 - OK - read or write has been done by the strategy routine, so
- * return immediately.
- *
* There must be a proc_handler routine for any terminal nodes
* mirrored under /proc/sys (non-terminals are handled by a built-in
* directory handler). Several default handlers are available to
@@ -2081,13 +1762,13 @@ struct ctl_table_header *__register_sysctl_paths(
struct ctl_table_set *set;
/* Count the path components */
- for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)
+ for (npath = 0; path[npath].procname; ++npath)
;
/*
* For each path component, allocate a 2-element ctl_table array.
* The first array element will be filled with the sysctl entry
- * for this, the second will be the sentinel (ctl_name == 0).
+ * for this, the second will be the sentinel (procname == 0).
*
* We allocate everything in one go so that we don't have to
* worry about freeing additional memory in unregister_sysctl_table.
@@ -2104,7 +1785,6 @@ struct ctl_table_header *__register_sysctl_paths(
for (n = 0; n < npath; ++n, ++path) {
/* Copy the procname */
new->procname = path->procname;
- new->ctl_name = path->ctl_name;
new->mode = 0555;
*prevp = new;
@@ -2966,286 +2646,6 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-/*
- * General sysctl support routines
- */
-
-/* The generic sysctl data routine (used if no strategy routine supplied) */
-int sysctl_data(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- size_t len;
-
-	/* Bail out if there is no backing variable */
- if (!table->data || !table->maxlen)
- return -ENOTDIR;
-
- if (oldval && oldlenp) {
- if (get_user(len, oldlenp))
- return -EFAULT;
- if (len) {
- if (len > table->maxlen)
- len = table->maxlen;
- if (copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if (put_user(len, oldlenp))
- return -EFAULT;
- }
- }
-
- if (newval && newlen) {
- if (newlen > table->maxlen)
- newlen = table->maxlen;
-
- if (copy_from_user(table->data, newval, newlen))
- return -EFAULT;
- }
- return 1;
-}
-
-/* The generic string strategy routine: */
-int sysctl_string(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- if (!table->data || !table->maxlen)
- return -ENOTDIR;
-
- if (oldval && oldlenp) {
- size_t bufsize;
- if (get_user(bufsize, oldlenp))
- return -EFAULT;
- if (bufsize) {
- size_t len = strlen(table->data), copied;
-
- /* This shouldn't trigger for a well-formed sysctl */
- if (len > table->maxlen)
- len = table->maxlen;
-
- /* Copy up to a max of bufsize-1 bytes of the string */
- copied = (len >= bufsize) ? bufsize - 1 : len;
-
- if (copy_to_user(oldval, table->data, copied) ||
- put_user(0, (char __user *)(oldval + copied)))
- return -EFAULT;
- if (put_user(len, oldlenp))
- return -EFAULT;
- }
- }
- if (newval && newlen) {
- size_t len = newlen;
- if (len > table->maxlen)
- len = table->maxlen;
- if(copy_from_user(table->data, newval, len))
- return -EFAULT;
- if (len == table->maxlen)
- len--;
- ((char *) table->data)[len] = 0;
- }
- return 1;
-}
-
-/*
- * This function makes sure that all of the integers in the vector
- * are between the minimum and maximum values given in the arrays
- * table->extra1 and table->extra2, respectively.
- */
-int sysctl_intvec(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
-
- if (newval && newlen) {
- int __user *vec = (int __user *) newval;
- int *min = (int *) table->extra1;
- int *max = (int *) table->extra2;
- size_t length;
- int i;
-
- if (newlen % sizeof(int) != 0)
- return -EINVAL;
-
- if (!table->extra1 && !table->extra2)
- return 0;
-
- if (newlen > table->maxlen)
- newlen = table->maxlen;
- length = newlen / sizeof(int);
-
- for (i = 0; i < length; i++) {
- int value;
- if (get_user(value, vec + i))
- return -EFAULT;
- if (min && value < min[i])
- return -EINVAL;
- if (max && value > max[i])
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/* Strategy function to convert jiffies to seconds */
-int sysctl_jiffies(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- if (oldval && oldlenp) {
- size_t olen;
-
- if (get_user(olen, oldlenp))
- return -EFAULT;
- if (olen) {
- int val;
-
- if (olen < sizeof(int))
- return -EINVAL;
-
- val = *(int *)(table->data) / HZ;
- if (put_user(val, (int __user *)oldval))
- return -EFAULT;
- if (put_user(sizeof(int), oldlenp))
- return -EFAULT;
- }
- }
- if (newval && newlen) {
- int new;
- if (newlen != sizeof(int))
- return -EINVAL;
- if (get_user(new, (int __user *)newval))
- return -EFAULT;
- *(int *)(table->data) = new*HZ;
- }
- return 1;
-}
-
-/* Strategy function to convert jiffies to milliseconds */
-int sysctl_ms_jiffies(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- if (oldval && oldlenp) {
- size_t olen;
-
- if (get_user(olen, oldlenp))
- return -EFAULT;
- if (olen) {
- int val;
-
- if (olen < sizeof(int))
- return -EINVAL;
-
- val = jiffies_to_msecs(*(int *)(table->data));
- if (put_user(val, (int __user *)oldval))
- return -EFAULT;
- if (put_user(sizeof(int), oldlenp))
- return -EFAULT;
- }
- }
- if (newval && newlen) {
- int new;
- if (newlen != sizeof(int))
- return -EINVAL;
- if (get_user(new, (int __user *)newval))
- return -EFAULT;
- *(int *)(table->data) = msecs_to_jiffies(new);
- }
- return 1;
-}
-
-
-
-#else /* CONFIG_SYSCTL_SYSCALL */
-
-
-SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
-{
- struct __sysctl_args tmp;
- int error;
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- error = deprecated_sysctl_warning(&tmp);
-
- /* If no error reading the parameters then just -ENOSYS ... */
- if (!error)
- error = -ENOSYS;
-
- return error;
-}
-
-int sysctl_data(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
-int sysctl_string(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
-int sysctl_intvec(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
-int sysctl_jiffies(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
-int sysctl_ms_jiffies(struct ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
-static int deprecated_sysctl_warning(struct __sysctl_args *args)
-{
- static int msg_count;
- int name[CTL_MAXNAME];
- int i;
-
- /* Check args->nlen. */
- if (args->nlen < 0 || args->nlen > CTL_MAXNAME)
- return -ENOTDIR;
-
- /* Read in the sysctl name for better debug message logging */
- for (i = 0; i < args->nlen; i++)
- if (get_user(name[i], args->name + i))
- return -EFAULT;
-
- /* Ignore accesses to kernel.version */
- if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
- return 0;
-
- if (msg_count < 5) {
- msg_count++;
- printk(KERN_INFO
- "warning: process `%s' used the deprecated sysctl "
- "system call with ", current->comm);
- for (i = 0; i < args->nlen; i++)
- printk("%d.", name[i]);
- printk("\n");
- }
- return 0;
-}
-
/*
* No sense putting this after each symbol definition, twice,
* exception granted :-)
@@ -3260,9 +2660,4 @@ EXPORT_SYMBOL(proc_doulongvec_minmax);
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
EXPORT_SYMBOL(register_sysctl_table);
EXPORT_SYMBOL(register_sysctl_paths);
-EXPORT_SYMBOL(sysctl_intvec);
-EXPORT_SYMBOL(sysctl_jiffies);
-EXPORT_SYMBOL(sysctl_ms_jiffies);
-EXPORT_SYMBOL(sysctl_string);
-EXPORT_SYMBOL(sysctl_data);
EXPORT_SYMBOL(unregister_sysctl_table);
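[Editor's sketch, not part of the patch: with the generic strategy helpers and their exports removed, sysctl(2) still works but is now emulated by the new kernel/sysctl_binary.c below rather than dispatched through per-entry ctl_name/strategy fields. A rough userspace illustration of the two paths to the same value, using the CTL_KERN/KERN_PANIC numbers seen in this patch (assumes an architecture that still wires up __NR__sysctl; error handling trimmed):]

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sysctl.h>	/* CTL_KERN, KERN_PANIC, struct __sysctl_args */

int main(void)
{
	int name[] = { CTL_KERN, KERN_PANIC };
	int val = 0;
	size_t len = sizeof(val);
	struct __sysctl_args args = {
		.name = name, .nlen = 2,
		.oldval = &val, .oldlenp = &len,
	};

	/* deprecated binary path, now emulated in kernel/sysctl_binary.c */
	if (syscall(SYS__sysctl, &args) == 0)
		printf("kernel.panic via sysctl(2): %d\n", val);

	/* text path, served by the "panic" entry's proc_dointvec handler */
	FILE *f = fopen("/proc/sys/kernel/panic", "r");
	if (f) {
		if (fscanf(f, "%d", &val) == 1)
			printf("kernel.panic via /proc/sys: %d\n", val);
		fclose(f);
	}
	return 0;
}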
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
new file mode 100644
index 000000000000..b75dbf40f573
--- /dev/null
+++ b/kernel/sysctl_binary.c
@@ -0,0 +1,1507 @@
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include <linux/sunrpc/debug.h>
+#include <linux/string.h>
+#include <net/ip_vs.h>
+#include <linux/syscalls.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/fs.h>
+#include <linux/nsproxy.h>
+#include <linux/pid_namespace.h>
+#include <linux/file.h>
+#include <linux/ctype.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_SYSCTL_SYSCALL
+
+struct bin_table;
+typedef ssize_t bin_convert_t(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen);
+
+static bin_convert_t bin_dir;
+static bin_convert_t bin_string;
+static bin_convert_t bin_intvec;
+static bin_convert_t bin_ulongvec;
+static bin_convert_t bin_uuid;
+static bin_convert_t bin_dn_node_address;
+
+#define CTL_DIR bin_dir
+#define CTL_STR bin_string
+#define CTL_INT bin_intvec
+#define CTL_ULONG bin_ulongvec
+#define CTL_UUID bin_uuid
+#define CTL_DNADR bin_dn_node_address
+
+#define BUFSZ 256
+
+struct bin_table {
+ bin_convert_t *convert;
+ int ctl_name;
+ const char *procname;
+ const struct bin_table *child;
+};
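[Editor's note: each bin_table entry pairs an old binary ctl_name with the procname of the corresponding /proc/sys file, plus a converter that marshals between the binary ABI and the text interface; the CTL_* macros above are shorthand for the converter slot. Expanding one of the entries below positionally against the struct members (purely illustrative, not additional patch content):]

	/* { CTL_INT, KERN_PANIC, "panic" } is equivalent to: */
	{
		.convert	= bin_intvec,	/* int vector <-> decimal text */
		.ctl_name	= KERN_PANIC,	/* number used by sysctl(2) */
		.procname	= "panic",	/* file under /proc/sys/kernel */
	},
	/* CTL_DIR entries also set .child to descend into a subtable */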
+
+static const struct bin_table bin_random_table[] = {
+ { CTL_INT, RANDOM_POOLSIZE, "poolsize" },
+ { CTL_INT, RANDOM_ENTROPY_COUNT, "entropy_avail" },
+ { CTL_INT, RANDOM_READ_THRESH, "read_wakeup_threshold" },
+ { CTL_INT, RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
+ { CTL_UUID, RANDOM_BOOT_ID, "boot_id" },
+ { CTL_UUID, RANDOM_UUID, "uuid" },
+ {}
+};
+
+static const struct bin_table bin_pty_table[] = {
+ { CTL_INT, PTY_MAX, "max" },
+ { CTL_INT, PTY_NR, "nr" },
+ {}
+};
+
+static const struct bin_table bin_kern_table[] = {
+ { CTL_STR, KERN_OSTYPE, "ostype" },
+ { CTL_STR, KERN_OSRELEASE, "osrelease" },
+ /* KERN_OSREV not used */
+ { CTL_STR, KERN_VERSION, "version" },
+ /* KERN_SECUREMASK not used */
+ /* KERN_PROF not used */
+ { CTL_STR, KERN_NODENAME, "hostname" },
+ { CTL_STR, KERN_DOMAINNAME, "domainname" },
+
+ { CTL_INT, KERN_PANIC, "panic" },
+ { CTL_INT, KERN_REALROOTDEV, "real-root-dev" },
+
+ { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" },
+ { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" },
+ { CTL_INT, KERN_PRINTK, "printk" },
+
+ /* KERN_NAMETRANS not used */
+ /* KERN_PPC_HTABRECLAIM not used */
+ /* KERN_PPC_ZEROPAGED not used */
+ { CTL_INT, KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
+
+ { CTL_STR, KERN_MODPROBE, "modprobe" },
+ { CTL_INT, KERN_SG_BIG_BUFF, "sg-big-buff" },
+ { CTL_INT, KERN_ACCT, "acct" },
+ /* KERN_PPC_L2CR "l2cr" no longer used */
+
+ /* KERN_RTSIGNR not used */
+ /* KERN_RTSIGMAX not used */
+
+ { CTL_ULONG, KERN_SHMMAX, "shmmax" },
+ { CTL_INT, KERN_MSGMAX, "msgmax" },
+ { CTL_INT, KERN_MSGMNB, "msgmnb" },
+	/* KERN_MSGPOOL not used */
+ { CTL_INT, KERN_SYSRQ, "sysrq" },
+ { CTL_INT, KERN_MAX_THREADS, "threads-max" },
+ { CTL_DIR, KERN_RANDOM, "random", bin_random_table },
+ { CTL_ULONG, KERN_SHMALL, "shmall" },
+ { CTL_INT, KERN_MSGMNI, "msgmni" },
+ { CTL_INT, KERN_SEM, "sem" },
+ { CTL_INT, KERN_SPARC_STOP_A, "stop-a" },
+ { CTL_INT, KERN_SHMMNI, "shmmni" },
+
+ { CTL_INT, KERN_OVERFLOWUID, "overflowuid" },
+ { CTL_INT, KERN_OVERFLOWGID, "overflowgid" },
+
+	{ CTL_STR,	KERN_HOTPLUG,	"hotplug" },
+ { CTL_INT, KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
+
+ { CTL_INT, KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
+ { CTL_INT, KERN_CORE_USES_PID, "core_uses_pid" },
+ /* KERN_TAINTED "tainted" no longer used */
+ { CTL_INT, KERN_CADPID, "cad_pid" },
+ { CTL_INT, KERN_PIDMAX, "pid_max" },
+ { CTL_STR, KERN_CORE_PATTERN, "core_pattern" },
+ { CTL_INT, KERN_PANIC_ON_OOPS, "panic_on_oops" },
+ { CTL_INT, KERN_HPPA_PWRSW, "soft-power" },
+ { CTL_INT, KERN_HPPA_UNALIGNED, "unaligned-trap" },
+
+ { CTL_INT, KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
+ { CTL_INT, KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
+
+ { CTL_DIR, KERN_PTY, "pty", bin_pty_table },
+ { CTL_INT, KERN_NGROUPS_MAX, "ngroups_max" },
+ { CTL_INT, KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
+ /* KERN_HZ_TIMER "hz_timer" no longer used */
+ { CTL_INT, KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
+ { CTL_INT, KERN_BOOTLOADER_TYPE, "bootloader_type" },
+ { CTL_INT, KERN_RANDOMIZE, "randomize_va_space" },
+
+ { CTL_INT, KERN_SPIN_RETRY, "spin_retry" },
+ /* KERN_ACPI_VIDEO_FLAGS "acpi_video_flags" no longer used */
+ { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
+ { CTL_INT, KERN_COMPAT_LOG, "compat-log" },
+ { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
+ { CTL_INT, KERN_NMI_WATCHDOG, "nmi_watchdog" },
+ { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
+ {}
+};
+
+static const struct bin_table bin_vm_table[] = {
+ { CTL_INT, VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
+ { CTL_INT, VM_PAGE_CLUSTER, "page-cluster" },
+ { CTL_INT, VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
+ { CTL_INT, VM_DIRTY_RATIO, "dirty_ratio" },
+ /* VM_DIRTY_WB_CS "dirty_writeback_centisecs" no longer used */
+ /* VM_DIRTY_EXPIRE_CS "dirty_expire_centisecs" no longer used */
+ { CTL_INT, VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
+ { CTL_INT, VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
+ /* VM_PAGEBUF unused */
+ /* VM_HUGETLB_PAGES "nr_hugepages" no longer used */
+ { CTL_INT, VM_SWAPPINESS, "swappiness" },
+ { CTL_INT, VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
+ { CTL_INT, VM_MIN_FREE_KBYTES, "min_free_kbytes" },
+ { CTL_INT, VM_MAX_MAP_COUNT, "max_map_count" },
+ { CTL_INT, VM_LAPTOP_MODE, "laptop_mode" },
+ { CTL_INT, VM_BLOCK_DUMP, "block_dump" },
+ { CTL_INT, VM_HUGETLB_GROUP, "hugetlb_shm_group" },
+ { CTL_INT, VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
+ { CTL_INT, VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
+ /* VM_SWAP_TOKEN_TIMEOUT unused */
+ { CTL_INT, VM_DROP_PAGECACHE, "drop_caches" },
+ { CTL_INT, VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
+ { CTL_INT, VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
+ { CTL_INT, VM_MIN_UNMAPPED, "min_unmapped_ratio" },
+ { CTL_INT, VM_PANIC_ON_OOM, "panic_on_oom" },
+ { CTL_INT, VM_VDSO_ENABLED, "vdso_enabled" },
+ { CTL_INT, VM_MIN_SLAB, "min_slab_ratio" },
+
+ {}
+};
+
+static const struct bin_table bin_net_core_table[] = {
+ { CTL_INT, NET_CORE_WMEM_MAX, "wmem_max" },
+ { CTL_INT, NET_CORE_RMEM_MAX, "rmem_max" },
+ { CTL_INT, NET_CORE_WMEM_DEFAULT, "wmem_default" },
+ { CTL_INT, NET_CORE_RMEM_DEFAULT, "rmem_default" },
+ /* NET_CORE_DESTROY_DELAY unused */
+ { CTL_INT, NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
+ /* NET_CORE_FASTROUTE unused */
+ { CTL_INT, NET_CORE_MSG_COST, "message_cost" },
+ { CTL_INT, NET_CORE_MSG_BURST, "message_burst" },
+ { CTL_INT, NET_CORE_OPTMEM_MAX, "optmem_max" },
+ /* NET_CORE_HOT_LIST_LENGTH unused */
+ /* NET_CORE_DIVERT_VERSION unused */
+ /* NET_CORE_NO_CONG_THRESH unused */
+ /* NET_CORE_NO_CONG unused */
+ /* NET_CORE_LO_CONG unused */
+ /* NET_CORE_MOD_CONG unused */
+ { CTL_INT, NET_CORE_DEV_WEIGHT, "dev_weight" },
+ { CTL_INT, NET_CORE_SOMAXCONN, "somaxconn" },
+ { CTL_INT, NET_CORE_BUDGET, "netdev_budget" },
+ { CTL_INT, NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
+ { CTL_INT, NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
+ { CTL_INT, NET_CORE_WARNINGS, "warnings" },
+	{}
+};
+
+static const struct bin_table bin_net_unix_table[] = {
+ /* NET_UNIX_DESTROY_DELAY unused */
+ /* NET_UNIX_DELETE_DELAY unused */
+ { CTL_INT, NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv4_route_table[] = {
+ { CTL_INT, NET_IPV4_ROUTE_FLUSH, "flush" },
+ /* NET_IPV4_ROUTE_MIN_DELAY "min_delay" no longer used */
+ /* NET_IPV4_ROUTE_MAX_DELAY "max_delay" no longer used */
+ { CTL_INT, NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
+ { CTL_INT, NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
+ { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ { CTL_INT, NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { CTL_INT, NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
+ { CTL_INT, NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
+ { CTL_INT, NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
+ { CTL_INT, NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
+ { CTL_INT, NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
+ { CTL_INT, NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
+ { CTL_INT, NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
+ { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { CTL_INT, NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv4_conf_vars_table[] = {
+ { CTL_INT, NET_IPV4_CONF_FORWARDING, "forwarding" },
+ { CTL_INT, NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
+
+ { CTL_INT, NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
+ { CTL_INT, NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
+ { CTL_INT, NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
+ { CTL_INT, NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
+ { CTL_INT, NET_IPV4_CONF_RP_FILTER, "rp_filter" },
+ { CTL_INT, NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ { CTL_INT, NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
+ { CTL_INT, NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
+ { CTL_INT, NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
+ { CTL_INT, NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
+ { CTL_INT, NET_IPV4_CONF_TAG, "tag" },
+ { CTL_INT, NET_IPV4_CONF_ARPFILTER, "arp_filter" },
+ { CTL_INT, NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
+ { CTL_INT, NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
+ { CTL_INT, NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
+ { CTL_INT, NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
+
+ { CTL_INT, NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
+ { CTL_INT, NET_IPV4_CONF_NOPOLICY, "disable_policy" },
+ { CTL_INT, NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
+ { CTL_INT, NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv4_conf_table[] = {
+ { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv4_conf_vars_table },
+ { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv4_conf_vars_table },
+ { CTL_DIR, 0, NULL, bin_net_ipv4_conf_vars_table },
+ {}
+};
+
+static const struct bin_table bin_net_neigh_vars_table[] = {
+ { CTL_INT, NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
+ { CTL_INT, NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
+ { CTL_INT, NET_NEIGH_APP_SOLICIT, "app_solicit" },
+ /* NET_NEIGH_RETRANS_TIME "retrans_time" no longer used */
+ { CTL_INT, NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
+ { CTL_INT, NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
+ { CTL_INT, NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
+ { CTL_INT, NET_NEIGH_UNRES_QLEN, "unres_qlen" },
+ { CTL_INT, NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
+ /* NET_NEIGH_ANYCAST_DELAY "anycast_delay" no longer used */
+ /* NET_NEIGH_PROXY_DELAY "proxy_delay" no longer used */
+ /* NET_NEIGH_LOCKTIME "locktime" no longer used */
+ { CTL_INT, NET_NEIGH_GC_INTERVAL, "gc_interval" },
+ { CTL_INT, NET_NEIGH_GC_THRESH1, "gc_thresh1" },
+ { CTL_INT, NET_NEIGH_GC_THRESH2, "gc_thresh2" },
+ { CTL_INT, NET_NEIGH_GC_THRESH3, "gc_thresh3" },
+ { CTL_INT, NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
+ { CTL_INT, NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
+ {}
+};
+
+static const struct bin_table bin_net_neigh_table[] = {
+ { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_neigh_vars_table },
+ { CTL_DIR, 0, NULL, bin_net_neigh_vars_table },
+ {}
+};
+
+static const struct bin_table bin_net_ipv4_netfilter_table[] = {
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
+
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "ip_conntrack_tcp_timeout_syn_sent" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "ip_conntrack_tcp_timeout_syn_recv" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "ip_conntrack_tcp_timeout_established" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "ip_conntrack_tcp_timeout_fin_wait" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "ip_conntrack_tcp_timeout_close_wait" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "ip_conntrack_tcp_timeout_last_ack" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "ip_conntrack_tcp_timeout_time_wait" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "ip_conntrack_tcp_timeout_close" no longer used */
+
+ /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT "ip_conntrack_udp_timeout" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM "ip_conntrack_udp_timeout_stream" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT "ip_conntrack_icmp_timeout" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT "ip_conntrack_generic_timeout" no longer used */
+
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
+ /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "ip_conntrack_tcp_timeout_max_retrans" no longer used */
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
+
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "ip_conntrack_sctp_timeout_closed" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "ip_conntrack_sctp_timeout_cookie_wait" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "ip_conntrack_sctp_timeout_cookie_echoed" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "ip_conntrack_sctp_timeout_established" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "ip_conntrack_sctp_timeout_shutdown_sent" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "ip_conntrack_sctp_timeout_shutdown_recd" no longer used */
+ /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "ip_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
+
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
+ { CTL_INT, NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv4_table[] = {
+	{ CTL_INT, NET_IPV4_FORWARD, "ip_forward" },
+
+ { CTL_DIR, NET_IPV4_CONF, "conf", bin_net_ipv4_conf_table },
+ { CTL_DIR, NET_IPV4_NEIGH, "neigh", bin_net_neigh_table },
+ { CTL_DIR, NET_IPV4_ROUTE, "route", bin_net_ipv4_route_table },
+ /* NET_IPV4_FIB_HASH unused */
+ { CTL_DIR, NET_IPV4_NETFILTER, "netfilter", bin_net_ipv4_netfilter_table },
+
+ { CTL_INT, NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
+ { CTL_INT, NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
+ { CTL_INT, NET_IPV4_TCP_SACK, "tcp_sack" },
+ { CTL_INT, NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
+ { CTL_INT, NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
+ /* NET_IPV4_AUTOCONFIG unused */
+ { CTL_INT, NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
+ { CTL_INT, NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
+ { CTL_INT, NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
+ { CTL_INT, NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
+ { CTL_INT, NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
+ { CTL_INT, NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
+ { CTL_INT, NET_IPV4_DYNADDR, "ip_dynaddr" },
+ { CTL_INT, NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
+ { CTL_INT, NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
+ { CTL_INT, NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
+ { CTL_INT, NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
+ { CTL_INT, NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
+ { CTL_INT, NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
+ { CTL_INT, NET_TCP_SYNCOOKIES, "tcp_syncookies" },
+ { CTL_INT, NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
+ { CTL_INT, NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
+ { CTL_INT, NET_TCP_STDURG, "tcp_stdurg" },
+ { CTL_INT, NET_TCP_RFC1337, "tcp_rfc1337" },
+ { CTL_INT, NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
+ { CTL_INT, NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
+ { CTL_INT, NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
+ { CTL_INT, NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
+ { CTL_INT, NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
+ { CTL_INT, NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
+ { CTL_INT, NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
+ { CTL_INT, NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
+ { CTL_INT, NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
+ { CTL_INT, NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
+ { CTL_INT, NET_TCP_FACK, "tcp_fack" },
+ { CTL_INT, NET_TCP_REORDERING, "tcp_reordering" },
+ { CTL_INT, NET_TCP_ECN, "tcp_ecn" },
+ { CTL_INT, NET_TCP_DSACK, "tcp_dsack" },
+ { CTL_INT, NET_TCP_MEM, "tcp_mem" },
+ { CTL_INT, NET_TCP_WMEM, "tcp_wmem" },
+ { CTL_INT, NET_TCP_RMEM, "tcp_rmem" },
+ { CTL_INT, NET_TCP_APP_WIN, "tcp_app_win" },
+ { CTL_INT, NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
+ { CTL_INT, NET_TCP_TW_REUSE, "tcp_tw_reuse" },
+ { CTL_INT, NET_TCP_FRTO, "tcp_frto" },
+ { CTL_INT, NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
+ { CTL_INT, NET_TCP_LOW_LATENCY, "tcp_low_latency" },
+ { CTL_INT, NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
+ { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
+ { CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
+ { CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
+ { CTL_INT, NET_TCP_ABC, "tcp_abc" },
+ { CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
+ { CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
+ { CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
+ { CTL_INT, NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
+ { CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
+ { CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
+ { CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
+ { CTL_INT, NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
+ { CTL_INT, NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
+ /* NET_TCP_AVAIL_CONG_CONTROL "tcp_available_congestion_control" no longer used */
+ { CTL_STR, NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
+ { CTL_INT, NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
+
+ { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
+ { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
+ { CTL_INT, NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
+ { CTL_INT, NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
+ { CTL_INT, NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
+ { CTL_INT, NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
+
+ { CTL_INT, NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
+ { CTL_INT, NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
+ { CTL_INT, NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
+
+ { CTL_INT, NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
+ /* NET_IPV4_IPFRAG_MAX_DIST "ipfrag_max_dist" no longer used */
+
+ { CTL_INT, 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
+
+ /* NET_TCP_DEFAULT_WIN_SCALE unused */
+ /* NET_TCP_BIC_BETA unused */
+ /* NET_IPV4_TCP_MAX_KA_PROBES unused */
+ /* NET_IPV4_IP_MASQ_DEBUG unused */
+ /* NET_TCP_SYN_TAILDROP unused */
+ /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
+ /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
+ /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
+ /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
+ /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
+ /* NET_IPV4_ALWAYS_DEFRAG unused */
+ {}
+};
+
+static const struct bin_table bin_net_ipx_table[] = {
+ { CTL_INT, NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
+ /* NET_IPX_FORWARDING unused */
+ {}
+};
+
+static const struct bin_table bin_net_atalk_table[] = {
+ { CTL_INT, NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
+ { CTL_INT, NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
+ { CTL_INT, NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
+ { CTL_INT, NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
+	{}
+};
+
+static const struct bin_table bin_net_netrom_table[] = {
+ { CTL_INT, NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
+ { CTL_INT, NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
+ { CTL_INT, NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
+ { CTL_INT, NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
+ { CTL_INT, NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
+ { CTL_INT, NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
+ { CTL_INT, NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
+ { CTL_INT, NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
+ { CTL_INT, NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
+ { CTL_INT, NET_NETROM_ROUTING_CONTROL, "routing_control" },
+ { CTL_INT, NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
+ { CTL_INT, NET_NETROM_RESET, "reset" },
+ {}
+};
+
+static const struct bin_table bin_net_ax25_param_table[] = {
+ { CTL_INT, NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
+ { CTL_INT, NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
+ { CTL_INT, NET_AX25_BACKOFF_TYPE, "backoff_type" },
+ { CTL_INT, NET_AX25_CONNECT_MODE, "connect_mode" },
+ { CTL_INT, NET_AX25_STANDARD_WINDOW, "standard_window_size" },
+ { CTL_INT, NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
+ { CTL_INT, NET_AX25_T1_TIMEOUT, "t1_timeout" },
+ { CTL_INT, NET_AX25_T2_TIMEOUT, "t2_timeout" },
+ { CTL_INT, NET_AX25_T3_TIMEOUT, "t3_timeout" },
+ { CTL_INT, NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
+ { CTL_INT, NET_AX25_N2, "maximum_retry_count" },
+ { CTL_INT, NET_AX25_PACLEN, "maximum_packet_length" },
+ { CTL_INT, NET_AX25_PROTOCOL, "protocol" },
+ { CTL_INT, NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
+ {}
+};
+
+static const struct bin_table bin_net_ax25_table[] = {
+ { CTL_DIR, 0, NULL, bin_net_ax25_param_table },
+ {}
+};
+
+static const struct bin_table bin_net_rose_table[] = {
+ { CTL_INT, NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { CTL_INT, NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { CTL_INT, NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { CTL_INT, NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { CTL_INT, NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
+ { CTL_INT, NET_ROSE_ROUTING_CONTROL, "routing_control" },
+ { CTL_INT, NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
+ { CTL_INT, NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
+ { CTL_INT, NET_ROSE_WINDOW_SIZE, "window_size" },
+ { CTL_INT, NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv6_conf_var_table[] = {
+ { CTL_INT, NET_IPV6_FORWARDING, "forwarding" },
+ { CTL_INT, NET_IPV6_HOP_LIMIT, "hop_limit" },
+ { CTL_INT, NET_IPV6_MTU, "mtu" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA, "accept_ra" },
+ { CTL_INT, NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
+ { CTL_INT, NET_IPV6_AUTOCONF, "autoconf" },
+ { CTL_INT, NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
+ { CTL_INT, NET_IPV6_RTR_SOLICITS, "router_solicitations" },
+ { CTL_INT, NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
+ { CTL_INT, NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
+ { CTL_INT, NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
+ { CTL_INT, NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
+ { CTL_INT, NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
+ { CTL_INT, NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
+ { CTL_INT, NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
+ { CTL_INT, NET_IPV6_MAX_ADDRESSES, "max_addresses" },
+ { CTL_INT, NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
+ { CTL_INT, NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
+ { CTL_INT, NET_IPV6_PROXY_NDP, "proxy_ndp" },
+ { CTL_INT, NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv6_conf_table[] = {
+ { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv6_conf_var_table },
+ { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv6_conf_var_table },
+ { CTL_DIR, 0, NULL, bin_net_ipv6_conf_var_table },
+ {}
+};
+
+static const struct bin_table bin_net_ipv6_route_table[] = {
+ /* NET_IPV6_ROUTE_FLUSH "flush" no longer used */
+ { CTL_INT, NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
+ { CTL_INT, NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
+ { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { CTL_INT, NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { CTL_INT, NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
+ { CTL_INT, NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { CTL_INT, NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { CTL_INT, NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv6_icmp_table[] = {
+ { CTL_INT, NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
+ {}
+};
+
+static const struct bin_table bin_net_ipv6_table[] = {
+ { CTL_DIR, NET_IPV6_CONF, "conf", bin_net_ipv6_conf_table },
+ { CTL_DIR, NET_IPV6_NEIGH, "neigh", bin_net_neigh_table },
+ { CTL_DIR, NET_IPV6_ROUTE, "route", bin_net_ipv6_route_table },
+ { CTL_DIR, NET_IPV6_ICMP, "icmp", bin_net_ipv6_icmp_table },
+ { CTL_INT, NET_IPV6_BINDV6ONLY, "bindv6only" },
+ { CTL_INT, NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
+ { CTL_INT, NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
+ { CTL_INT, NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
+ { CTL_INT, NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
+ { CTL_INT, NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
+ { CTL_INT, 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
+ {}
+};
+
+static const struct bin_table bin_net_x25_table[] = {
+ { CTL_INT, NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { CTL_INT, NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { CTL_INT, NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { CTL_INT, NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { CTL_INT, NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
+ { CTL_INT, NET_X25_FORWARD, "x25_forward" },
+ {}
+};
+
+static const struct bin_table bin_net_tr_table[] = {
+ { CTL_INT, NET_TR_RIF_TIMEOUT, "rif_timeout" },
+ {}
+};
+
+
+static const struct bin_table bin_net_decnet_conf_vars[] = {
+ { CTL_INT, NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
+ { CTL_INT, NET_DECNET_CONF_DEV_PRIORITY, "priority" },
+ { CTL_INT, NET_DECNET_CONF_DEV_T2, "t2" },
+ { CTL_INT, NET_DECNET_CONF_DEV_T3, "t3" },
+ {}
+};
+
+static const struct bin_table bin_net_decnet_conf[] = {
+ { CTL_DIR, NET_DECNET_CONF_ETHER, "ethernet", bin_net_decnet_conf_vars },
+ { CTL_DIR, NET_DECNET_CONF_GRE, "ipgre", bin_net_decnet_conf_vars },
+ { CTL_DIR, NET_DECNET_CONF_X25, "x25", bin_net_decnet_conf_vars },
+ { CTL_DIR, NET_DECNET_CONF_PPP, "ppp", bin_net_decnet_conf_vars },
+ { CTL_DIR, NET_DECNET_CONF_DDCMP, "ddcmp", bin_net_decnet_conf_vars },
+ { CTL_DIR, NET_DECNET_CONF_LOOPBACK, "loopback", bin_net_decnet_conf_vars },
+ { CTL_DIR, 0, NULL, bin_net_decnet_conf_vars },
+ {}
+};
+
+static const struct bin_table bin_net_decnet_table[] = {
+ { CTL_DIR, NET_DECNET_CONF, "conf", bin_net_decnet_conf },
+ { CTL_DNADR, NET_DECNET_NODE_ADDRESS, "node_address" },
+ { CTL_STR, NET_DECNET_NODE_NAME, "node_name" },
+ { CTL_STR, NET_DECNET_DEFAULT_DEVICE, "default_device" },
+ { CTL_INT, NET_DECNET_TIME_WAIT, "time_wait" },
+ { CTL_INT, NET_DECNET_DN_COUNT, "dn_count" },
+ { CTL_INT, NET_DECNET_DI_COUNT, "di_count" },
+ { CTL_INT, NET_DECNET_DR_COUNT, "dr_count" },
+ { CTL_INT, NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
+ { CTL_INT, NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
+ { CTL_INT, NET_DECNET_MEM, "decnet_mem" },
+ { CTL_INT, NET_DECNET_RMEM, "decnet_rmem" },
+ { CTL_INT, NET_DECNET_WMEM, "decnet_wmem" },
+ { CTL_INT, NET_DECNET_DEBUG_LEVEL, "debug" },
+ {}
+};
+
+static const struct bin_table bin_net_sctp_table[] = {
+ { CTL_INT, NET_SCTP_RTO_INITIAL, "rto_initial" },
+ { CTL_INT, NET_SCTP_RTO_MIN, "rto_min" },
+ { CTL_INT, NET_SCTP_RTO_MAX, "rto_max" },
+ { CTL_INT, NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
+ { CTL_INT, NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
+ { CTL_INT, NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
+ { CTL_INT, NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
+ { CTL_INT, NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
+ { CTL_INT, NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
+ { CTL_INT, NET_SCTP_HB_INTERVAL, "hb_interval" },
+ { CTL_INT, NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
+ { CTL_INT, NET_SCTP_MAX_BURST, "max_burst" },
+ { CTL_INT, NET_SCTP_ADDIP_ENABLE, "addip_enable" },
+ { CTL_INT, NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
+ { CTL_INT, NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
+ { CTL_INT, NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
+ { CTL_INT, NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
+ {}
+};
+
+static const struct bin_table bin_net_llc_llc2_timeout_table[] = {
+ { CTL_INT, NET_LLC2_ACK_TIMEOUT, "ack" },
+ { CTL_INT, NET_LLC2_P_TIMEOUT, "p" },
+ { CTL_INT, NET_LLC2_REJ_TIMEOUT, "rej" },
+ { CTL_INT, NET_LLC2_BUSY_TIMEOUT, "busy" },
+ {}
+};
+
+static const struct bin_table bin_net_llc_station_table[] = {
+ { CTL_INT, NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
+ {}
+};
+
+static const struct bin_table bin_net_llc_llc2_table[] = {
+ { CTL_DIR, NET_LLC2, "timeout", bin_net_llc_llc2_timeout_table },
+ {}
+};
+
+static const struct bin_table bin_net_llc_table[] = {
+ { CTL_DIR, NET_LLC2, "llc2", bin_net_llc_llc2_table },
+ { CTL_DIR, NET_LLC_STATION, "station", bin_net_llc_station_table },
+ {}
+};
+
+static const struct bin_table bin_net_netfilter_table[] = {
+ { CTL_INT, NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "nf_conntrack_tcp_timeout_syn_sent" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "nf_conntrack_tcp_timeout_syn_recv" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "nf_conntrack_tcp_timeout_established" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "nf_conntrack_tcp_timeout_fin_wait" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "nf_conntrack_tcp_timeout_close_wait" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "nf_conntrack_tcp_timeout_last_ack" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "nf_conntrack_tcp_timeout_time_wait" no longer used */
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "nf_conntrack_tcp_timeout_close" no longer used */
+ /* NET_NF_CONNTRACK_UDP_TIMEOUT "nf_conntrack_udp_timeout" no longer used */
+ /* NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM "nf_conntrack_udp_timeout_stream" no longer used */
+ /* NET_NF_CONNTRACK_ICMP_TIMEOUT "nf_conntrack_icmp_timeout" no longer used */
+ /* NET_NF_CONNTRACK_GENERIC_TIMEOUT "nf_conntrack_generic_timeout" no longer used */
+ { CTL_INT, NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
+ { CTL_INT, NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
+ /* NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "nf_conntrack_tcp_timeout_max_retrans" no longer used */
+ { CTL_INT, NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
+ { CTL_INT, NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
+ { CTL_INT, NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "nf_conntrack_sctp_timeout_closed" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "nf_conntrack_sctp_timeout_cookie_wait" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "nf_conntrack_sctp_timeout_cookie_echoed" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "nf_conntrack_sctp_timeout_established" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "nf_conntrack_sctp_timeout_shutdown_sent" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "nf_conntrack_sctp_timeout_shutdown_recd" no longer used */
+ /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "nf_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
+ { CTL_INT, NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
+ /* NET_NF_CONNTRACK_ICMPV6_TIMEOUT "nf_conntrack_icmpv6_timeout" no longer used */
+ /* NET_NF_CONNTRACK_FRAG6_TIMEOUT "nf_conntrack_frag6_timeout" no longer used */
+ { CTL_INT, NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
+ { CTL_INT, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
+ { CTL_INT, NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
+
+ {}
+};
+
+static const struct bin_table bin_net_irda_table[] = {
+ { CTL_INT, NET_IRDA_DISCOVERY, "discovery" },
+ { CTL_STR, NET_IRDA_DEVNAME, "devname" },
+ { CTL_INT, NET_IRDA_DEBUG, "debug" },
+ { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" },
+ { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
+ { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
+ { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
+ { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
+ { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
+ { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
+ { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
+ { CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
+ { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
+ { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
+ {}
+};
+
+static const struct bin_table bin_net_table[] = {
+ { CTL_DIR, NET_CORE, "core", bin_net_core_table },
+ /* NET_ETHER not used */
+ /* NET_802 not used */
+ { CTL_DIR, NET_UNIX, "unix", bin_net_unix_table },
+ { CTL_DIR, NET_IPV4, "ipv4", bin_net_ipv4_table },
+ { CTL_DIR, NET_IPX, "ipx", bin_net_ipx_table },
+ { CTL_DIR, NET_ATALK, "appletalk", bin_net_atalk_table },
+ { CTL_DIR, NET_NETROM, "netrom", bin_net_netrom_table },
+ { CTL_DIR, NET_AX25, "ax25", bin_net_ax25_table },
+ /* NET_BRIDGE "bridge" no longer used */
+ { CTL_DIR, NET_ROSE, "rose", bin_net_rose_table },
+ { CTL_DIR, NET_IPV6, "ipv6", bin_net_ipv6_table },
+ { CTL_DIR, NET_X25, "x25", bin_net_x25_table },
+ { CTL_DIR, NET_TR, "token-ring", bin_net_tr_table },
+ { CTL_DIR, NET_DECNET, "decnet", bin_net_decnet_table },
+ /* NET_ECONET not used */
+ { CTL_DIR, NET_SCTP, "sctp", bin_net_sctp_table },
+ { CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
+ { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
+ /* NET_DCCP "dccp" no longer used */
+ { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table },
+ { CTL_INT, 2089, "nf_conntrack_max" },
+ {}
+};
+
+static const struct bin_table bin_fs_quota_table[] = {
+ { CTL_INT, FS_DQ_LOOKUPS, "lookups" },
+ { CTL_INT, FS_DQ_DROPS, "drops" },
+ { CTL_INT, FS_DQ_READS, "reads" },
+ { CTL_INT, FS_DQ_WRITES, "writes" },
+ { CTL_INT, FS_DQ_CACHE_HITS, "cache_hits" },
+ { CTL_INT, FS_DQ_ALLOCATED, "allocated_dquots" },
+ { CTL_INT, FS_DQ_FREE, "free_dquots" },
+ { CTL_INT, FS_DQ_SYNCS, "syncs" },
+ { CTL_INT, FS_DQ_WARNINGS, "warnings" },
+ {}
+};
+
+static const struct bin_table bin_fs_xfs_table[] = {
+ { CTL_INT, XFS_SGID_INHERIT, "irix_sgid_inherit" },
+ { CTL_INT, XFS_SYMLINK_MODE, "irix_symlink_mode" },
+ { CTL_INT, XFS_PANIC_MASK, "panic_mask" },
+
+ { CTL_INT, XFS_ERRLEVEL, "error_level" },
+ { CTL_INT, XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
+ { CTL_INT, XFS_INHERIT_SYNC, "inherit_sync" },
+ { CTL_INT, XFS_INHERIT_NODUMP, "inherit_nodump" },
+ { CTL_INT, XFS_INHERIT_NOATIME, "inherit_noatime" },
+ { CTL_INT, XFS_BUF_TIMER, "xfsbufd_centisecs" },
+ { CTL_INT, XFS_BUF_AGE, "age_buffer_centisecs" },
+ { CTL_INT, XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
+ { CTL_INT, XFS_ROTORSTEP, "rotorstep" },
+ { CTL_INT, XFS_INHERIT_NODFRG, "inherit_nodefrag" },
+ { CTL_INT, XFS_FILESTREAM_TIMER, "filestream_centisecs" },
+ { CTL_INT, XFS_STATS_CLEAR, "stats_clear" },
+ {}
+};
+
+static const struct bin_table bin_fs_ocfs2_nm_table[] = {
+ { CTL_STR, 1, "hb_ctl_path" },
+ {}
+};
+
+static const struct bin_table bin_fs_ocfs2_table[] = {
+ { CTL_DIR, 1, "nm", bin_fs_ocfs2_nm_table },
+ {}
+};
+
+static const struct bin_table bin_inotify_table[] = {
+ { CTL_INT, INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
+ { CTL_INT, INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
+ { CTL_INT, INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
+ {}
+};
+
+static const struct bin_table bin_fs_table[] = {
+ { CTL_INT, FS_NRINODE, "inode-nr" },
+ { CTL_INT, FS_STATINODE, "inode-state" },
+ /* FS_MAXINODE unused */
+ /* FS_NRDQUOT unused */
+ /* FS_MAXDQUOT unused */
+ /* FS_NRFILE "file-nr" no longer used */
+ { CTL_INT, FS_MAXFILE, "file-max" },
+ { CTL_INT, FS_DENTRY, "dentry-state" },
+ /* FS_NRSUPER unused */
+ /* FS_MAXUPSER unused */
+ { CTL_INT, FS_OVERFLOWUID, "overflowuid" },
+ { CTL_INT, FS_OVERFLOWGID, "overflowgid" },
+ { CTL_INT, FS_LEASES, "leases-enable" },
+ { CTL_INT, FS_DIR_NOTIFY, "dir-notify-enable" },
+ { CTL_INT, FS_LEASE_TIME, "lease-break-time" },
+ { CTL_DIR, FS_DQSTATS, "quota", bin_fs_quota_table },
+ { CTL_DIR, FS_XFS, "xfs", bin_fs_xfs_table },
+ { CTL_ULONG, FS_AIO_NR, "aio-nr" },
+ { CTL_ULONG, FS_AIO_MAX_NR, "aio-max-nr" },
+ { CTL_DIR, FS_INOTIFY, "inotify", bin_inotify_table },
+ { CTL_DIR, FS_OCFS2, "ocfs2", bin_fs_ocfs2_table },
+ { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" },
+ {}
+};
+
+static const struct bin_table bin_ipmi_table[] = {
+ { CTL_INT, DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
+ {}
+};
+
+static const struct bin_table bin_mac_hid_files[] = {
+ /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
+ /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
+ { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
+ { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
+ { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
+ /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
+ {}
+};
+
+static const struct bin_table bin_raid_table[] = {
+ { CTL_INT, DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
+ { CTL_INT, DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
+ {}
+};
+
+static const struct bin_table bin_scsi_table[] = {
+ { CTL_INT, DEV_SCSI_LOGGING_LEVEL, "logging_level" },
+ {}
+};
+
+static const struct bin_table bin_dev_table[] = {
+ /* DEV_CDROM "cdrom" no longer used */
+ /* DEV_HWMON unused */
+ /* DEV_PARPORT "parport" no longer used */
+ { CTL_DIR, DEV_RAID, "raid", bin_raid_table },
+ { CTL_DIR, DEV_MAC_HID, "mac_hid", bin_mac_hid_files },
+ { CTL_DIR, DEV_SCSI, "scsi", bin_scsi_table },
+ { CTL_DIR, DEV_IPMI, "ipmi", bin_ipmi_table },
+ {}
+};
+
+static const struct bin_table bin_bus_isa_table[] = {
+ { CTL_INT, BUS_ISA_MEM_BASE, "membase" },
+ { CTL_INT, BUS_ISA_PORT_BASE, "portbase" },
+ { CTL_INT, BUS_ISA_PORT_SHIFT, "portshift" },
+ {}
+};
+
+static const struct bin_table bin_bus_table[] = {
+ { CTL_DIR, CTL_BUS_ISA, "isa", bin_bus_isa_table },
+ {}
+};
+
+
+static const struct bin_table bin_s390dbf_table[] = {
+ { CTL_INT, 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
+ { CTL_INT, 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
+ {}
+};
+
+static const struct bin_table bin_sunrpc_table[] = {
+ /* CTL_RPCDEBUG "rpc_debug" no longer used */
+ /* CTL_NFSDEBUG "nfs_debug" no longer used */
+ /* CTL_NFSDDEBUG "nfsd_debug" no longer used */
+ /* CTL_NLMDEBUG "nlm_debug" no longer used */
+
+ { CTL_INT, CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
+ { CTL_INT, CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
+ { CTL_INT, CTL_MIN_RESVPORT, "min_resvport" },
+ { CTL_INT, CTL_MAX_RESVPORT, "max_resvport" },
+ {}
+};
+
+static const struct bin_table bin_pm_table[] = {
+ /* frv specific */
+	/* 1 == CTL_PM_SUSPEND "suspend" no longer used */
+ { CTL_INT, 2 /* CTL_PM_CMODE */, "cmode" },
+ { CTL_INT, 3 /* CTL_PM_P0 */, "p0" },
+ { CTL_INT, 4 /* CTL_PM_CM */, "cm" },
+ {}
+};
+
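+/* Top-level map from the binary CTL_* numbers to /proc/sys names. */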
+static const struct bin_table bin_root_table[] = {
+ { CTL_DIR, CTL_KERN, "kernel", bin_kern_table },
+ { CTL_DIR, CTL_VM, "vm", bin_vm_table },
+ { CTL_DIR, CTL_NET, "net", bin_net_table },
+ /* CTL_PROC not used */
+ { CTL_DIR, CTL_FS, "fs", bin_fs_table },
+ /* CTL_DEBUG "debug" no longer used */
+ { CTL_DIR, CTL_DEV, "dev", bin_dev_table },
+ { CTL_DIR, CTL_BUS, "bus", bin_bus_table },
+ { CTL_DIR, CTL_ABI, "abi" },
+ /* CTL_CPU not used */
+ /* CTL_ARLAN "arlan" no longer used */
+ { CTL_DIR, CTL_S390DBF, "s390dbf", bin_s390dbf_table },
+ { CTL_DIR, CTL_SUNRPC, "sunrpc", bin_sunrpc_table },
+ { CTL_DIR, CTL_PM, "pm", bin_pm_table },
+ {}
+};
+
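+/*
+ * The ->convert helpers translate between the binary sysctl(2) argument
+ * representation and the ASCII format of the backing /proc/sys file.
+ */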
+static ssize_t bin_dir(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ return -ENOTDIR;
+}
+
+
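+/* Strings pass through mostly verbatim; only a trailing newline is stripped. */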
+static ssize_t bin_string(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ ssize_t result, copied = 0;
+
+ if (oldval && oldlen) {
+ char __user *lastp;
+ loff_t pos = 0;
+ int ch;
+
+ result = vfs_read(file, oldval, oldlen, &pos);
+ if (result < 0)
+ goto out;
+
+ copied = result;
+ lastp = oldval + copied - 1;
+
+ result = -EFAULT;
+ if (get_user(ch, lastp))
+ goto out;
+
+ /* Trim off the trailing newline */
+ if (ch == '\n') {
+ result = -EFAULT;
+ if (put_user('\0', lastp))
+ goto out;
+ copied -= 1;
+ }
+ }
+
+ if (newval && newlen) {
+ loff_t pos = 0;
+
+ result = vfs_write(file, newval, newlen, &pos);
+ if (result < 0)
+ goto out;
+ }
+
+ result = copied;
+out:
+ return result;
+}
+
+static ssize_t bin_intvec(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ mm_segment_t old_fs = get_fs();
+ ssize_t copied = 0;
+ char *buffer;
+ ssize_t result;
+
+ result = -ENOMEM;
+ buffer = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buffer)
+ goto out;
+
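+	/*
+	 * The proc file holds the vector as whitespace-separated ASCII
+	 * integers; convert to/from the array of unsigned ints that the
+	 * binary interface exchanges with userspace.
+	 */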
+ if (oldval && oldlen) {
+ unsigned __user *vec = oldval;
+ size_t length = oldlen / sizeof(*vec);
+ loff_t pos = 0;
+ char *str, *end;
+ int i;
+
+ set_fs(KERNEL_DS);
+ result = vfs_read(file, buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+
+ str = buffer;
+ end = str + result;
+ *end++ = '\0';
+ for (i = 0; i < length; i++) {
+ unsigned long value;
+
+ value = simple_strtoul(str, &str, 10);
+ while (isspace(*str))
+ str++;
+
+ result = -EFAULT;
+ if (put_user(value, vec + i))
+ goto out_kfree;
+
+ copied += sizeof(*vec);
+ if (!isdigit(*str))
+ break;
+ }
+ }
+
+ if (newval && newlen) {
+ unsigned __user *vec = newval;
+ size_t length = newlen / sizeof(*vec);
+ loff_t pos = 0;
+ char *str, *end;
+ int i;
+
+ str = buffer;
+ end = str + BUFSZ;
+ for (i = 0; i < length; i++) {
+ unsigned long value;
+
+ result = -EFAULT;
+ if (get_user(value, vec + i))
+ goto out_kfree;
+
+ str += snprintf(str, end - str, "%lu\t", value);
+ }
+
+ set_fs(KERNEL_DS);
+ result = vfs_write(file, buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+ }
+ result = copied;
+out_kfree:
+ kfree(buffer);
+out:
+ return result;
+}
+
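+/* Identical to bin_intvec() except that the elements are unsigned long. */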
+static ssize_t bin_ulongvec(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ mm_segment_t old_fs = get_fs();
+ ssize_t copied = 0;
+ char *buffer;
+ ssize_t result;
+
+ result = -ENOMEM;
+ buffer = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buffer)
+ goto out;
+
+ if (oldval && oldlen) {
+ unsigned long __user *vec = oldval;
+ size_t length = oldlen / sizeof(*vec);
+ loff_t pos = 0;
+ char *str, *end;
+ int i;
+
+ set_fs(KERNEL_DS);
+ result = vfs_read(file, buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+
+ str = buffer;
+ end = str + result;
+ *end++ = '\0';
+ for (i = 0; i < length; i++) {
+ unsigned long value;
+
+ value = simple_strtoul(str, &str, 10);
+ while (isspace(*str))
+ str++;
+
+ result = -EFAULT;
+ if (put_user(value, vec + i))
+ goto out_kfree;
+
+ copied += sizeof(*vec);
+ if (!isdigit(*str))
+ break;
+ }
+ }
+
+ if (newval && newlen) {
+ unsigned long __user *vec = newval;
+ size_t length = newlen / sizeof(*vec);
+ loff_t pos = 0;
+ char *str, *end;
+ int i;
+
+ str = buffer;
+ end = str + BUFSZ;
+ for (i = 0; i < length; i++) {
+ unsigned long value;
+
+ result = -EFAULT;
+ if (get_user(value, vec + i))
+ goto out_kfree;
+
+ str += snprintf(str, end - str, "%lu\t", value);
+ }
+
+ set_fs(KERNEL_DS);
+ result = vfs_write(file, buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+ }
+ result = copied;
+out_kfree:
+ kfree(buffer);
+out:
+ return result;
+}
+
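+/*
+ * Convert one hex digit to its value; ch | 0x20 folds upper case to
+ * lower.  The caller (bin_uuid) has already checked isxdigit().
+ */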
+static unsigned hex_value(int ch)
+{
+ return isdigit(ch) ? ch - '0' : ((ch | 0x20) - 'a') + 10;
+}
+
+static ssize_t bin_uuid(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ mm_segment_t old_fs = get_fs();
+ ssize_t result, copied = 0;
+
+ /* Only supports reads */
+ if (oldval && oldlen) {
+ loff_t pos = 0;
+ char buf[40], *str = buf;
+ unsigned char uuid[16];
+ int i;
+
+ set_fs(KERNEL_DS);
+ result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+
+ buf[result] = '\0';
+
+		/* Convert the uuid from its string form to binary */
+ for (i = 0; i < 16; i++) {
+ result = -EIO;
+ if (!isxdigit(str[0]) || !isxdigit(str[1]))
+ goto out;
+
+ uuid[i] = (hex_value(str[0]) << 4) | hex_value(str[1]);
+ str += 2;
+ if (*str == '-')
+ str++;
+ }
+
+ if (oldlen > 16)
+ oldlen = 16;
+
+ result = -EFAULT;
+ if (copy_to_user(oldval, uuid, oldlen))
+ goto out;
+
+ copied = oldlen;
+ }
+ result = copied;
+out:
+ return result;
+}
+
+static ssize_t bin_dn_node_address(struct file *file,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ mm_segment_t old_fs = get_fs();
+ ssize_t result, copied = 0;
+
+ if (oldval && oldlen) {
+ loff_t pos = 0;
+ char buf[15], *nodep;
+ unsigned long area, node;
+ __le16 dnaddr;
+
+ set_fs(KERNEL_DS);
+ result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+
+ buf[result] = '\0';
+
+		/* Convert the decnet address to binary */
+ result = -EIO;
+		nodep = strchr(buf, '.');
+		if (!nodep)
+			goto out;
+		nodep++;
+
+ area = simple_strtoul(buf, NULL, 10);
+ node = simple_strtoul(nodep, NULL, 10);
+
+ result = -EIO;
+		if ((area > 63) || (node > 1023))
+ goto out;
+
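+		/*
+		 * A DECnet address is "area.node": 6-bit area, 10-bit node,
+		 * packed as (area << 10) | node; e.g. "1.2" becomes 0x0402.
+		 */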
+ dnaddr = cpu_to_le16((area << 10) | node);
+
+ result = -EFAULT;
+ if (put_user(dnaddr, (__le16 __user *)oldval))
+ goto out;
+
+ copied = sizeof(dnaddr);
+ }
+
+ if (newval && newlen) {
+ loff_t pos = 0;
+ __le16 dnaddr;
+ char buf[15];
+ int len;
+
+ result = -EINVAL;
+ if (newlen != sizeof(dnaddr))
+ goto out;
+
+ result = -EFAULT;
+ if (get_user(dnaddr, (__le16 __user *)newval))
+ goto out;
+
+ len = snprintf(buf, sizeof(buf), "%hu.%hu",
+ le16_to_cpu(dnaddr) >> 10,
+ le16_to_cpu(dnaddr) & 0x3ff);
+
+ set_fs(KERNEL_DS);
+ result = vfs_write(file, buf, len, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+ }
+
+ result = copied;
+out:
+ return result;
+}
+
+static const struct bin_table *get_sysctl(const int *name, int nlen, char *path)
+{
+ const struct bin_table *table = &bin_root_table[0];
+ int ctl_name;
+
+	/* The binary sysctl tables have a small maximum depth, so
+	 * there is no danger of overflowing our path, which is
+	 * PATH_MAX bytes long.
+	 */
+ memcpy(path, "sys/", 4);
+ path += 4;
+
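+	/* Walk one table level per name[] component until a leaf is reached. */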
+repeat:
+ if (!nlen)
+ return ERR_PTR(-ENOTDIR);
+ ctl_name = *name;
+ name++;
+ nlen--;
+ for ( ; table->convert; table++) {
+ int len = 0;
+
+		/*
+		 * For a wildcard entry, map from the interface index to
+		 * the network device name.
+		 */
+ if (!table->ctl_name) {
+#ifdef CONFIG_NET
+ struct net *net = current->nsproxy->net_ns;
+ struct net_device *dev;
+ dev = dev_get_by_index(net, ctl_name);
+ if (dev) {
+ len = strlen(dev->name);
+ memcpy(path, dev->name, len);
+ dev_put(dev);
+ }
+#endif
+		/* Use the well-known sysctl number to proc name mapping */
+ } else if (ctl_name == table->ctl_name) {
+ len = strlen(table->procname);
+ memcpy(path, table->procname, len);
+ }
+ if (len) {
+ path += len;
+ if (table->child) {
+ *path++ = '/';
+ table = table->child;
+ goto repeat;
+ }
+ *path = '\0';
+ return table;
+ }
+ }
+ return ERR_PTR(-ENOTDIR);
+}
+
+static char *sysctl_getname(const int *name, int nlen, const struct bin_table **tablep)
+{
+ char *tmp, *result;
+
+ result = ERR_PTR(-ENOMEM);
+ tmp = __getname();
+ if (tmp) {
+ const struct bin_table *table = get_sysctl(name, nlen, tmp);
+ result = tmp;
+ *tablep = table;
+ if (IS_ERR(table)) {
+ __putname(tmp);
+ result = ERR_CAST(table);
+ }
+ }
+ return result;
+}
+
+static ssize_t binary_sysctl(const int *name, int nlen,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ const struct bin_table *table = NULL;
+ struct nameidata nd;
+ struct vfsmount *mnt;
+ struct file *file;
+ ssize_t result;
+ char *pathname;
+ int flags;
+ int acc_mode, fmode;
+
+ pathname = sysctl_getname(name, nlen, &table);
+ result = PTR_ERR(pathname);
+ if (IS_ERR(pathname))
+ goto out;
+
+ /* How should the sysctl be accessed? */
+ if (oldval && oldlen && newval && newlen) {
+ flags = O_RDWR;
+ acc_mode = MAY_READ | MAY_WRITE;
+ fmode = FMODE_READ | FMODE_WRITE;
+ } else if (newval && newlen) {
+ flags = O_WRONLY;
+ acc_mode = MAY_WRITE;
+ fmode = FMODE_WRITE;
+ } else if (oldval && oldlen) {
+ flags = O_RDONLY;
+ acc_mode = MAY_READ;
+ fmode = FMODE_READ;
+ } else {
+ result = 0;
+ goto out_putname;
+ }
+
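+	/*
+	 * Reuse the /proc/sys files: look up the translated path in this
+	 * task's proc mount and let the table's ->convert helper do the
+	 * actual I/O.
+	 */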
+ mnt = current->nsproxy->pid_ns->proc_mnt;
+ result = vfs_path_lookup(mnt->mnt_root, mnt, pathname, 0, &nd);
+ if (result)
+ goto out_putname;
+
+ result = may_open(&nd.path, acc_mode, fmode);
+ if (result)
+ goto out_putpath;
+
+ file = dentry_open(nd.path.dentry, nd.path.mnt, flags, current_cred());
+ result = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out_putname;
+
+ result = table->convert(file, oldval, oldlen, newval, newlen);
+
+ fput(file);
+out_putname:
+ putname(pathname);
+out:
+ return result;
+
+out_putpath:
+ path_put(&nd.path);
+ goto out_putname;
+}
+
+
+#else /* CONFIG_SYSCTL_SYSCALL */
+
+static ssize_t binary_sysctl(const int *name, int nlen,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_SYSCTL_SYSCALL */
+
+
+static void deprecated_sysctl_warning(const int *name, int nlen)
+{
+ int i;
+
+ if (printk_ratelimit()) {
+ printk(KERN_INFO
+ "warning: process `%s' used the deprecated sysctl "
+ "system call with ", current->comm);
+ for (i = 0; i < nlen; i++)
+ printk("%d.", name[i]);
+ printk("\n");
+ }
+}
+
+static ssize_t do_sysctl(int __user *args_name, int nlen,
+ void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
+{
+ int name[CTL_MAXNAME];
+ int i;
+
+ /* Check args->nlen. */
+ if (nlen < 0 || nlen > CTL_MAXNAME)
+ return -ENOTDIR;
+ /* Read in the sysctl name for simplicity */
+ for (i = 0; i < nlen; i++)
+ if (get_user(name[i], args_name + i))
+ return -EFAULT;
+
+ deprecated_sysctl_warning(name, nlen);
+
+ return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
+}
+
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
+{
+ struct __sysctl_args tmp;
+ size_t oldlen = 0;
+ ssize_t result;
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ if (tmp.oldval && !tmp.oldlenp)
+ return -EFAULT;
+
+ if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp))
+ return -EFAULT;
+
+ result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen,
+ tmp.newval, tmp.newlen);
+
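+	/* On success do_sysctl() returns the number of bytes copied out. */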
+ if (result >= 0) {
+ oldlen = result;
+ result = 0;
+ }
+
+ if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp))
+ return -EFAULT;
+
+ return result;
+}
+
+
+#ifdef CONFIG_COMPAT
+#include <asm/compat.h>
+
+struct compat_sysctl_args {
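+/* 32-bit layout of struct __sysctl_args; pointers arrive as compat_uptr_t. */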
+ compat_uptr_t name;
+ int nlen;
+ compat_uptr_t oldval;
+ compat_uptr_t oldlenp;
+ compat_uptr_t newval;
+ compat_size_t newlen;
+ compat_ulong_t __unused[4];
+};
+
+asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
+{
+ struct compat_sysctl_args tmp;
+ compat_size_t __user *compat_oldlenp;
+ size_t oldlen = 0;
+ ssize_t result;
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ if (tmp.oldval && !tmp.oldlenp)
+ return -EFAULT;
+
+ compat_oldlenp = compat_ptr(tmp.oldlenp);
+ if (compat_oldlenp && get_user(oldlen, compat_oldlenp))
+ return -EFAULT;
+
+ result = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
+ compat_ptr(tmp.oldval), oldlen,
+ compat_ptr(tmp.newval), tmp.newlen);
+
+ if (result >= 0) {
+ oldlen = result;
+ result = 0;
+ }
+
+ if (compat_oldlenp && put_user(oldlen, compat_oldlenp))
+ return -EFAULT;
+
+ return result;
+}
+
+#endif /* CONFIG_COMPAT */
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b6e7aaea4604..04cdcf72c827 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -5,1239 +5,6 @@
#include <linux/string.h>
#include <net/ip_vs.h>
-struct trans_ctl_table {
- int ctl_name;
- const char *procname;
- const struct trans_ctl_table *child;
-};
-
-static const struct trans_ctl_table trans_random_table[] = {
- { RANDOM_POOLSIZE, "poolsize" },
- { RANDOM_ENTROPY_COUNT, "entropy_avail" },
- { RANDOM_READ_THRESH, "read_wakeup_threshold" },
- { RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
- { RANDOM_BOOT_ID, "boot_id" },
- { RANDOM_UUID, "uuid" },
- {}
-};
-
-static const struct trans_ctl_table trans_pty_table[] = {
- { PTY_MAX, "max" },
- { PTY_NR, "nr" },
- {}
-};
-
-static const struct trans_ctl_table trans_kern_table[] = {
- { KERN_OSTYPE, "ostype" },
- { KERN_OSRELEASE, "osrelease" },
- /* KERN_OSREV not used */
- { KERN_VERSION, "version" },
- /* KERN_SECUREMASK not used */
- /* KERN_PROF not used */
- { KERN_NODENAME, "hostname" },
- { KERN_DOMAINNAME, "domainname" },
-
- { KERN_PANIC, "panic" },
- { KERN_REALROOTDEV, "real-root-dev" },
-
- { KERN_SPARC_REBOOT, "reboot-cmd" },
- { KERN_CTLALTDEL, "ctrl-alt-del" },
- { KERN_PRINTK, "printk" },
-
- /* KERN_NAMETRANS not used */
- /* KERN_PPC_HTABRECLAIM not used */
- /* KERN_PPC_ZEROPAGED not used */
- { KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
-
- { KERN_MODPROBE, "modprobe" },
- { KERN_SG_BIG_BUFF, "sg-big-buff" },
- { KERN_ACCT, "acct" },
- { KERN_PPC_L2CR, "l2cr" },
-
- /* KERN_RTSIGNR not used */
- /* KERN_RTSIGMAX not used */
-
- { KERN_SHMMAX, "shmmax" },
- { KERN_MSGMAX, "msgmax" },
- { KERN_MSGMNB, "msgmnb" },
- /* KERN_MSGPOOL not used*/
- { KERN_SYSRQ, "sysrq" },
- { KERN_MAX_THREADS, "threads-max" },
- { KERN_RANDOM, "random", trans_random_table },
- { KERN_SHMALL, "shmall" },
- { KERN_MSGMNI, "msgmni" },
- { KERN_SEM, "sem" },
- { KERN_SPARC_STOP_A, "stop-a" },
- { KERN_SHMMNI, "shmmni" },
-
- { KERN_OVERFLOWUID, "overflowuid" },
- { KERN_OVERFLOWGID, "overflowgid" },
-
- { KERN_HOTPLUG, "hotplug", },
- { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
-
- { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
- { KERN_CORE_USES_PID, "core_uses_pid" },
- { KERN_TAINTED, "tainted" },
- { KERN_CADPID, "cad_pid" },
- { KERN_PIDMAX, "pid_max" },
- { KERN_CORE_PATTERN, "core_pattern" },
- { KERN_PANIC_ON_OOPS, "panic_on_oops" },
- { KERN_HPPA_PWRSW, "soft-power" },
- { KERN_HPPA_UNALIGNED, "unaligned-trap" },
-
- { KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
- { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
-
- { KERN_PTY, "pty", trans_pty_table },
- { KERN_NGROUPS_MAX, "ngroups_max" },
- { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
- { KERN_HZ_TIMER, "hz_timer" },
- { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
- { KERN_BOOTLOADER_TYPE, "bootloader_type" },
- { KERN_RANDOMIZE, "randomize_va_space" },
-
- { KERN_SPIN_RETRY, "spin_retry" },
- { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" },
- { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
- { KERN_COMPAT_LOG, "compat-log" },
- { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
- { KERN_NMI_WATCHDOG, "nmi_watchdog" },
- { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
- {}
-};
-
-static const struct trans_ctl_table trans_vm_table[] = {
- { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
- { VM_PAGE_CLUSTER, "page-cluster" },
- { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
- { VM_DIRTY_RATIO, "dirty_ratio" },
- { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" },
- { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" },
- { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
- { VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
- /* VM_PAGEBUF unused */
- { VM_HUGETLB_PAGES, "nr_hugepages" },
- { VM_SWAPPINESS, "swappiness" },
- { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
- { VM_MIN_FREE_KBYTES, "min_free_kbytes" },
- { VM_MAX_MAP_COUNT, "max_map_count" },
- { VM_LAPTOP_MODE, "laptop_mode" },
- { VM_BLOCK_DUMP, "block_dump" },
- { VM_HUGETLB_GROUP, "hugetlb_shm_group" },
- { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
- { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
- /* VM_SWAP_TOKEN_TIMEOUT unused */
- { VM_DROP_PAGECACHE, "drop_caches" },
- { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
- { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
- { VM_MIN_UNMAPPED, "min_unmapped_ratio" },
- { VM_PANIC_ON_OOM, "panic_on_oom" },
- { VM_VDSO_ENABLED, "vdso_enabled" },
- { VM_MIN_SLAB, "min_slab_ratio" },
-
- {}
-};
-
-static const struct trans_ctl_table trans_net_core_table[] = {
- { NET_CORE_WMEM_MAX, "wmem_max" },
- { NET_CORE_RMEM_MAX, "rmem_max" },
- { NET_CORE_WMEM_DEFAULT, "wmem_default" },
- { NET_CORE_RMEM_DEFAULT, "rmem_default" },
- /* NET_CORE_DESTROY_DELAY unused */
- { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
- /* NET_CORE_FASTROUTE unused */
- { NET_CORE_MSG_COST, "message_cost" },
- { NET_CORE_MSG_BURST, "message_burst" },
- { NET_CORE_OPTMEM_MAX, "optmem_max" },
- /* NET_CORE_HOT_LIST_LENGTH unused */
- /* NET_CORE_DIVERT_VERSION unused */
- /* NET_CORE_NO_CONG_THRESH unused */
- /* NET_CORE_NO_CONG unused */
- /* NET_CORE_LO_CONG unused */
- /* NET_CORE_MOD_CONG unused */
- { NET_CORE_DEV_WEIGHT, "dev_weight" },
- { NET_CORE_SOMAXCONN, "somaxconn" },
- { NET_CORE_BUDGET, "netdev_budget" },
- { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
- { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
- { NET_CORE_WARNINGS, "warnings" },
- {},
-};
-
-static const struct trans_ctl_table trans_net_unix_table[] = {
- /* NET_UNIX_DESTROY_DELAY unused */
- /* NET_UNIX_DELETE_DELAY unused */
- { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv4_route_table[] = {
- { NET_IPV4_ROUTE_FLUSH, "flush" },
- { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
- { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
- { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
- { NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
- { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
- { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
- { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
- { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
- { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
- { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
- { NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
- { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
- { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
- { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
- { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
- { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
- { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
- { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
- { NET_IPV4_CONF_FORWARDING, "forwarding" },
- { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
-
- { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
- { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
- { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
- { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
- { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
- { NET_IPV4_CONF_RP_FILTER, "rp_filter" },
- { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
- { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
- { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
- { NET_IPV4_CONF_TAG, "tag" },
- { NET_IPV4_CONF_ARPFILTER, "arp_filter" },
- { NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
- { NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
- { NET_IPV4_CONF_NOPOLICY, "disable_policy" },
- { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
-
- { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
- { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
- { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
- { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
- { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv4_conf_table[] = {
- { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
- { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
- { 0, NULL, trans_net_ipv4_conf_vars_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_neigh_vars_table[] = {
- { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
- { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
- { NET_NEIGH_APP_SOLICIT, "app_solicit" },
- { NET_NEIGH_RETRANS_TIME, "retrans_time" },
- { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
- { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
- { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
- { NET_NEIGH_UNRES_QLEN, "unres_qlen" },
- { NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
- { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" },
- { NET_NEIGH_PROXY_DELAY, "proxy_delay" },
- { NET_NEIGH_LOCKTIME, "locktime" },
- { NET_NEIGH_GC_INTERVAL, "gc_interval" },
- { NET_NEIGH_GC_THRESH1, "gc_thresh1" },
- { NET_NEIGH_GC_THRESH2, "gc_thresh2" },
- { NET_NEIGH_GC_THRESH3, "gc_thresh3" },
- { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
- { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_neigh_table[] = {
- { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
- { 0, NULL, trans_net_neigh_vars_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
- { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
-
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" },
-
- { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" },
- { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" },
- { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" },
- { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout" },
-
- { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
- { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
- { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" },
- { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
- { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
- { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
-
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" },
- { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" },
-
- { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
- { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv4_table[] = {
- { NET_IPV4_FORWARD, "ip_forward" },
- { NET_IPV4_DYNADDR, "ip_dynaddr" },
-
- { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table },
- { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table },
- { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
- /* NET_IPV4_FIB_HASH unused */
- { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
-
- { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
- { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
- { NET_IPV4_TCP_SACK, "tcp_sack" },
- { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
- { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
- /* NET_IPV4_AUTOCONFIG unused */
- { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
- { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
- { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
- { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
- { NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
- /* NET_IPV4_TCP_MAX_KA_PROBES unused */
- { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
- { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
- { NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
- { NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
- { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
- /* NET_IPV4_IP_MASQ_DEBUG unused */
- { NET_TCP_SYNCOOKIES, "tcp_syncookies" },
- { NET_TCP_STDURG, "tcp_stdurg" },
- { NET_TCP_RFC1337, "tcp_rfc1337" },
- /* NET_TCP_SYN_TAILDROP unused */
- { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
- { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
- { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
- { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
- /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
- /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
- /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
- /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
- /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
- { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
- { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
- { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
- /* NET_IPV4_ALWAYS_DEFRAG unused */
- { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
- { NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
- { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
- { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
- { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
- { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
- { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
- { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
- { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
- { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
- { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
- { NET_TCP_FACK, "tcp_fack" },
- { NET_TCP_REORDERING, "tcp_reordering" },
- { NET_TCP_ECN, "tcp_ecn" },
- { NET_TCP_DSACK, "tcp_dsack" },
- { NET_TCP_MEM, "tcp_mem" },
- { NET_TCP_WMEM, "tcp_wmem" },
- { NET_TCP_RMEM, "tcp_rmem" },
- { NET_TCP_APP_WIN, "tcp_app_win" },
- { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
- { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
- { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
- { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
- { NET_TCP_TW_REUSE, "tcp_tw_reuse" },
- { NET_TCP_FRTO, "tcp_frto" },
- { NET_TCP_LOW_LATENCY, "tcp_low_latency" },
- { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
- { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
- { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
- /* NET_TCP_DEFAULT_WIN_SCALE unused */
- { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
- { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
- /* NET_TCP_BIC_BETA unused */
- { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
- { NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
- { NET_TCP_ABC, "tcp_abc" },
- { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" },
- { NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
- { NET_TCP_BASE_MSS, "tcp_base_mss" },
- { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
- { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
- { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
- { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
- { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
- { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
- { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
- { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" },
- { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
- { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
- { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
- { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipx_table[] = {
- { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
- /* NET_IPX_FORWARDING unused */
- {}
-};
-
-static const struct trans_ctl_table trans_net_atalk_table[] = {
- { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
- { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
- { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
- { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
- {},
-};
-
-static const struct trans_ctl_table trans_net_netrom_table[] = {
- { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
- { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
- { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
- { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
- { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
- { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
- { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
- { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
- { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
- { NET_NETROM_ROUTING_CONTROL, "routing_control" },
- { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
- { NET_NETROM_RESET, "reset" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ax25_param_table[] = {
- { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
- { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
- { NET_AX25_BACKOFF_TYPE, "backoff_type" },
- { NET_AX25_CONNECT_MODE, "connect_mode" },
- { NET_AX25_STANDARD_WINDOW, "standard_window_size" },
- { NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
- { NET_AX25_T1_TIMEOUT, "t1_timeout" },
- { NET_AX25_T2_TIMEOUT, "t2_timeout" },
- { NET_AX25_T3_TIMEOUT, "t3_timeout" },
- { NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
- { NET_AX25_N2, "maximum_retry_count" },
- { NET_AX25_PACLEN, "maximum_packet_length" },
- { NET_AX25_PROTOCOL, "protocol" },
- { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ax25_table[] = {
- { 0, NULL, trans_net_ax25_param_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_bridge_table[] = {
- { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
- { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
- { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
- { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" },
- { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_rose_table[] = {
- { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
- { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
- { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
- { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
- { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
- { NET_ROSE_ROUTING_CONTROL, "routing_control" },
- { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
- { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
- { NET_ROSE_WINDOW_SIZE, "window_size" },
- { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
- { NET_IPV6_FORWARDING, "forwarding" },
- { NET_IPV6_HOP_LIMIT, "hop_limit" },
- { NET_IPV6_MTU, "mtu" },
- { NET_IPV6_ACCEPT_RA, "accept_ra" },
- { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
- { NET_IPV6_AUTOCONF, "autoconf" },
- { NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
- { NET_IPV6_RTR_SOLICITS, "router_solicitations" },
- { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
- { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
- { NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
- { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
- { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
- { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
- { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
- { NET_IPV6_MAX_ADDRESSES, "max_addresses" },
- { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
- { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
- { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
- { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
- { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
- { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
- { NET_IPV6_PROXY_NDP, "proxy_ndp" },
- { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv6_conf_table[] = {
- { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
- { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
- { 0, NULL, trans_net_ipv6_conf_var_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv6_route_table[] = {
- { NET_IPV6_ROUTE_FLUSH, "flush" },
- { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
- { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
- { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
- { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
- { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
- { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
- { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
- { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
- { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
- { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_ipv6_table[] = {
- { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
- { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
- { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
- { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table },
- { NET_IPV6_BINDV6ONLY, "bindv6only" },
- { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
- { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
- { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
- { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
- { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
- { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_x25_table[] = {
- { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
- { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
- { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
- { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
- { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
- { NET_X25_FORWARD, "x25_forward" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_tr_table[] = {
- { NET_TR_RIF_TIMEOUT, "rif_timeout" },
- {}
-};
-
-
-static const struct trans_ctl_table trans_net_decnet_conf_vars[] = {
- { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
- { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
- { NET_DECNET_CONF_DEV_T2, "t2" },
- { NET_DECNET_CONF_DEV_T3, "t3" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_decnet_conf[] = {
- { 0, NULL, trans_net_decnet_conf_vars },
- {}
-};
-
-static const struct trans_ctl_table trans_net_decnet_table[] = {
- { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
- { NET_DECNET_NODE_ADDRESS, "node_address" },
- { NET_DECNET_NODE_NAME, "node_name" },
- { NET_DECNET_DEFAULT_DEVICE, "default_device" },
- { NET_DECNET_TIME_WAIT, "time_wait" },
- { NET_DECNET_DN_COUNT, "dn_count" },
- { NET_DECNET_DI_COUNT, "di_count" },
- { NET_DECNET_DR_COUNT, "dr_count" },
- { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
- { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
- { NET_DECNET_MEM, "decnet_mem" },
- { NET_DECNET_RMEM, "decnet_rmem" },
- { NET_DECNET_WMEM, "decnet_wmem" },
- { NET_DECNET_DEBUG_LEVEL, "debug" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_sctp_table[] = {
- { NET_SCTP_RTO_INITIAL, "rto_initial" },
- { NET_SCTP_RTO_MIN, "rto_min" },
- { NET_SCTP_RTO_MAX, "rto_max" },
- { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
- { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
- { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
- { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
- { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
- { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
- { NET_SCTP_HB_INTERVAL, "hb_interval" },
- { NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
- { NET_SCTP_MAX_BURST, "max_burst" },
- { NET_SCTP_ADDIP_ENABLE, "addip_enable" },
- { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
- { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
- { NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
- { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
- { NET_LLC2_ACK_TIMEOUT, "ack" },
- { NET_LLC2_P_TIMEOUT, "p" },
- { NET_LLC2_REJ_TIMEOUT, "rej" },
- { NET_LLC2_BUSY_TIMEOUT, "busy" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_llc_station_table[] = {
- { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_llc_llc2_table[] = {
- { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_llc_table[] = {
- { NET_LLC2, "llc2", trans_net_llc_llc2_table },
- { NET_LLC_STATION, "station", trans_net_llc_station_table },
- {}
-};
-
-static const struct trans_ctl_table trans_net_netfilter_table[] = {
- { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" },
- { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" },
- { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" },
- { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" },
- { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" },
- { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
- { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
- { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" },
- { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
- { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
- { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" },
- { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" },
- { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
- { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" },
- { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" },
- { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
- { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
- { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
-
- {}
-};
-
-static const struct trans_ctl_table trans_net_dccp_table[] = {
- { NET_DCCP_DEFAULT, "default" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_irda_table[] = {
- { NET_IRDA_DISCOVERY, "discovery" },
- { NET_IRDA_DEVNAME, "devname" },
- { NET_IRDA_DEBUG, "debug" },
- { NET_IRDA_FAST_POLL, "fast_poll_increase" },
- { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
- { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
- { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
- { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
- { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
- { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
- { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
- { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
- { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
- { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
- {}
-};
-
-static const struct trans_ctl_table trans_net_table[] = {
- { NET_CORE, "core", trans_net_core_table },
- /* NET_ETHER not used */
- /* NET_802 not used */
- { NET_UNIX, "unix", trans_net_unix_table },
- { NET_IPV4, "ipv4", trans_net_ipv4_table },
- { NET_IPX, "ipx", trans_net_ipx_table },
- { NET_ATALK, "appletalk", trans_net_atalk_table },
- { NET_NETROM, "netrom", trans_net_netrom_table },
- { NET_AX25, "ax25", trans_net_ax25_table },
- { NET_BRIDGE, "bridge", trans_net_bridge_table },
- { NET_ROSE, "rose", trans_net_rose_table },
- { NET_IPV6, "ipv6", trans_net_ipv6_table },
- { NET_X25, "x25", trans_net_x25_table },
- { NET_TR, "token-ring", trans_net_tr_table },
- { NET_DECNET, "decnet", trans_net_decnet_table },
- /* NET_ECONET not used */
- { NET_SCTP, "sctp", trans_net_sctp_table },
- { NET_LLC, "llc", trans_net_llc_table },
- { NET_NETFILTER, "netfilter", trans_net_netfilter_table },
- { NET_DCCP, "dccp", trans_net_dccp_table },
- { NET_IRDA, "irda", trans_net_irda_table },
- { 2089, "nf_conntrack_max" },
- {}
-};
-
-static const struct trans_ctl_table trans_fs_quota_table[] = {
- { FS_DQ_LOOKUPS, "lookups" },
- { FS_DQ_DROPS, "drops" },
- { FS_DQ_READS, "reads" },
- { FS_DQ_WRITES, "writes" },
- { FS_DQ_CACHE_HITS, "cache_hits" },
- { FS_DQ_ALLOCATED, "allocated_dquots" },
- { FS_DQ_FREE, "free_dquots" },
- { FS_DQ_SYNCS, "syncs" },
- { FS_DQ_WARNINGS, "warnings" },
- {}
-};
-
-static const struct trans_ctl_table trans_fs_xfs_table[] = {
- { XFS_SGID_INHERIT, "irix_sgid_inherit" },
- { XFS_SYMLINK_MODE, "irix_symlink_mode" },
- { XFS_PANIC_MASK, "panic_mask" },
-
- { XFS_ERRLEVEL, "error_level" },
- { XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
- { XFS_INHERIT_SYNC, "inherit_sync" },
- { XFS_INHERIT_NODUMP, "inherit_nodump" },
- { XFS_INHERIT_NOATIME, "inherit_noatime" },
- { XFS_BUF_TIMER, "xfsbufd_centisecs" },
- { XFS_BUF_AGE, "age_buffer_centisecs" },
- { XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
- { XFS_ROTORSTEP, "rotorstep" },
- { XFS_INHERIT_NODFRG, "inherit_nodefrag" },
- { XFS_FILESTREAM_TIMER, "filestream_centisecs" },
- { XFS_STATS_CLEAR, "stats_clear" },
- {}
-};
-
-static const struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
- { 1, "hb_ctl_path" },
- {}
-};
-
-static const struct trans_ctl_table trans_fs_ocfs2_table[] = {
- { 1, "nm", trans_fs_ocfs2_nm_table },
- {}
-};
-
-static const struct trans_ctl_table trans_inotify_table[] = {
- { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
- { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
- { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
- {}
-};
-
-static const struct trans_ctl_table trans_fs_table[] = {
- { FS_NRINODE, "inode-nr" },
- { FS_STATINODE, "inode-state" },
- /* FS_MAXINODE unused */
- /* FS_NRDQUOT unused */
- /* FS_MAXDQUOT unused */
- { FS_NRFILE, "file-nr" },
- { FS_MAXFILE, "file-max" },
- { FS_DENTRY, "dentry-state" },
- /* FS_NRSUPER unused */
- /* FS_MAXUPSER unused */
- { FS_OVERFLOWUID, "overflowuid" },
- { FS_OVERFLOWGID, "overflowgid" },
- { FS_LEASES, "leases-enable" },
- { FS_DIR_NOTIFY, "dir-notify-enable" },
- { FS_LEASE_TIME, "lease-break-time" },
- { FS_DQSTATS, "quota", trans_fs_quota_table },
- { FS_XFS, "xfs", trans_fs_xfs_table },
- { FS_AIO_NR, "aio-nr" },
- { FS_AIO_MAX_NR, "aio-max-nr" },
- { FS_INOTIFY, "inotify", trans_inotify_table },
- { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table },
- { KERN_SETUID_DUMPABLE, "suid_dumpable" },
- {}
-};
-
-static const struct trans_ctl_table trans_debug_table[] = {
- {}
-};
-
-static const struct trans_ctl_table trans_cdrom_table[] = {
- { DEV_CDROM_INFO, "info" },
- { DEV_CDROM_AUTOCLOSE, "autoclose" },
- { DEV_CDROM_AUTOEJECT, "autoeject" },
- { DEV_CDROM_DEBUG, "debug" },
- { DEV_CDROM_LOCK, "lock" },
- { DEV_CDROM_CHECK_MEDIA, "check_media" },
- {}
-};
-
-static const struct trans_ctl_table trans_ipmi_table[] = {
- { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
- {}
-};
-
-static const struct trans_ctl_table trans_mac_hid_files[] = {
- /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
- /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
- { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
- { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
- { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
- /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
- {}
-};
-
-static const struct trans_ctl_table trans_raid_table[] = {
- { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
- { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
- {}
-};
-
-static const struct trans_ctl_table trans_scsi_table[] = {
- { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
- {}
-};
-
-static const struct trans_ctl_table trans_parport_default_table[] = {
- { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
- { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
- {}
-};
-
-static const struct trans_ctl_table trans_parport_device_table[] = {
- { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
- {}
-};
-
-static const struct trans_ctl_table trans_parport_devices_table[] = {
- { DEV_PARPORT_DEVICES_ACTIVE, "active" },
- { 0, NULL, trans_parport_device_table },
- {}
-};
-
-static const struct trans_ctl_table trans_parport_parport_table[] = {
- { DEV_PARPORT_SPINTIME, "spintime" },
- { DEV_PARPORT_BASE_ADDR, "base-addr" },
- { DEV_PARPORT_IRQ, "irq" },
- { DEV_PARPORT_DMA, "dma" },
- { DEV_PARPORT_MODES, "modes" },
- { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table },
- { DEV_PARPORT_AUTOPROBE, "autoprobe" },
- { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" },
- { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" },
- { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" },
- { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
- {}
-};
-static const struct trans_ctl_table trans_parport_table[] = {
- { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
- { 0, NULL, trans_parport_parport_table },
- {}
-};
-
-static const struct trans_ctl_table trans_dev_table[] = {
- { DEV_CDROM, "cdrom", trans_cdrom_table },
- /* DEV_HWMON unused */
- { DEV_PARPORT, "parport", trans_parport_table },
- { DEV_RAID, "raid", trans_raid_table },
- { DEV_MAC_HID, "mac_hid", trans_mac_hid_files },
- { DEV_SCSI, "scsi", trans_scsi_table },
- { DEV_IPMI, "ipmi", trans_ipmi_table },
- {}
-};
-
-static const struct trans_ctl_table trans_bus_isa_table[] = {
- { BUS_ISA_MEM_BASE, "membase" },
- { BUS_ISA_PORT_BASE, "portbase" },
- { BUS_ISA_PORT_SHIFT, "portshift" },
- {}
-};
-
-static const struct trans_ctl_table trans_bus_table[] = {
- { CTL_BUS_ISA, "isa", trans_bus_isa_table },
- {}
-};
-
-static const struct trans_ctl_table trans_arlan_conf_table0[] = {
- { 1, "spreadingCode" },
- { 2, "channelNumber" },
- { 3, "scramblingDisable" },
- { 4, "txAttenuation" },
- { 5, "systemId" },
- { 6, "maxDatagramSize" },
- { 7, "maxFrameSize" },
- { 8, "maxRetries" },
- { 9, "receiveMode" },
- { 10, "priority" },
- { 11, "rootOrRepeater" },
- { 12, "SID" },
- { 13, "registrationMode" },
- { 14, "registrationFill" },
- { 15, "localTalkAddress" },
- { 16, "codeFormat" },
- { 17, "numChannels" },
- { 18, "channel1" },
- { 19, "channel2" },
- { 20, "channel3" },
- { 21, "channel4" },
- { 22, "txClear" },
- { 23, "txRetries" },
- { 24, "txRouting" },
- { 25, "txScrambled" },
- { 26, "rxParameter" },
- { 27, "txTimeoutMs" },
- { 28, "waitCardTimeout" },
- { 29, "channelSet" },
- { 30, "name" },
- { 31, "waitTime" },
- { 32, "lParameter" },
- { 33, "_15" },
- { 34, "headerSize" },
- { 36, "tx_delay_ms" },
- { 37, "retries" },
- { 38, "ReTransmitPacketMaxSize" },
- { 39, "waitReTransmitPacketMaxSize" },
- { 40, "fastReTransCount" },
- { 41, "driverRetransmissions" },
- { 42, "txAckTimeoutMs" },
- { 43, "registrationInterrupts" },
- { 44, "hardwareType" },
- { 45, "radioType" },
- { 46, "writeEEPROM" },
- { 47, "writeRadioType" },
- { 48, "entry_exit_debug" },
- { 49, "debug" },
- { 50, "in_speed" },
- { 51, "out_speed" },
- { 52, "in_speed10" },
- { 53, "out_speed10" },
- { 54, "in_speed_max" },
- { 55, "out_speed_max" },
- { 56, "measure_rate" },
- { 57, "pre_Command_Wait" },
- { 58, "rx_tweak1" },
- { 59, "rx_tweak2" },
- { 60, "tx_queue_len" },
-
- { 150, "arlan0-txRing" },
- { 151, "arlan0-rxRing" },
- { 152, "arlan0-18" },
- { 153, "arlan0-ring" },
- { 154, "arlan0-shm-cpy" },
- { 155, "config0" },
- { 156, "reset0" },
- {}
-};
-
-static const struct trans_ctl_table trans_arlan_conf_table1[] = {
- { 1, "spreadingCode" },
- { 2, "channelNumber" },
- { 3, "scramblingDisable" },
- { 4, "txAttenuation" },
- { 5, "systemId" },
- { 6, "maxDatagramSize" },
- { 7, "maxFrameSize" },
- { 8, "maxRetries" },
- { 9, "receiveMode" },
- { 10, "priority" },
- { 11, "rootOrRepeater" },
- { 12, "SID" },
- { 13, "registrationMode" },
- { 14, "registrationFill" },
- { 15, "localTalkAddress" },
- { 16, "codeFormat" },
- { 17, "numChannels" },
- { 18, "channel1" },
- { 19, "channel2" },
- { 20, "channel3" },
- { 21, "channel4" },
- { 22, "txClear" },
- { 23, "txRetries" },
- { 24, "txRouting" },
- { 25, "txScrambled" },
- { 26, "rxParameter" },
- { 27, "txTimeoutMs" },
- { 28, "waitCardTimeout" },
- { 29, "channelSet" },
- { 30, "name" },
- { 31, "waitTime" },
- { 32, "lParameter" },
- { 33, "_15" },
- { 34, "headerSize" },
- { 36, "tx_delay_ms" },
- { 37, "retries" },
- { 38, "ReTransmitPacketMaxSize" },
- { 39, "waitReTransmitPacketMaxSize" },
- { 40, "fastReTransCount" },
- { 41, "driverRetransmissions" },
- { 42, "txAckTimeoutMs" },
- { 43, "registrationInterrupts" },
- { 44, "hardwareType" },
- { 45, "radioType" },
- { 46, "writeEEPROM" },
- { 47, "writeRadioType" },
- { 48, "entry_exit_debug" },
- { 49, "debug" },
- { 50, "in_speed" },
- { 51, "out_speed" },
- { 52, "in_speed10" },
- { 53, "out_speed10" },
- { 54, "in_speed_max" },
- { 55, "out_speed_max" },
- { 56, "measure_rate" },
- { 57, "pre_Command_Wait" },
- { 58, "rx_tweak1" },
- { 59, "rx_tweak2" },
- { 60, "tx_queue_len" },
-
- { 150, "arlan1-txRing" },
- { 151, "arlan1-rxRing" },
- { 152, "arlan1-18" },
- { 153, "arlan1-ring" },
- { 154, "arlan1-shm-cpy" },
- { 155, "config1" },
- { 156, "reset1" },
- {}
-};
-
-static const struct trans_ctl_table trans_arlan_conf_table2[] = {
- { 1, "spreadingCode" },
- { 2, "channelNumber" },
- { 3, "scramblingDisable" },
- { 4, "txAttenuation" },
- { 5, "systemId" },
- { 6, "maxDatagramSize" },
- { 7, "maxFrameSize" },
- { 8, "maxRetries" },
- { 9, "receiveMode" },
- { 10, "priority" },
- { 11, "rootOrRepeater" },
- { 12, "SID" },
- { 13, "registrationMode" },
- { 14, "registrationFill" },
- { 15, "localTalkAddress" },
- { 16, "codeFormat" },
- { 17, "numChannels" },
- { 18, "channel1" },
- { 19, "channel2" },
- { 20, "channel3" },
- { 21, "channel4" },
- { 22, "txClear" },
- { 23, "txRetries" },
- { 24, "txRouting" },
- { 25, "txScrambled" },
- { 26, "rxParameter" },
- { 27, "txTimeoutMs" },
- { 28, "waitCardTimeout" },
- { 29, "channelSet" },
- { 30, "name" },
- { 31, "waitTime" },
- { 32, "lParameter" },
- { 33, "_15" },
- { 34, "headerSize" },
- { 36, "tx_delay_ms" },
- { 37, "retries" },
- { 38, "ReTransmitPacketMaxSize" },
- { 39, "waitReTransmitPacketMaxSize" },
- { 40, "fastReTransCount" },
- { 41, "driverRetransmissions" },
- { 42, "txAckTimeoutMs" },
- { 43, "registrationInterrupts" },
- { 44, "hardwareType" },
- { 45, "radioType" },
- { 46, "writeEEPROM" },
- { 47, "writeRadioType" },
- { 48, "entry_exit_debug" },
- { 49, "debug" },
- { 50, "in_speed" },
- { 51, "out_speed" },
- { 52, "in_speed10" },
- { 53, "out_speed10" },
- { 54, "in_speed_max" },
- { 55, "out_speed_max" },
- { 56, "measure_rate" },
- { 57, "pre_Command_Wait" },
- { 58, "rx_tweak1" },
- { 59, "rx_tweak2" },
- { 60, "tx_queue_len" },
-
- { 150, "arlan2-txRing" },
- { 151, "arlan2-rxRing" },
- { 152, "arlan2-18" },
- { 153, "arlan2-ring" },
- { 154, "arlan2-shm-cpy" },
- { 155, "config2" },
- { 156, "reset2" },
- {}
-};
-
-static const struct trans_ctl_table trans_arlan_conf_table3[] = {
- { 1, "spreadingCode" },
- { 2, "channelNumber" },
- { 3, "scramblingDisable" },
- { 4, "txAttenuation" },
- { 5, "systemId" },
- { 6, "maxDatagramSize" },
- { 7, "maxFrameSize" },
- { 8, "maxRetries" },
- { 9, "receiveMode" },
- { 10, "priority" },
- { 11, "rootOrRepeater" },
- { 12, "SID" },
- { 13, "registrationMode" },
- { 14, "registrationFill" },
- { 15, "localTalkAddress" },
- { 16, "codeFormat" },
- { 17, "numChannels" },
- { 18, "channel1" },
- { 19, "channel2" },
- { 20, "channel3" },
- { 21, "channel4" },
- { 22, "txClear" },
- { 23, "txRetries" },
- { 24, "txRouting" },
- { 25, "txScrambled" },
- { 26, "rxParameter" },
- { 27, "txTimeoutMs" },
- { 28, "waitCardTimeout" },
- { 29, "channelSet" },
- { 30, "name" },
- { 31, "waitTime" },
- { 32, "lParameter" },
- { 33, "_15" },
- { 34, "headerSize" },
- { 36, "tx_delay_ms" },
- { 37, "retries" },
- { 38, "ReTransmitPacketMaxSize" },
- { 39, "waitReTransmitPacketMaxSize" },
- { 40, "fastReTransCount" },
- { 41, "driverRetransmissions" },
- { 42, "txAckTimeoutMs" },
- { 43, "registrationInterrupts" },
- { 44, "hardwareType" },
- { 45, "radioType" },
- { 46, "writeEEPROM" },
- { 47, "writeRadioType" },
- { 48, "entry_exit_debug" },
- { 49, "debug" },
- { 50, "in_speed" },
- { 51, "out_speed" },
- { 52, "in_speed10" },
- { 53, "out_speed10" },
- { 54, "in_speed_max" },
- { 55, "out_speed_max" },
- { 56, "measure_rate" },
- { 57, "pre_Command_Wait" },
- { 58, "rx_tweak1" },
- { 59, "rx_tweak2" },
- { 60, "tx_queue_len" },
-
- { 150, "arlan3-txRing" },
- { 151, "arlan3-rxRing" },
- { 152, "arlan3-18" },
- { 153, "arlan3-ring" },
- { 154, "arlan3-shm-cpy" },
- { 155, "config3" },
- { 156, "reset3" },
- {}
-};
-
-static const struct trans_ctl_table trans_arlan_table[] = {
- { 1, "arlan0", trans_arlan_conf_table0 },
- { 2, "arlan1", trans_arlan_conf_table1 },
- { 3, "arlan2", trans_arlan_conf_table2 },
- { 4, "arlan3", trans_arlan_conf_table3 },
- {}
-};
-
-static const struct trans_ctl_table trans_s390dbf_table[] = {
- { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
- { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
- {}
-};
-
-static const struct trans_ctl_table trans_sunrpc_table[] = {
- { CTL_RPCDEBUG, "rpc_debug" },
- { CTL_NFSDEBUG, "nfs_debug" },
- { CTL_NFSDDEBUG, "nfsd_debug" },
- { CTL_NLMDEBUG, "nlm_debug" },
- { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
- { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
- { CTL_MIN_RESVPORT, "min_resvport" },
- { CTL_MAX_RESVPORT, "max_resvport" },
- {}
-};
-
-static const struct trans_ctl_table trans_pm_table[] = {
- { 1 /* CTL_PM_SUSPEND */, "suspend" },
- { 2 /* CTL_PM_CMODE */, "cmode" },
- { 3 /* CTL_PM_P0 */, "p0" },
- { 4 /* CTL_PM_CM */, "cm" },
- {}
-};
-
-static const struct trans_ctl_table trans_frv_table[] = {
- { 1, "cache-mode" },
- { 2, "pin-cxnr" },
- {}
-};
-
-static const struct trans_ctl_table trans_root_table[] = {
- { CTL_KERN, "kernel", trans_kern_table },
- { CTL_VM, "vm", trans_vm_table },
- { CTL_NET, "net", trans_net_table },
- /* CTL_PROC not used */
- { CTL_FS, "fs", trans_fs_table },
- { CTL_DEBUG, "debug", trans_debug_table },
- { CTL_DEV, "dev", trans_dev_table },
- { CTL_BUS, "bus", trans_bus_table },
- { CTL_ABI, "abi" },
- /* CTL_CPU not used */
- { CTL_ARLAN, "arlan", trans_arlan_table },
- { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
- { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
- { CTL_PM, "pm", trans_pm_table },
- { CTL_FRV, "frv", trans_frv_table },
- {}
-};
-
-
-
static int sysctl_depth(struct ctl_table *table)
{
@@ -1261,47 +28,6 @@ static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
return table;
}
-static const struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
-{
- struct ctl_table *test;
- const struct trans_ctl_table *ref;
- int cur_depth;
-
- cur_depth = sysctl_depth(table);
-
- ref = trans_root_table;
-repeat:
- test = sysctl_parent(table, cur_depth);
- for (; ref->ctl_name || ref->procname || ref->child; ref++) {
- int match = 0;
-
- if (cur_depth && !ref->child)
- continue;
-
- if (test->procname && ref->procname &&
- (strcmp(test->procname, ref->procname) == 0))
- match++;
-
- if (test->ctl_name && ref->ctl_name &&
- (test->ctl_name == ref->ctl_name))
- match++;
-
- if (!ref->ctl_name && !ref->procname)
- match++;
-
- if (match) {
- if (cur_depth != 0) {
- cur_depth--;
- ref = ref->child;
- goto repeat;
- }
- goto out;
- }
- }
- ref = NULL;
-out:
- return ref;
-}
static void sysctl_print_path(struct ctl_table *table)
{
@@ -1315,26 +41,6 @@ static void sysctl_print_path(struct ctl_table *table)
}
}
printk(" ");
- if (table->ctl_name) {
- for (i = depth; i >= 0; i--) {
- tmp = sysctl_parent(table, i);
- printk(".%d", tmp->ctl_name);
- }
- }
-}
-
-static void sysctl_repair_table(struct ctl_table *table)
-{
- /* Don't complain about the classic default
- * sysctl strategy routine. Maybe later we
- * can get the tables fixed and complain about
- * this.
- */
- if (table->ctl_name && table->procname &&
- (table->proc_handler == proc_dointvec) &&
- (!table->strategy)) {
- table->strategy = sysctl_data;
- }
}
static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
@@ -1352,7 +58,7 @@ static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
ref = head->ctl_table;
repeat:
test = sysctl_parent(table, cur_depth);
- for (; ref->ctl_name || ref->procname; ref++) {
+ for (; ref->procname; ref++) {
int match = 0;
if (cur_depth && !ref->child)
continue;
@@ -1361,10 +67,6 @@ repeat:
(strcmp(test->procname, ref->procname) == 0))
match++;
- if (test->ctl_name && ref->ctl_name &&
- (test->ctl_name == ref->ctl_name))
- match++;
-
if (match) {
if (cur_depth != 0) {
cur_depth--;
@@ -1392,38 +94,6 @@ static void set_fail(const char **fail, struct ctl_table *table, const char *str
*fail = str;
}
-static int sysctl_check_dir(struct nsproxy *namespaces,
- struct ctl_table *table)
-{
- struct ctl_table *ref;
- int error;
-
- error = 0;
- ref = sysctl_check_lookup(namespaces, table);
- if (ref) {
- int match = 0;
- if ((!table->procname && !ref->procname) ||
- (table->procname && ref->procname &&
- (strcmp(table->procname, ref->procname) == 0)))
- match++;
-
- if ((!table->ctl_name && !ref->ctl_name) ||
- (table->ctl_name && ref->ctl_name &&
- (table->ctl_name == ref->ctl_name)))
- match++;
-
- if (match != 2) {
- printk(KERN_ERR "%s: failed: ", __func__);
- sysctl_print_path(table);
- printk(" ref: ");
- sysctl_print_path(ref);
- printk("\n");
- error = -EINVAL;
- }
- }
- return error;
-}
-
static void sysctl_check_leaf(struct nsproxy *namespaces,
struct ctl_table *table, const char **fail)
{
@@ -1434,37 +104,15 @@ static void sysctl_check_leaf(struct nsproxy *namespaces,
set_fail(fail, table, "Sysctl already exists");
}
-static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
-{
- const struct trans_ctl_table *ref;
-
- ref = sysctl_binary_lookup(table);
- if (table->ctl_name && !ref)
- set_fail(fail, table, "Unknown sysctl binary path");
- if (ref) {
- if (ref->procname &&
- (!table->procname ||
- (strcmp(table->procname, ref->procname) != 0)))
- set_fail(fail, table, "procname does not match binary path procname");
-
- if (ref->ctl_name && table->ctl_name &&
- (table->ctl_name != ref->ctl_name))
- set_fail(fail, table, "ctl_name does not match binary path ctl_name");
- }
-}
-
int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
{
int error = 0;
- for (; table->ctl_name || table->procname; table++) {
+ for (; table->procname; table++) {
const char *fail = NULL;
- sysctl_repair_table(table);
if (table->parent) {
if (table->procname && !table->parent->procname)
set_fail(&fail, table, "Parent without procname");
- if (table->ctl_name && !table->parent->ctl_name)
- set_fail(&fail, table, "Parent without ctl_name");
}
if (!table->procname)
set_fail(&fail, table, "No procname");
@@ -1477,21 +125,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
set_fail(&fail, table, "Writable sysctl directory");
if (table->proc_handler)
set_fail(&fail, table, "Directory with proc_handler");
- if (table->strategy)
- set_fail(&fail, table, "Directory with strategy");
if (table->extra1)
set_fail(&fail, table, "Directory with extra1");
if (table->extra2)
set_fail(&fail, table, "Directory with extra2");
- if (sysctl_check_dir(namespaces, table))
- set_fail(&fail, table, "Inconsistent directory names");
} else {
- if ((table->strategy == sysctl_data) ||
- (table->strategy == sysctl_string) ||
- (table->strategy == sysctl_intvec) ||
- (table->strategy == sysctl_jiffies) ||
- (table->strategy == sysctl_ms_jiffies) ||
- (table->proc_handler == proc_dostring) ||
+ if ((table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
@@ -1513,14 +152,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
set_fail(&fail, table, "No max");
}
}
-#ifdef CONFIG_SYSCTL_SYSCALL
- if (table->ctl_name && !table->strategy)
- set_fail(&fail, table, "Missing strategy");
-#endif
-#if 0
- if (!table->ctl_name && table->strategy)
- set_fail(&fail, table, "Strategy without ctl_name");
-#endif
#ifdef CONFIG_PROC_SYSCTL
if (table->procname && !table->proc_handler)
set_fail(&fail, table, "No proc_handler");
@@ -1531,7 +162,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
#endif
sysctl_check_leaf(namespaces, table, &fail);
}
- sysctl_check_bin_path(table, &fail);
if (table->mode > 0777)
set_fail(&fail, table, "bogus .mode");
if (fail) {
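
The binary-path tables, sysctl_binary_lookup() and the strategy checks are
gone from sysctl_check.c: a table entry is now validated purely by its
procname, and an all-zero entry terminates the array. A minimal userspace
sketch of that walk, assuming a mock ctl_table (illustrative fields only,
not the kernel's full struct):

#include <stdio.h>

/* mock of the post-patch ctl_table: procname is the only lookup key */
struct ctl_table {
	const char *procname;	/* NULL procname terminates the array */
	void *data;
	int maxlen;
	unsigned int mode;
};

/* sketch of the simplified check loop: stop at the empty sentinel */
static int check_table(const struct ctl_table *table)
{
	for (; table->procname; table++) {
		if (table->mode > 0777) {
			fprintf(stderr, "%s: bogus .mode\n", table->procname);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	static int file_max = 8192;
	const struct ctl_table demo[] = {
		{ "file-max", &file_max, sizeof(file_max), 0644 },
		{ }	/* sentinel */
	};
	return check_table(demo) ? 1 : 0;
}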
diff --git a/kernel/time.c b/kernel/time.c
index 804798005d19..c6324d96009e 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
- update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 620b58abdc32..20a8920029ee 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -20,6 +20,8 @@
#include <linux/sysdev.h>
#include <linux/tick.h>
+#include "tick-internal.h"
+
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
@@ -37,10 +39,9 @@ static DEFINE_SPINLOCK(clockevents_lock);
*
* Math helper, returns latch value converted to nanoseconds (bound checked)
*/
-unsigned long clockevent_delta2ns(unsigned long latch,
- struct clock_event_device *evt)
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
- u64 clc = ((u64) latch << evt->shift);
+ u64 clc = (u64) latch << evt->shift;
if (unlikely(!evt->mult)) {
evt->mult = 1;
@@ -50,10 +51,10 @@ unsigned long clockevent_delta2ns(unsigned long latch,
do_div(clc, evt->mult);
if (clc < 1000)
clc = 1000;
- if (clc > LONG_MAX)
- clc = LONG_MAX;
+ if (clc > KTIME_MAX)
+ clc = KTIME_MAX;
- return (unsigned long) clc;
+ return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
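
Widening clockevent_delta2ns() to u64 lets min/max_delta_ns exceed LONG_MAX
on 32-bit, with the clamp raised from LONG_MAX to KTIME_MAX to match. The
same math in a standalone sketch; the mult/shift values below are
hypothetical numbers for a 1 MHz device, not taken from any driver:

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX	/* kernel: ((s64)~((u64)1 << 63)) */

struct evt { uint32_t mult, shift; };	/* the two fields delta2ns uses */

/* latch in device ticks -> nanoseconds, bound checked as in the patch */
static uint64_t delta2ns(unsigned long latch, const struct evt *evt)
{
	uint64_t clc = (uint64_t)latch << evt->shift;

	clc /= evt->mult;		/* do_div(clc, evt->mult) in-kernel */
	if (clc < 1000)			/* lower bound: 1 usec */
		clc = 1000;
	if (clc > (uint64_t)KTIME_MAX)	/* previously clamped to LONG_MAX */
		clc = KTIME_MAX;
	return clc;
}

int main(void)
{
	/* 1 MHz device: mult ~= (1 << 32) / 1000, i.e. 1000 ns per tick */
	struct evt e = { .mult = 4294967, .shift = 32 };

	/* 1000 ticks -> ~1 ms */
	printf("%llu ns\n", (unsigned long long)delta2ns(1000, &e));
	return 0;
}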
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5e18c6ab2c6a..e85c23404d34 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc,
tc->cycle_last = cc->read(cc);
tc->nsec = start_tstamp;
}
-EXPORT_SYMBOL(timecounter_init);
+EXPORT_SYMBOL_GPL(timecounter_init);
/**
* timecounter_read_delta - get nanoseconds since last call of this function
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc)
return nsec;
}
-EXPORT_SYMBOL(timecounter_read);
+EXPORT_SYMBOL_GPL(timecounter_read);
u64 timecounter_cyc2time(struct timecounter *tc,
cycle_t cycle_tstamp)
@@ -105,7 +105,60 @@ u64 timecounter_cyc2time(struct timecounter *tc,
return nsec;
}
-EXPORT_SYMBOL(timecounter_cyc2time);
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
+
+/**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult: pointer to mult variable
+ * @shift: pointer to shift variable
+ * @from: frequency to convert from
+ * @to: frequency to convert to
+ * @minsec: guaranteed runtime conversion range in seconds
+ *
+ * The function evaluates the shift/mult pair for the scaled math
+ * operations of clocksources and clockevents.
+ *
+ * @to and @from are frequency values in HZ. For clock sources @to is
+ * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
+ * events @to is the counter frequency and @from is NSEC_PER_SEC.
+ *
+ * The @minsec conversion range argument controls the time frame in
+ * seconds which must be covered by the runtime conversion with the
+ * calculated mult and shift factors. This guarantees that no 64bit
+ * overflow happens when the input value of the conversion is
+ * multiplied by the calculated mult factor. Larger ranges may
+ * reduce the conversion accuracy by choosing smaller mult and shift
+ * factors.
+ */
+void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+{
+ u64 tmp;
+ u32 sft, sftacc = 32;
+
+ /*
+ * Calculate the shift factor which is limiting the conversion
+ * range:
+ */
+ tmp = ((u64)minsec * from) >> 32;
+ while (tmp) {
+ tmp >>= 1;
+ sftacc--;
+ }
+
+ /*
+ * Find the conversion shift/mult pair which has the best
+ * accuracy and fits the @minsec conversion range:
+ */
+ for (sft = 32; sft > 0; sft--) {
+ tmp = (u64) to << sft;
+ do_div(tmp, from);
+ if ((tmp >> sftacc) == 0)
+ break;
+ }
+ *mult = tmp;
+ *shift = sft;
+}
/*[Clocksource internal variables]---------
* curr_clocksource:
@@ -413,6 +466,47 @@ void clocksource_touch_watchdog(void)
clocksource_resume_watchdog();
}
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs: Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+ u64 max_nsecs, max_cycles;
+
+ /*
+ * Calculate the maximum number of cycles that we can pass to the
+ * cyc2ns function without overflowing a 64-bit signed result. The
+ * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
+ * is equivalent to the below.
+ * max_cycles < (2^63)/cs->mult
+ * max_cycles < 2^(log2((2^63)/cs->mult))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult))
+ * max_cycles < 2^(63 - log2(cs->mult))
+ * max_cycles < 1 << (63 - log2(cs->mult))
+ * Please note that we add 1 to the result of the log2 to account for
+ * any rounding errors, ensuring the above inequality is satisfied and
+ * that no overflow will occur.
+ */
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+
+ /*
+ * The actual maximum number of cycles we can defer the clocksource is
+ * determined by the minimum of max_cycles and cs->mask.
+ */
+ max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+
+ /*
+ * To ensure that the clocksource does not wrap whilst we are idle,
+ * limit the time the clocksource can be deferred by 12.5%. Please
+ * note a margin of 12.5% is used because this can be computed with
+ * a shift, versus say 10% which would require division.
+ */
+ return max_nsecs - (max_nsecs >> 5);
+}
+
#ifdef CONFIG_GENERIC_TIME
/**
@@ -511,6 +605,9 @@ static void clocksource_enqueue(struct clocksource *cs)
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max idle time permitted for this clocksource */
+ cs->max_idle_ns = clocksource_max_deferment(cs);
+
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_select();
@@ -580,7 +677,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
* @count: length of buffer
*
* Takes input from sysfs interface for manually overriding the default
- * clocksource selction.
+ * clocksource selection.
*/
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
struct sysdev_attribute *attr,
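
clocks_calc_mult_shift() picks the mult/shift pair for the scaled
conversion ns = (cycles * mult) >> shift, trading accuracy for range so
that @minsec seconds of input never overflow 64 bits. A userspace
re-derivation of the search; the 10 MHz counter and 600 s range below are
made-up inputs:

#include <stdint.h>
#include <stdio.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t minsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* shrink the accuracy budget until minsec seconds of input fit */
	tmp = ((uint64_t)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* largest shift whose mult still fits within the budget */
	for (sft = 32; sft > 0; sft--) {
		tmp = (uint64_t)to << sft;
		tmp /= from;			/* do_div() in the kernel */
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = (uint32_t)tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* 10 MHz counter -> nanoseconds, valid for at least 600 s */
	calc_mult_shift(&mult, &shift, 10000000, 1000000000, 600);
	/* expected: 10 cycles at 10 MHz -> 1000 ns */
	printf("mult=%u shift=%u: 10 cycles -> %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)10 * mult) >> shift));
	return 0;
}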
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index a96c0e2b89cf..0a8a213016f0 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -50,9 +50,9 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
dev->min_delta_ns += dev->min_delta_ns >> 1;
printk(KERN_WARNING
- "CE: %s increasing min_delta_ns to %lu nsec\n",
+ "CE: %s increasing min_delta_ns to %llu nsec\n",
dev->name ? dev->name : "?",
- dev->min_delta_ns << 1);
+ (unsigned long long) dev->min_delta_ns << 1);
i = 0;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 89aed5933ed4..f992762d7f51 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz);
* value. We do this unconditionally on any cpu, as we don't know whether the
* cpu, which has the update task assigned is in a long sleep.
*/
-static void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(ktime_t now)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- ktime_t now;
-
- if (!ts->tick_stopped)
- return;
cpumask_clear_cpu(cpu, nohz_cpu_mask);
- now = ktime_get();
ts->idle_waketime = now;
local_irq_save(flags);
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(void)
touch_softlockup_watchdog();
}
-static void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t delta;
- if (ts->idle_active) {
- ktime_t now, delta;
- now = ktime_get();
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_lastupdate = now;
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- ts->idle_active = 0;
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_active = 0;
- sched_clock_idle_wakeup_event(0);
- }
+ sched_clock_idle_wakeup_event(0);
}
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -216,6 +208,7 @@ void tick_nohz_stop_sched_tick(int inidle)
struct tick_sched *ts;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ u64 time_delta;
int cpu;
local_irq_save(flags);
@@ -263,7 +256,7 @@ void tick_nohz_stop_sched_tick(int inidle)
if (ratelimit < 10) {
printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- local_softirq_pending());
+ (unsigned int) local_softirq_pending());
ratelimit++;
}
goto end;
@@ -275,14 +268,18 @@ void tick_nohz_stop_sched_tick(int inidle)
seq = read_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
+ time_delta = timekeeping_max_deferment();
} while (read_seqretry(&xtime_lock, seq));
- /* Get the next timer wheel timer */
- next_jiffies = get_next_timer_interrupt(last_jiffies);
- delta_jiffies = next_jiffies - last_jiffies;
-
- if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
+ if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+ arch_needs_cpu(cpu)) {
+ next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
+ } else {
+ /* Get the next timer wheel timer */
+ next_jiffies = get_next_timer_interrupt(last_jiffies);
+ delta_jiffies = next_jiffies - last_jiffies;
+ }
/*
* Do not stop the tick, if we are only one off
* or if the cpu is required for rcu
@@ -294,22 +291,51 @@ void tick_nohz_stop_sched_tick(int inidle)
if ((long)delta_jiffies >= 1) {
/*
- * calculate the expiry time for the next timer wheel
- * timer
- */
- expires = ktime_add_ns(last_update, tick_period.tv64 *
- delta_jiffies);
-
- /*
* If this cpu is the one which updates jiffies, then
* give up the assignment and let it be taken by the
* cpu which runs the tick timer next, which might be
* this cpu as well. If we don't drop this here the
* jiffies might be stale and do_timer() never
- * invoked.
+ * invoked. Keep track of the fact that it was the one
+ * which had the do_timer() duty last. If this cpu is
+ * the one which had the do_timer() duty last, we
+ * limit the sleep time to the timekeeping
+ * max_deferment value which we retrieved
+ * above. Otherwise we can sleep as long as we want.
*/
- if (cpu == tick_do_timer_cpu)
+ if (cpu == tick_do_timer_cpu) {
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ ts->do_timer_last = 1;
+ } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+ time_delta = KTIME_MAX;
+ ts->do_timer_last = 0;
+ } else if (!ts->do_timer_last) {
+ time_delta = KTIME_MAX;
+ }
+
+ /*
+ * calculate the expiry time for the next timer wheel
+ * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
+ * that there is no timer pending or at least extremely
+ * far into the future (12 days for HZ=1000). In this
+ * case we set the expiry to the end of time.
+ */
+ if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
+ /*
+ * Calculate the time delta for the next timer event.
+ * If the time delta exceeds the maximum time delta
+ * permitted by the current clocksource then adjust
+ * the time delta accordingly to ensure the
+ * clocksource does not wrap.
+ */
+ time_delta = min_t(u64, time_delta,
+ tick_period.tv64 * delta_jiffies);
+ }
+
+ if (time_delta < KTIME_MAX)
+ expires = ktime_add_ns(last_update, time_delta);
+ else
+ expires.tv64 = KTIME_MAX;
if (delta_jiffies > 1)
cpumask_set_cpu(cpu, nohz_cpu_mask);
@@ -342,22 +368,19 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_sleeps++;
+ /* Mark expires */
+ ts->idle_expires = expires;
+
/*
- * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
- * there is no timer pending or at least extremly far
- * into the future (12 days for HZ=1000). In this case
- * we simply stop the tick timer:
+ * If the expiration time == KTIME_MAX, then
+ * we simply stop the tick timer.
*/
- if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
- ts->idle_expires.tv64 = KTIME_MAX;
+ if (unlikely(expires.tv64 == KTIME_MAX)) {
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
goto out;
}
- /* Mark expiries */
- ts->idle_expires = expires;
-
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED);
@@ -436,7 +459,11 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;
local_irq_disable();
- tick_nohz_stop_idle(cpu);
+ if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+ now = ktime_get();
+
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
if (!ts->inidle || !ts->tick_stopped) {
ts->inidle = 0;
@@ -450,7 +477,6 @@ void tick_nohz_restart_sched_tick(void)
/* Update jiffies first */
select_nohz_load_balancer(0);
- now = ktime_get();
tick_do_update_jiffies64(now);
cpumask_clear_cpu(cpu, nohz_cpu_mask);
@@ -584,22 +610,18 @@ static void tick_nohz_switch_to_nohz(void)
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
-static void tick_nohz_kick_tick(int cpu)
+static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
- ktime_t delta, now;
-
- if (!ts->tick_stopped)
- return;
+ ktime_t delta;
/*
* Do not touch the tick device, when the next expiry is either
* already reached or less/equal than the tick period.
*/
- now = ktime_get();
delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
if (delta.tv64 <= tick_period.tv64)
return;
@@ -608,9 +630,26 @@ static void tick_nohz_kick_tick(int cpu)
#endif
}
+static inline void tick_check_nohz(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now;
+
+ if (!ts->idle_active && !ts->tick_stopped)
+ return;
+ now = ktime_get();
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
+ if (ts->tick_stopped) {
+ tick_nohz_update_jiffies(now);
+ tick_nohz_kick_tick(cpu, now);
+ }
+}
+
#else
static inline void tick_nohz_switch_to_nohz(void) { }
+static inline void tick_check_nohz(int cpu) { }
#endif /* NO_HZ */
@@ -620,11 +659,7 @@ static inline void tick_nohz_switch_to_nohz(void) { }
void tick_check_idle(int cpu)
{
tick_check_oneshot_broadcast(cpu);
-#ifdef CONFIG_NO_HZ
- tick_nohz_stop_idle(cpu);
- tick_nohz_update_jiffies();
- tick_nohz_kick_tick(cpu);
-#endif
+ tick_check_nohz(cpu);
}
/*
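
The behavioural core of the tick-sched changes: the sleep length is capped
by timekeeping_max_deferment() only on the cpu that last held the
do_timer() duty; all others may sleep until their next timer. A reduced
sketch of that decision, with the per-cpu state (tick_do_timer_cpu,
ts->do_timer_last) collapsed into a single flag; names and numbers here
are illustrative:

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

static uint64_t sleep_length_ns(uint64_t tick_period_ns,
				unsigned long delta_jiffies,
				uint64_t max_defer_ns,
				int had_do_timer_duty)
{
	/* only the last do_timer() cpu must bound its sleep, so the
	 * clocksource cannot wrap while nobody updates jiffies */
	uint64_t limit = had_do_timer_duty ? max_defer_ns
					   : (uint64_t)KTIME_MAX;
	uint64_t want = tick_period_ns * delta_jiffies;

	return want < limit ? want : limit;	/* min_t(u64, ...) */
}

int main(void)
{
	/* HZ=1000, next timer 5000 jiffies away, clocksource good for 2 s */
	printf("sleep %llu ns\n", (unsigned long long)
	       sleep_length_ns(1000000, 5000, 2000000000ULL, 1));
	return 0;
}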
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 71e7f1a19156..96ff643a5a59 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -40,7 +40,7 @@ ktime_t timecompare_transform(struct timecompare *sync,
return ns_to_ktime(nsec);
}
-EXPORT_SYMBOL(timecompare_transform);
+EXPORT_SYMBOL_GPL(timecompare_transform);
int timecompare_offset(struct timecompare *sync,
s64 *offset,
@@ -131,7 +131,7 @@ int timecompare_offset(struct timecompare *sync,
return used;
}
-EXPORT_SYMBOL(timecompare_offset);
+EXPORT_SYMBOL_GPL(timecompare_offset);
void __timecompare_update(struct timecompare *sync,
u64 source_tstamp)
@@ -188,4 +188,4 @@ void __timecompare_update(struct timecompare *sync,
}
}
}
-EXPORT_SYMBOL(__timecompare_update);
+EXPORT_SYMBOL_GPL(__timecompare_update);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3a4e2907eaa..af4135f05825 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,19 +165,12 @@ struct timespec raw_time;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
- xtime_cache = xtime;
- timespec_add_ns(&xtime_cache, nsec);
-}
-
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
xtime.tv_sec += leapsecond;
wall_to_monotonic.tv_sec -= leapsecond;
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME
@@ -332,12 +325,10 @@ int do_settimeofday(struct timespec *tv)
xtime = *tv;
- update_xtime_cache(0);
-
timekeeper.ntp_error = 0;
ntp_clear();
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -488,6 +479,17 @@ int timekeeping_valid_for_hres(void)
}
/**
+ * timekeeping_max_deferment - Returns max time the clocksource can be deferred
+ *
+ * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
+ * ensure that the clocksource does not change!
+ */
+u64 timekeeping_max_deferment(void)
+{
+ return timekeeper.clock->max_idle_ns;
+}
+
+/**
* read_persistent_clock - Return time from the persistent clock.
*
* Weak dummy function for arches that do not yet support it.
@@ -548,7 +550,6 @@ void __init timekeeping_init(void)
}
set_normalized_timespec(&wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
- update_xtime_cache(0);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -582,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
total_sleep_time = timespec_add_safe(total_sleep_time, ts);
}
- update_xtime_cache(0);
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
@@ -723,6 +723,49 @@ static void timekeeping_adjust(s64 offset)
}
/**
+ * logarithmic_accumulation - shifted accumulation of cycles
+ *
+ * This function accumulates a shifted interval of cycles into
+ * a shifted interval of nanoseconds, allowing for an O(log)
+ * accumulation loop.
+ *
+ * Returns the unconsumed cycles.
+ */
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+{
+ u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+
+ /* If the offset is smaller than a shifted interval, do nothing */
+ if (offset < timekeeper.cycle_interval<<shift)
+ return offset;
+
+ /* Accumulate one shifted interval */
+ offset -= timekeeper.cycle_interval << shift;
+ timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+
+ timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+ while (timekeeper.xtime_nsec >= nsecps) {
+ timekeeper.xtime_nsec -= nsecps;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+
+ /* Accumulate into raw time */
+ raw_time.tv_nsec += timekeeper.raw_interval << shift;
+ while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+ raw_time.tv_nsec -= NSEC_PER_SEC;
+ raw_time.tv_sec++;
+ }
+
+ /* Accumulate error between NTP and clock interval */
+ timekeeper.ntp_error += tick_length << shift;
+ timekeeper.ntp_error -= timekeeper.xtime_interval <<
+ (timekeeper.ntp_error_shift + shift);
+
+ return offset;
+}
+
+/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
* Called from the timer interrupt, must hold a write on xtime_lock.
@@ -731,7 +774,7 @@ void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
- u64 nsecs;
+ int shift = 0, maxshift;
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -745,33 +788,22 @@ void update_wall_time(void)
#endif
timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
- /* normally this loop will run just once, however in the
- * case of lost or late ticks, it will accumulate correctly.
+ /*
+ * With NO_HZ we may have to accumulate many cycle_intervals
+ * (think "ticks") worth of time at once. To do this efficiently,
+ * we calculate the largest doubling multiple of cycle_intervals
+ * that is smaller than the offset. We then accumulate that
+ * chunk in one go, and then try to consume the next smaller
+ * doubled multiple.
*/
+ shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+ shift = max(0, shift);
+ /* Bound shift to one less than what overflows tick_length */
+ maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+ shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
- u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
-
- /* accumulate one interval */
- offset -= timekeeper.cycle_interval;
- clock->cycle_last += timekeeper.cycle_interval;
-
- timekeeper.xtime_nsec += timekeeper.xtime_interval;
- if (timekeeper.xtime_nsec >= nsecps) {
- timekeeper.xtime_nsec -= nsecps;
- xtime.tv_sec++;
- second_overflow();
- }
-
- raw_time.tv_nsec += timekeeper.raw_interval;
- if (raw_time.tv_nsec >= NSEC_PER_SEC) {
- raw_time.tv_nsec -= NSEC_PER_SEC;
- raw_time.tv_sec++;
- }
-
- /* accumulate error between NTP and clock interval */
- timekeeper.ntp_error += tick_length;
- timekeeper.ntp_error -= timekeeper.xtime_interval <<
- timekeeper.ntp_error_shift;
+ offset = logarithmic_accumulation(offset, shift);
+ shift--;
}
/* correct the clock when NTP error is too big */
@@ -807,11 +839,8 @@ void update_wall_time(void)
timekeeper.ntp_error += timekeeper.xtime_nsec <<
timekeeper.ntp_error_shift;
- nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
- update_xtime_cache(nsecs);
-
/* check to see if there is a new clocksource to use */
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
/**
@@ -846,13 +875,13 @@ void monotonic_to_bootbased(struct timespec *ts)
unsigned long get_seconds(void)
{
- return xtime_cache.tv_sec;
+ return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return xtime_cache;
+ return xtime;
}
struct timespec current_kernel_time(void)
@@ -862,8 +891,7 @@ struct timespec current_kernel_time(void)
do {
seq = read_seqbegin(&xtime_lock);
-
- now = xtime_cache;
+ now = xtime;
} while (read_seqretry(&xtime_lock, seq));
return now;
@@ -877,8 +905,7 @@ struct timespec get_monotonic_coarse(void)
do {
seq = read_seqbegin(&xtime_lock);
-
- now = xtime_cache;
+ now = xtime;
mono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
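
With update_xtime_cache() gone and logarithmic_accumulation() in, a long
NO_HZ sleep is consumed in power-of-two multiples of cycle_interval, so
the accumulation loop runs O(log n) times instead of once per missed
tick. A userspace variant of the chunking idea (NTP error bookkeeping
omitted; the interval and offset are arbitrary numbers):

#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t v)	/* kernel ilog2() equivalent */
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint64_t interval = 1000;	/* cycles per tick */
	uint64_t offset = 1234567;	/* cycles since the last update */
	uint64_t accumulated = 0;
	int shift, steps = 0;

	/* largest doubling of the interval not exceeding the offset */
	shift = ilog2_u64(offset) - ilog2_u64(interval);
	if (shift < 0)
		shift = 0;

	while (offset >= interval) {
		while (offset < (interval << shift))
			shift--;	/* fall back to a smaller chunk */
		offset -= interval << shift;
		accumulated += interval << shift;
		steps++;
	}
	printf("accumulated %llu cycles in %d steps, %llu left over\n",
	       (unsigned long long)accumulated, steps,
	       (unsigned long long)offset);
	return 0;
}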
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1b5b7aa2fdfd..9d80db4747d4 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
P_ns(expires_next);
P(hres_active);
P(nr_events);
+ P(nr_retries);
+ P(nr_hangs);
+ P_ns(max_hang_time);
#endif
#undef P
#undef P_ns
@@ -204,10 +207,12 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
return;
}
SEQ_printf(m, "%s\n", dev->name);
- SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns);
- SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns);
- SEQ_printf(m, " mult: %lu\n", dev->mult);
- SEQ_printf(m, " shift: %d\n", dev->shift);
+ SEQ_printf(m, " max_delta_ns: %llu\n",
+ (unsigned long long) dev->max_delta_ns);
+ SEQ_printf(m, " min_delta_ns: %llu\n",
+ (unsigned long long) dev->min_delta_ns);
+ SEQ_printf(m, " mult: %u\n", dev->mult);
+ SEQ_printf(m, " shift: %u\n", dev->shift);
SEQ_printf(m, " mode: %d\n", dev->mode);
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));
@@ -252,7 +257,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.4\n");
+ SEQ_printf(m, "Timer List Version: v0.5\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0..88bd9ae2a9ed 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1361,11 +1361,7 @@ int trace_array_vprintk(struct trace_array *tr,
pause_graph_tracing();
raw_local_irq_save(irq_flags);
__raw_spin_lock(&trace_buf_lock);
- if (args == NULL) {
- strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
- len = strlen(trace_buf);
- } else
- len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+ len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
buffer = tr->buffer;
@@ -1516,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
int i = (int)*pos;
void *ent;
+ WARN_ON_ONCE(iter->leftover);
+
(*pos)++;
/* can't go backwards */
@@ -1614,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
;
} else {
- l = *pos - 1;
- p = s_next(m, p, &l);
+ /*
+		 * If we overflowed the seq_file before, we want to
+		 * just reuse the trace_seq buffer.
+ */
+ if (iter->leftover)
+ p = iter;
+ else {
+ l = *pos - 1;
+ p = s_next(m, p, &l);
+ }
}
trace_event_read_lock();
@@ -1923,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
+ int ret;
if (iter->ent == NULL) {
if (iter->tr) {
@@ -1942,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_func_help_header(m);
}
+ } else if (iter->leftover) {
+ /*
+ * If we filled the seq_file buffer earlier, we
+ * want to just show it now.
+ */
+ ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should be zero this time, but you never know */
+ iter->leftover = ret;
+
} else {
print_trace_line(iter);
- trace_print_seq(m, &iter->seq);
+ ret = trace_print_seq(m, &iter->seq);
+ /*
+		 * If we overflow the seq_file buffer, it will ask us
+		 * for this data again at startup, so keep it around.
+		 * ret is 0 if the seq_file write succeeded, -1 otherwise.
+ */
+ iter->leftover = ret;
}
return 0;
@@ -2898,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
else
cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
+
+ if (iter->trace->pipe_close)
+ iter->trace->pipe_close(iter);
+
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
@@ -3320,6 +3349,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
return cnt;
}
+static int mark_printk(const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ va_start(args, fmt);
+ ret = trace_vprintk(0, fmt, args);
+ va_end(args);
+ return ret;
+}
+
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -3346,7 +3385,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
buf[cnt] = '\0';
- cnt = trace_vprintk(0, buf, NULL);
+ cnt = mark_printk("%s", buf);
kfree(buf);
*fpos += cnt;
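
mark_printk() above exists so the user-supplied buffer is passed as a "%s" argument instead of as the format string itself; any '%' in the user data is then emitted literally rather than parsed as a conversion. A hedged user-space analogue, with vfprintf() standing in for trace_vprintk():

#include <stdarg.h>
#include <stdio.h>

/* Variadic wrapper that forwards its arguments as a va_list. */
static int log_printf(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vfprintf(stderr, fmt, args);
	va_end(args);
	return ret;
}

int main(void)
{
	const char user_buf[] = "progress: 100%s";	/* hostile '%' */

	log_printf("%s\n", user_buf);	/* safe: "%s" printed literally */
	return 0;
}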
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1d7f4830a80d..7fa33cab6962 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -272,6 +272,7 @@ struct tracer_flags {
* @pipe_open: called when the trace_pipe file is opened
* @wait_pipe: override how the user waits for traces on trace_pipe
* @close: called when the trace file is released
+ * @pipe_close: called when the trace_pipe file is released
* @read: override the default read callback on trace_pipe
* @splice_read: override the default splice_read callback on trace_pipe
* @selftest: selftest to run on boot (see trace_selftest.c)
@@ -290,6 +291,7 @@ struct tracer {
void (*pipe_open)(struct trace_iterator *iter);
void (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
+ void (*pipe_close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4d..a43d009c561a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,9 +14,20 @@
#include "trace.h"
#include "trace_output.h"
-struct fgraph_data {
+struct fgraph_cpu_data {
pid_t last_pid;
int depth;
+ int ignore;
+};
+
+struct fgraph_data {
+ struct fgraph_cpu_data *cpu_data;
+
+ /* Place to preserve last processed entry. */
+ struct ftrace_graph_ent_entry ent;
+ struct ftrace_graph_ret_entry ret;
+ int failed;
+ int cpu;
};
#define TRACE_GRAPH_INDENT 2
@@ -384,7 +395,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
if (!data)
return TRACE_TYPE_HANDLED;
- last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+ last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
if (*last_pid == pid)
return TRACE_TYPE_HANDLED;
@@ -435,26 +446,49 @@ static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *curr)
{
- struct ring_buffer_iter *ring_iter;
+ struct fgraph_data *data = iter->private;
+ struct ring_buffer_iter *ring_iter = NULL;
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *next;
- ring_iter = iter->buffer_iter[iter->cpu];
+ /*
+ * If the previous output failed to write to the seq buffer,
+ * then we just reuse the data from before.
+ */
+ if (data && data->failed) {
+ curr = &data->ent;
+ next = &data->ret;
+ } else {
- /* First peek to compare current entry and the next one */
- if (ring_iter)
- event = ring_buffer_iter_peek(ring_iter, NULL);
- else {
- /* We need to consume the current entry to see the next one */
- ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
- event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
- NULL);
- }
+ ring_iter = iter->buffer_iter[iter->cpu];
+
+ /* First peek to compare current entry and the next one */
+ if (ring_iter)
+ event = ring_buffer_iter_peek(ring_iter, NULL);
+ else {
+ /*
+ * We need to consume the current entry to see
+ * the next one.
+ */
+ ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+ event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+ NULL);
+ }
- if (!event)
- return NULL;
+ if (!event)
+ return NULL;
+
+ next = ring_buffer_event_data(event);
- next = ring_buffer_event_data(event);
+ if (data) {
+ /*
+ * Save current and next entries for later reference
+ * if the output fails.
+ */
+ data->ent = *curr;
+ data->ret = *next;
+ }
+ }
if (next->ent.type != TRACE_GRAPH_RET)
return NULL;
@@ -640,7 +674,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
/*
* Comments display at + 1 to depth. Since
@@ -688,7 +722,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
*depth = call->depth;
}
@@ -782,19 +816,34 @@ static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
struct trace_iterator *iter)
{
- int cpu = iter->cpu;
+ struct fgraph_data *data = iter->private;
struct ftrace_graph_ent *call = &field->graph_ent;
struct ftrace_graph_ret_entry *leaf_ret;
+	enum print_line_t ret;
+ int cpu = iter->cpu;
if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
return TRACE_TYPE_PARTIAL_LINE;
leaf_ret = get_return_for_leaf(iter, field);
if (leaf_ret)
- return print_graph_entry_leaf(iter, field, leaf_ret, s);
+ ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
else
- return print_graph_entry_nested(iter, field, s, cpu);
+ ret = print_graph_entry_nested(iter, field, s, cpu);
+ if (data) {
+ /*
+		 * If we failed to write our output, we need to make
+		 * note of it, because we already consumed our entry.
+ */
+ if (s->full) {
+ data->failed = 1;
+ data->cpu = cpu;
+ } else
+ data->failed = 0;
+ }
+
+ return ret;
}
static enum print_line_t
@@ -810,7 +859,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
/*
* Comments display at + 1 to depth. This is the
@@ -873,7 +922,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
int i;
if (data)
- depth = per_cpu_ptr(data, iter->cpu)->depth;
+ depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
if (print_graph_prologue(iter, s, 0, 0))
return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +990,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
+ struct ftrace_graph_ent_entry *field;
+ struct fgraph_data *data = iter->private;
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
+ int cpu = iter->cpu;
+ int ret;
+
+ if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+ per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+ return TRACE_TYPE_HANDLED;
+ }
+
+ /*
+	 * If the last output failed, we may need to print out the
+	 * missing entry, which would otherwise never go out.
+ */
+ if (data && data->failed) {
+ field = &data->ent;
+ iter->cpu = data->cpu;
+ ret = print_graph_entry(field, s, iter);
+ if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+ per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+ ret = TRACE_TYPE_NO_CONSUME;
+ }
+ iter->cpu = cpu;
+ return ret;
+ }
switch (entry->type) {
case TRACE_GRAPH_ENT: {
@@ -952,7 +1026,7 @@ print_graph_function(struct trace_iterator *iter)
* sizeof(struct ftrace_graph_ent_entry) is very small,
* it can be safely saved at the stack.
*/
- struct ftrace_graph_ent_entry *field, saved;
+ struct ftrace_graph_ent_entry saved;
trace_assign_type(field, entry);
saved = *field;
return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1104,54 @@ static void print_graph_headers(struct seq_file *s)
static void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
- struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+ struct fgraph_data *data;
int cpu;
+ iter->private = NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- pr_warning("function graph tracer: not enough memory\n");
- else
- for_each_possible_cpu(cpu) {
- pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
- *pid = -1;
- *depth = 0;
- }
+ goto out_err;
+
+ data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+ if (!data->cpu_data)
+ goto out_err_free;
+
+ for_each_possible_cpu(cpu) {
+ pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+ int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+ *pid = -1;
+ *depth = 0;
+ *ignore = 0;
+ }
iter->private = data;
+
+ return;
+
+ out_err_free:
+ kfree(data);
+ out_err:
+ pr_warning("function graph tracer: not enough memory\n");
}
static void graph_trace_close(struct trace_iterator *iter)
{
- free_percpu(iter->private);
+ struct fgraph_data *data = iter->private;
+
+ if (data) {
+ free_percpu(data->cpu_data);
+ kfree(data);
+ }
}
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.open = graph_trace_open,
+ .pipe_open = graph_trace_open,
.close = graph_trace_close,
+ .pipe_close = graph_trace_close,
.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
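
The fgraph_data reshuffle above implements a save-and-replay scheme: each ring-buffer entry is consumed exactly once, so when the seq buffer fills mid-record the entry is stashed in data->ent/data->ret and re-emitted on the next call. A minimal stand-alone sketch of that pattern; every name here is invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct replay {
	char saved[64];		/* last entry that failed to go out */
	bool failed;
};

/* Bounded sink; returns false when the record does not fit. */
static bool sink_write(char *buf, size_t cap, size_t *used, const char *s)
{
	size_t len = strlen(s);

	if (*used + len >= cap)
		return false;
	memcpy(buf + *used, s, len + 1);
	*used += len;
	return true;
}

/*
 * Consume a new entry only when not replaying; save it on consumption,
 * and clear 'failed' only once the record finally goes out.
 */
static void print_next(struct replay *r, char *buf, size_t cap,
		       size_t *used, const char *(*next_entry)(void))
{
	const char *cur;

	if (r->failed) {
		cur = r->saved;		/* replay, consume nothing new */
	} else {
		cur = next_entry();	/* consume exactly one entry */
		snprintf(r->saved, sizeof(r->saved), "%s", cur);
	}
	r->failed = !sink_write(buf, cap, used, cur);
}

static const char *demo_next(void)
{
	static const char *entries[] = { "A\n", "B\n", "C\n", "D\n" };
	static unsigned int i;

	return entries[i++ % 4];
}

int main(void)
{
	struct replay r = { .failed = false };
	char out[8];
	size_t used = 0;
	int i;

	for (i = 0; i < 4; i++)
		print_next(&r, out, sizeof(out), &used, demo_next);
	fputs(out, stdout);	/* A, B, C fit; D is saved for replay, not lost */
	return 0;
}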
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aff5f80b59b8..b52d397e57eb 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -606,23 +606,22 @@ static int create_trace_probe(int argc, char **argv)
*/
struct trace_probe *tp;
int i, ret = 0;
- int is_return = 0;
+ int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
- if (argc < 2) {
- pr_info("Probe point is not specified.\n");
- return -EINVAL;
- }
-
+ /* argc must be >= 1 */
if (argv[0][0] == 'p')
is_return = 0;
else if (argv[0][0] == 'r')
is_return = 1;
+ else if (argv[0][0] == '-')
+ is_delete = 1;
else {
- pr_info("Probe definition must be started with 'p' or 'r'.\n");
+		pr_info("Probe definition must start with 'p', 'r' or"
+			" '-'.\n");
return -EINVAL;
}
@@ -642,7 +641,29 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
}
+ if (!group)
+ group = KPROBE_EVENT_SYSTEM;
+ if (is_delete) {
+ if (!event) {
+ pr_info("Delete command needs an event name.\n");
+ return -EINVAL;
+ }
+ tp = find_probe_event(event, group);
+ if (!tp) {
+ pr_info("Event %s/%s doesn't exist.\n", group, event);
+ return -ENOENT;
+ }
+ /* delete an event */
+ unregister_trace_probe(tp);
+ free_trace_probe(tp);
+ return 0;
+ }
+
+ if (argc < 2) {
+ pr_info("Probe point is not specified.\n");
+ return -EINVAL;
+ }
if (isdigit(argv[1][0])) {
if (is_return) {
pr_info("Return probe point must be a symbol.\n");
@@ -671,8 +692,6 @@ static int create_trace_probe(int argc, char **argv)
argc -= 2; argv += 2;
/* setup a probe */
- if (!group)
- group = KPROBE_EVENT_SYSTEM;
if (!event) {
/* Make a new event name */
if (symbol)
@@ -1114,7 +1133,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
struct trace_probe *tp = (struct trace_probe *)event_call->data;
ret = trace_define_common_fields(event_call);
- if (!ret)
+ if (ret)
return ret;
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
@@ -1132,7 +1151,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
struct trace_probe *tp = (struct trace_probe *)event_call->data;
ret = trace_define_common_fields(event_call);
- if (!ret)
+ if (ret)
return ret;
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
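
A reduced model of the create_trace_probe() dispatch after this change, for illustration only: the leading character selects a probe ('p'), a return probe ('r'), or a deletion ('-'), and only non-delete commands still require a probe point, which is why the argc check moved below the delete handling.

#include <stdbool.h>
#include <stdio.h>

static int parse_probe_cmd(int argc, char **argv)
{
	bool is_return = false, is_delete = false;

	if (argc < 1)
		return -1;

	switch (argv[0][0]) {
	case 'p':
		break;
	case 'r':
		is_return = true;
		break;
	case '-':
		is_delete = true;
		break;
	default:
		fprintf(stderr, "must start with 'p', 'r' or '-'\n");
		return -1;
	}

	if (is_delete)
		return 0;	/* "-:group/event": no probe point needed */

	if (argc < 2) {
		fprintf(stderr, "probe point is not specified\n");
		return -1;
	}
	return is_return ? 1 : 0;
}

int main(void)
{
	char *add[] = { "p:myprobe", "do_sys_open" };
	char *del[] = { "-:myprobe" };

	parse_probe_cmd(2, add);	/* define a probe */
	parse_probe_cmd(1, del);	/* delete it, no probe point given */
	return 0;
}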
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index ddfa0fd43bc0..acb87d4a4ac1 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -79,11 +79,12 @@ void ksym_collect_stats(unsigned long hbp_hit_addr)
}
#endif /* CONFIG_PROFILE_KSYM_TRACER */
-void ksym_hbp_handler(struct perf_event *hbp, void *data)
+void ksym_hbp_handler(struct perf_event *hbp, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct ring_buffer_event *event;
struct ksym_trace_entry *entry;
- struct pt_regs *regs = data;
struct ring_buffer *buffer;
int pc;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b6c12c6a1bcd..8e46b3323cdc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -23,13 +23,21 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
static int next_event_type = __TRACE_LAST_TYPE + 1;
-void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+ int ret;
+
+ ret = seq_write(m, s->buffer, len);
- seq_write(m, s->buffer, len);
+ /*
+ * Only reset this buffer if we successfully wrote to the
+ * seq_file buffer.
+ */
+ if (!ret)
+ trace_seq_init(s);
- trace_seq_init(s);
+ return ret;
}
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
@@ -85,7 +93,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
va_list ap;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
va_start(ap, fmt);
@@ -93,8 +101,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -119,14 +129,16 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
int len = (PAGE_SIZE - 1) - s->len;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
ret = vsnprintf(s->buffer + s->len, len, fmt, args);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -139,14 +151,16 @@ int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
int len = (PAGE_SIZE - 1) - s->len;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -167,8 +181,13 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
{
int len = strlen(str);
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
+ return 0;
+
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
return 0;
+ }
memcpy(s->buffer + s->len, str, len);
s->len += len;
@@ -178,9 +197,14 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
- if (s->len >= (PAGE_SIZE - 1))
+ if (s->full)
return 0;
+ if (s->len >= (PAGE_SIZE - 1)) {
+ s->full = 1;
+ return 0;
+ }
+
s->buffer[s->len++] = c;
return 1;
@@ -188,9 +212,14 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
return 0;
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
+ return 0;
+ }
+
memcpy(s->buffer + s->len, mem, len);
s->len += len;
@@ -203,6 +232,9 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
const unsigned char *data = mem;
int i, j;
+ if (s->full)
+ return 0;
+
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < len; i++) {
#else
@@ -220,8 +252,13 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
void *ret;
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
+		return NULL;
+
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
return NULL;
+ }
ret = s->buffer + s->len;
s->len += len;
@@ -233,8 +270,14 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
{
unsigned char *p;
- if (s->len >= (PAGE_SIZE - 1))
+ if (s->full)
+ return 0;
+
+ if (s->len >= (PAGE_SIZE - 1)) {
+ s->full = 1;
return 0;
+ }
+
p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
if (!IS_ERR(p)) {
p = mangle_path(s->buffer + s->len, p, "\n");
@@ -247,6 +290,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
return 1;
}
+ s->full = 1;
return 0;
}
@@ -373,6 +417,9 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
unsigned long vmstart = 0;
int ret = 1;
+ if (s->full)
+ return 0;
+
if (mm) {
const struct vm_area_struct *vma;
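
The recurring hunk above adds a sticky overflow flag to struct trace_seq: the first write that would not fit sets s->full, and every later write becomes a cheap no-op until the buffer is flushed, so no record is ever silently truncated mid-line. A minimal stand-alone model of the same pattern:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 32	/* stands in for PAGE_SIZE */

struct seq_buf {
	char buffer[BUF_SIZE];
	size_t len;
	int full;	/* sticky: set on first overflow */
};

static int seq_buf_puts(struct seq_buf *s, const char *str)
{
	size_t len = strlen(str);

	if (s->full)
		return 0;
	if (len > (BUF_SIZE - 1) - s->len) {
		s->full = 1;	/* refuse all further writes */
		return 0;
	}
	memcpy(s->buffer + s->len, str, len);
	s->len += len;
	return 1;
}

int main(void)
{
	struct seq_buf s = { .len = 0, .full = 0 };

	seq_buf_puts(&s, "first record ");
	seq_buf_puts(&s, "this second record is far too long to fit");
	seq_buf_puts(&s, "third");	/* no-op: s.full already set */
	printf("%.*s|full=%d\n", (int)s.len, s.buffer, s.full);
	return 0;
}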
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
new file mode 100644
index 000000000000..eb27fd3430a2
--- /dev/null
+++ b/kernel/user-return-notifier.c
@@ -0,0 +1,44 @@
+
+#include <linux/user-return-notifier.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
+
+/*
+ * Request a notification when the current cpu returns to userspace. Must be
+ * called in atomic context. The notifier will also be called in atomic
+ * context.
+ */
+void user_return_notifier_register(struct user_return_notifier *urn)
+{
+ set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
+ hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+}
+EXPORT_SYMBOL_GPL(user_return_notifier_register);
+
+/*
+ * Removes a registered user return notifier. Must be called from atomic
+ * context, and from the same cpu the registration occurred on.
+ */
+void user_return_notifier_unregister(struct user_return_notifier *urn)
+{
+ hlist_del(&urn->link);
+ if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+ clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
+}
+EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
+
+/* Calls registered user return notifiers */
+void fire_user_return_notifiers(void)
+{
+ struct user_return_notifier *urn;
+ struct hlist_node *tmp1, *tmp2;
+ struct hlist_head *head;
+
+ head = &get_cpu_var(return_notifier_list);
+ hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+ urn->on_user_return(urn);
+ put_cpu_var(return_notifier_list);
+}
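
A hypothetical caller of the new API above (not taken from an in-tree user): the notifier is registered from atomic context and fires, also in atomic context, on this cpu's next return to userspace.

#include <linux/user-return-notifier.h>

static void my_on_user_return(struct user_return_notifier *urn)
{
	/* runs just before this cpu returns to userspace */
}

static struct user_return_notifier my_urn = {
	.on_user_return = my_on_user_return,
};

/* Caller must have preemption disabled, per the comments above. */
static void arm_my_notifier(void)
{
	user_return_notifier_register(&my_urn);
}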
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 69eae358a726..a2cd77e70d4d 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -57,78 +57,47 @@ static int proc_do_uts_string(ctl_table *table, int write,
#define proc_do_uts_string NULL
#endif
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-/* The generic string strategy routine: */
-static int sysctl_uts_string(ctl_table *table,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- struct ctl_table uts_table;
- int r, write;
- write = newval && newlen;
- memcpy(&uts_table, table, sizeof(uts_table));
- uts_table.data = get_uts(table, write);
- r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
- put_uts(table, write, uts_table.data);
- return r;
-}
-#else
-#define sysctl_uts_string NULL
-#endif
-
static struct ctl_table uts_kern_table[] = {
{
- .ctl_name = KERN_OSTYPE,
.procname = "ostype",
.data = init_uts_ns.name.sysname,
.maxlen = sizeof(init_uts_ns.name.sysname),
.mode = 0444,
.proc_handler = proc_do_uts_string,
- .strategy = sysctl_uts_string,
},
{
- .ctl_name = KERN_OSRELEASE,
.procname = "osrelease",
.data = init_uts_ns.name.release,
.maxlen = sizeof(init_uts_ns.name.release),
.mode = 0444,
.proc_handler = proc_do_uts_string,
- .strategy = sysctl_uts_string,
},
{
- .ctl_name = KERN_VERSION,
.procname = "version",
.data = init_uts_ns.name.version,
.maxlen = sizeof(init_uts_ns.name.version),
.mode = 0444,
.proc_handler = proc_do_uts_string,
- .strategy = sysctl_uts_string,
},
{
- .ctl_name = KERN_NODENAME,
.procname = "hostname",
.data = init_uts_ns.name.nodename,
.maxlen = sizeof(init_uts_ns.name.nodename),
.mode = 0644,
.proc_handler = proc_do_uts_string,
- .strategy = sysctl_uts_string,
},
{
- .ctl_name = KERN_DOMAINNAME,
.procname = "domainname",
.data = init_uts_ns.name.domainname,
.maxlen = sizeof(init_uts_ns.name.domainname),
.mode = 0644,
.proc_handler = proc_do_uts_string,
- .strategy = sysctl_uts_string,
},
{}
};
static struct ctl_table uts_root_table[] = {
{
- .ctl_name = CTL_KERN,
.procname = "kernel",
.mode = 0555,
.child = uts_kern_table,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 67e526b6ae81..dee48658805c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,116 @@ struct workqueue_struct {
#endif
};
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct work_struct *work = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ cancel_work_sync(work);
+ debug_object_init(work, &work_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+ struct work_struct *work = addr;
+
+ switch (state) {
+
+ case ODEBUG_STATE_NOTAVAILABLE:
+ /*
+ * This is not really a fixup. The work struct was
+ * statically initialized. We just make sure that it
+ * is tracked in the object tracker.
+ */
+ if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+ debug_object_init(work, &work_debug_descr);
+ debug_object_activate(work, &work_debug_descr);
+ return 0;
+ }
+ WARN_ON_ONCE(1);
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ WARN_ON(1);
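+		/* fall through */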
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct work_struct *work = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ cancel_work_sync(work);
+ debug_object_free(work, &work_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static struct debug_obj_descr work_debug_descr = {
+ .name = "work_struct",
+ .fixup_init = work_fixup_init,
+ .fixup_activate = work_fixup_activate,
+ .fixup_free = work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+ debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+ debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+ if (onstack)
+ debug_object_init_on_stack(work, &work_debug_descr);
+ else
+ debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+ debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
@@ -145,6 +255,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
{
unsigned long flags;
+ debug_work_activate(work);
spin_lock_irqsave(&cwq->lock, flags);
insert_work(cwq, work, &cwq->worklist);
spin_unlock_irqrestore(&cwq->lock, flags);
@@ -280,6 +391,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
struct lockdep_map lockdep_map = work->lockdep_map;
#endif
trace_workqueue_execution(cwq->thread, work);
+ debug_work_deactivate(work);
cwq->current_work = work;
list_del_init(cwq->worklist.next);
spin_unlock_irq(&cwq->lock);
@@ -350,11 +462,18 @@ static void wq_barrier_func(struct work_struct *work)
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
struct wq_barrier *barr, struct list_head *head)
{
- INIT_WORK(&barr->work, wq_barrier_func);
+ /*
+ * debugobject calls are safe here even with cwq->lock locked
+	 * as we know for sure that this will not trigger any of the
+	 * checks that call back into the fixup functions, where we
+	 * might deadlock.
+ */
+ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
init_completion(&barr->done);
+ debug_work_activate(&barr->work);
insert_work(cwq, &barr->work, head);
}
@@ -372,8 +491,10 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
}
spin_unlock_irq(&cwq->lock);
- if (active)
+ if (active) {
wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ }
return active;
}
@@ -451,6 +572,7 @@ out:
return 0;
wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -485,6 +607,7 @@ static int try_to_grab_pending(struct work_struct *work)
*/
smp_rmb();
if (cwq == get_wq_data(work)) {
+ debug_work_deactivate(work);
list_del_init(&work->entry);
ret = 1;
}
@@ -507,8 +630,10 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
}
spin_unlock_irq(&cwq->lock);
- if (unlikely(running))
+ if (unlikely(running)) {
wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ }
}
static void wait_on_work(struct work_struct *work)
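
The barrier changes above follow the rule the new debugobjects support enforces for on-stack work items: initialize with INIT_WORK_ON_STACK() and call destroy_work_on_stack() before the stack frame disappears. A hypothetical caller observing that pairing (a sketch only, not code from this patch):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct stack_work {
	struct work_struct work;
	struct completion done;
};

static void stack_work_fn(struct work_struct *work)
{
	struct stack_work *sw = container_of(work, struct stack_work, work);

	complete(&sw->done);
}

static void run_stack_work(void)
{
	struct stack_work sw;

	INIT_WORK_ON_STACK(&sw.work, stack_work_fn);
	init_completion(&sw.done);
	schedule_work(&sw.work);
	wait_for_completion(&sw.done);
	destroy_work_on_stack(&sw.work);	/* frame is about to go away */
}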