author     Linus Torvalds <torvalds@linux-foundation.org>    2021-09-01 18:41:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2021-09-01 18:41:13 -0700
commit     df43d903828c59afb9e93b59835127a02e1f8144 (patch)
tree       39be64267622b2efe8e2fc39298a4245d76f2337    /kernel/printk/printk.c
parent     9e5f3ffcf1cb34e7c7beb3f79a96f58536730924 (diff)
parent     c985aafb60e972c0a6b8d0bd65e03af5890b748a (diff)
Merge tag 'printk-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux
Pull printk updates from Petr Mladek:

 - Optionally, provide an index of possible printk messages via
   <debugfs>/printk/index/. It can be used when monitoring important
   kernel messages on a farm of various hosts. The monitor has to be
   updated when some messages have changed or are no longer available
   in a newly deployed kernel.

 - Add the printk.console_no_auto_verbose boot parameter. It allows a
   crash dump to be generated in a reasonable time frame even with
   slow consoles.

 - Remove the printk_safe buffers. Messages are now always stored
   directly in the main log buffer, even in NMI or recursive context.
   This also allows syslog operations to be serialized by a mutex
   instead of a spin lock.

 - Miscellaneous cleanups and build fixes.

* tag 'printk-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux:
  printk/index: Fix -Wunused-function warning
  lib/nmi_backtrace: Serialize even messages about idle CPUs
  printk: Add printk.console_no_auto_verbose boot parameter
  printk: Remove console_silent()
  lib/test_scanf: Handle n_bits == 0 in random tests
  printk: syslog: close window between wait and read
  printk: convert @syslog_lock to mutex
  printk: remove NMI tracking
  printk: remove safe buffers
  printk: track/limit recursion
  lib/nmi_backtrace: explicitly serialize banner and regs
  printk: Move the printk() kerneldoc comment to its new home
  printk/index: Fix warning about missing prototypes
  MIPS/asm/printk: Fix build failure caused by printk
  printk: index: Add indexing support to dev_printk
  printk: Userspace format indexing support
  printk: Rework parse_prefix into printk_parse_prefix
  printk: Straighten out log_flags into printk_info_flags
  string_helpers: Escape double quotes in escape_special
  printk/console: Check consistent sequence number when handling race in console_unlock()
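As a rough illustration, the two user-visible additions are exercised from user space rather than from kernel code. The following is a minimal sketch based on the commit message above and the module_param added in the diff below; the exact per-module file name under the index directory ("vmlinux" for built-in code) is an assumption, not verified against the final documentation:

    # Keep the console loglevel unchanged on oops/panic (kernel command line),
    # or toggle it at runtime via the 0644 module parameter added here.
    printk.console_no_auto_verbose=1
    echo 1 > /sys/module/printk/parameters/console_no_auto_verbose

    # Browse the catalog of possible printk messages (debugfs must be mounted).
    mount -t debugfs none /sys/kernel/debug
    cat /sys/kernel/debug/printk/index/vmlinux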
Diffstat (limited to 'kernel/printk/printk.c')
-rw-r--r--   kernel/printk/printk.c   361
1 file changed, 204 insertions(+), 157 deletions(-)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 142a58d124d9..825277e1e742 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -350,13 +350,8 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
* non-printable characters are escaped in the "\xff" notation.
*/
-enum log_flags {
- LOG_NEWLINE = 2, /* text ended with a newline */
- LOG_CONT = 8, /* text is a fragment of a continuation line */
-};
-
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
-static DEFINE_RAW_SPINLOCK(syslog_lock);
+static DEFINE_MUTEX(syslog_lock);
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
@@ -732,27 +727,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
if (ret)
return ret;
- printk_safe_enter_irq();
if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
- printk_safe_exit_irq();
goto out;
}
- printk_safe_exit_irq();
ret = wait_event_interruptible(log_wait,
prb_read_valid(prb, atomic64_read(&user->seq), r));
if (ret)
goto out;
- printk_safe_enter_irq();
}
if (r->info->seq != atomic64_read(&user->seq)) {
/* our last seen message is gone, return error and reset */
atomic64_set(&user->seq, r->info->seq);
ret = -EPIPE;
- printk_safe_exit_irq();
goto out;
}
@@ -762,7 +752,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
&r->info->dev_info);
atomic64_set(&user->seq, r->info->seq + 1);
- printk_safe_exit_irq();
if (len > count) {
ret = -EINVAL;
@@ -797,7 +786,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
if (offset)
return -ESPIPE;
- printk_safe_enter_irq();
switch (whence) {
case SEEK_SET:
/* the first record */
@@ -818,7 +806,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
default:
ret = -EINVAL;
}
- printk_safe_exit_irq();
return ret;
}
@@ -833,7 +820,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
poll_wait(file, &log_wait, wait);
- printk_safe_enter_irq();
if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
/* return error when data has vanished underneath us */
if (info.seq != atomic64_read(&user->seq))
@@ -841,7 +827,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
else
ret = EPOLLIN|EPOLLRDNORM;
}
- printk_safe_exit_irq();
return ret;
}
@@ -874,9 +859,7 @@ static int devkmsg_open(struct inode *inode, struct file *file)
prb_rec_init_rd(&user->record, &user->info,
&user->text_buf[0], sizeof(user->text_buf));
- printk_safe_enter_irq();
atomic64_set(&user->seq, prb_first_valid_seq(prb));
- printk_safe_exit_irq();
file->private_data = user;
return 0;
@@ -1042,9 +1025,6 @@ static inline void log_buf_add_cpu(void) {}
static void __init set_percpu_data_ready(void)
{
- printk_safe_init();
- /* Make sure we set this flag only after printk_safe() init is done */
- barrier();
__printk_percpu_data_ready = true;
}
@@ -1082,6 +1062,7 @@ void __init setup_log_buf(int early)
struct prb_desc *new_descs;
struct printk_info info;
struct printk_record r;
+ unsigned int text_size;
size_t new_descs_size;
size_t new_infos_size;
unsigned long flags;
@@ -1142,24 +1123,37 @@ void __init setup_log_buf(int early)
new_descs, ilog2(new_descs_count),
new_infos);
- printk_safe_enter_irqsave(flags);
+ local_irq_save(flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
new_log_buf_len = 0;
free = __LOG_BUF_LEN;
- prb_for_each_record(0, &printk_rb_static, seq, &r)
- free -= add_to_rb(&printk_rb_dynamic, &r);
+ prb_for_each_record(0, &printk_rb_static, seq, &r) {
+ text_size = add_to_rb(&printk_rb_dynamic, &r);
+ if (text_size > free)
+ free = 0;
+ else
+ free -= text_size;
+ }
- /*
- * This is early enough that everything is still running on the
- * boot CPU and interrupts are disabled. So no new messages will
- * appear during the transition to the dynamic buffer.
- */
prb = &printk_rb_dynamic;
- printk_safe_exit_irqrestore(flags);
+ local_irq_restore(flags);
+
+ /*
+ * Copy any remaining messages that might have appeared from
+ * NMI context after copying but before switching to the
+ * dynamic buffer.
+ */
+ prb_for_each_record(seq, &printk_rb_static, seq, &r) {
+ text_size = add_to_rb(&printk_rb_dynamic, &r);
+ if (text_size > free)
+ free = 0;
+ else
+ free -= text_size;
+ }
if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
@@ -1481,12 +1475,14 @@ static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
return seq;
}
+/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
struct printk_info info;
struct printk_record r;
char *text;
int len = 0;
+ u64 seq;
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text)
@@ -1494,17 +1490,35 @@ static int syslog_print(char __user *buf, int size)
prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
- while (size > 0) {
+ mutex_lock(&syslog_lock);
+
+ /*
+ * Wait for the @syslog_seq record to be available. @syslog_seq may
+ * change while waiting.
+ */
+ do {
+ seq = syslog_seq;
+
+ mutex_unlock(&syslog_lock);
+ len = wait_event_interruptible(log_wait, prb_read_valid(prb, seq, NULL));
+ mutex_lock(&syslog_lock);
+
+ if (len)
+ goto out;
+ } while (syslog_seq != seq);
+
+ /*
+ * Copy records that fit into the buffer. The above cycle makes sure
+ * that the first record is always available.
+ */
+ do {
size_t n;
size_t skip;
+ int err;
- printk_safe_enter_irq();
- raw_spin_lock(&syslog_lock);
- if (!prb_read_valid(prb, syslog_seq, &r)) {
- raw_spin_unlock(&syslog_lock);
- printk_safe_exit_irq();
+ if (!prb_read_valid(prb, syslog_seq, &r))
break;
- }
+
if (r.info->seq != syslog_seq) {
/* message is gone, move to next valid one */
syslog_seq = r.info->seq;
@@ -1531,13 +1545,15 @@ static int syslog_print(char __user *buf, int size)
syslog_partial += n;
} else
n = 0;
- raw_spin_unlock(&syslog_lock);
- printk_safe_exit_irq();
if (!n)
break;
- if (copy_to_user(buf, text + skip, n)) {
+ mutex_unlock(&syslog_lock);
+ err = copy_to_user(buf, text + skip, n);
+ mutex_lock(&syslog_lock);
+
+ if (err) {
if (!len)
len = -EFAULT;
break;
@@ -1546,8 +1562,9 @@ static int syslog_print(char __user *buf, int size)
len += n;
size -= n;
buf += n;
- }
-
+ } while (size);
+out:
+ mutex_unlock(&syslog_lock);
kfree(text);
return len;
}
@@ -1566,7 +1583,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return -ENOMEM;
time = printk_time;
- printk_safe_enter_irq();
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
@@ -1587,23 +1603,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
break;
}
- printk_safe_exit_irq();
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
- printk_safe_enter_irq();
if (len < 0)
break;
}
if (clear) {
- raw_spin_lock(&syslog_lock);
+ mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, seq);
- raw_spin_unlock(&syslog_lock);
+ mutex_unlock(&syslog_lock);
}
- printk_safe_exit_irq();
kfree(text);
return len;
@@ -1611,23 +1624,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
static void syslog_clear(void)
{
- printk_safe_enter_irq();
- raw_spin_lock(&syslog_lock);
+ mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, prb_next_seq(prb));
- raw_spin_unlock(&syslog_lock);
- printk_safe_exit_irq();
-}
-
-/* Return a consistent copy of @syslog_seq. */
-static u64 read_syslog_seq_irq(void)
-{
- u64 seq;
-
- raw_spin_lock_irq(&syslog_lock);
- seq = syslog_seq;
- raw_spin_unlock_irq(&syslog_lock);
-
- return seq;
+ mutex_unlock(&syslog_lock);
}
int do_syslog(int type, char __user *buf, int len, int source)
@@ -1653,11 +1652,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
return 0;
if (!access_ok(buf, len))
return -EFAULT;
-
- error = wait_event_interruptible(log_wait,
- prb_read_valid(prb, read_syslog_seq_irq(), NULL));
- if (error)
- return error;
error = syslog_print(buf, len);
break;
/* Read/clear last kernel messages */
@@ -1703,12 +1697,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
- printk_safe_enter_irq();
- raw_spin_lock(&syslog_lock);
+ mutex_lock(&syslog_lock);
if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
/* No unread messages. */
- raw_spin_unlock(&syslog_lock);
- printk_safe_exit_irq();
+ mutex_unlock(&syslog_lock);
return 0;
}
if (info.seq != syslog_seq) {
@@ -1736,8 +1728,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
}
error -= syslog_partial;
}
- raw_spin_unlock(&syslog_lock);
- printk_safe_exit_irq();
+ mutex_unlock(&syslog_lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
@@ -1940,6 +1931,76 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
}
}
+/*
+ * Recursion is tracked separately on each CPU. If NMIs are supported, an
+ * additional NMI context per CPU is also separately tracked. Until per-CPU
+ * is available, a separate "early tracking" is performed.
+ */
+static DEFINE_PER_CPU(u8, printk_count);
+static u8 printk_count_early;
+#ifdef CONFIG_HAVE_NMI
+static DEFINE_PER_CPU(u8, printk_count_nmi);
+static u8 printk_count_nmi_early;
+#endif
+
+/*
+ * Recursion is limited to keep the output sane. printk() should not require
+ * more than 1 level of recursion (allowing, for example, printk() to trigger
+ * a WARN), but a higher value is used in case some printk-internal errors
+ * exist, such as the ringbuffer validation checks failing.
+ */
+#define PRINTK_MAX_RECURSION 3
+
+/*
+ * Return a pointer to the dedicated counter for the CPU+context of the
+ * caller.
+ */
+static u8 *__printk_recursion_counter(void)
+{
+#ifdef CONFIG_HAVE_NMI
+ if (in_nmi()) {
+ if (printk_percpu_data_ready())
+ return this_cpu_ptr(&printk_count_nmi);
+ return &printk_count_nmi_early;
+ }
+#endif
+ if (printk_percpu_data_ready())
+ return this_cpu_ptr(&printk_count);
+ return &printk_count_early;
+}
+
+/*
+ * Enter recursion tracking. Interrupts are disabled to simplify tracking.
+ * The caller must check the boolean return value to see if the recursion is
+ * allowed. On failure, interrupts are not disabled.
+ *
+ * @recursion_ptr must be a variable of type (u8 *) and is the same variable
+ * that is passed to printk_exit_irqrestore().
+ */
+#define printk_enter_irqsave(recursion_ptr, flags) \
+({ \
+ bool success = true; \
+ \
+ typecheck(u8 *, recursion_ptr); \
+ local_irq_save(flags); \
+ (recursion_ptr) = __printk_recursion_counter(); \
+ if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
+ local_irq_restore(flags); \
+ success = false; \
+ } else { \
+ (*(recursion_ptr))++; \
+ } \
+ success; \
+})
+
+/* Exit recursion tracking, restoring interrupts. */
+#define printk_exit_irqrestore(recursion_ptr, flags) \
+ do { \
+ typecheck(u8 *, recursion_ptr); \
+ (*(recursion_ptr))--; \
+ local_irq_restore(flags); \
+ } while (0)
+
int printk_delay_msec __read_mostly;
static inline void printk_delay(void)
@@ -1961,23 +2022,24 @@ static inline u32 printk_caller_id(void)
}
/**
- * parse_prefix - Parse level and control flags.
+ * printk_parse_prefix - Parse level and control flags.
*
* @text: The terminated text message.
* @level: A pointer to the current level value, will be updated.
- * @lflags: A pointer to the current log flags, will be updated.
+ * @flags: A pointer to the current printk_info flags, will be updated.
*
* @level may be NULL if the caller is not interested in the parsed value.
* Otherwise the variable pointed to by @level must be set to
* LOGLEVEL_DEFAULT in order to be updated with the parsed value.
*
- * @lflags may be NULL if the caller is not interested in the parsed value.
- * Otherwise the variable pointed to by @lflags will be OR'd with the parsed
+ * @flags may be NULL if the caller is not interested in the parsed value.
+ * Otherwise the variable pointed to by @flags will be OR'd with the parsed
* value.
*
* Return: The length of the parsed level and control flags.
*/
-static u16 parse_prefix(char *text, int *level, enum log_flags *lflags)
+u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags)
{
u16 prefix_len = 0;
int kern_level;
@@ -1993,8 +2055,8 @@ static u16 parse_prefix(char *text, int *level, enum log_flags *lflags)
*level = kern_level - '0';
break;
case 'c': /* KERN_CONT */
- if (lflags)
- *lflags |= LOG_CONT;
+ if (flags)
+ *flags |= LOG_CONT;
}
prefix_len += 2;
@@ -2004,8 +2066,9 @@ static u16 parse_prefix(char *text, int *level, enum log_flags *lflags)
return prefix_len;
}
-static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lflags,
- const char *fmt, va_list args)
+static u16 printk_sprint(char *text, u16 size, int facility,
+ enum printk_info_flags *flags, const char *fmt,
+ va_list args)
{
u16 text_len;
@@ -2014,14 +2077,14 @@ static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lfl
/* Mark and strip a trailing newline. */
if (text_len && text[text_len - 1] == '\n') {
text_len--;
- *lflags |= LOG_NEWLINE;
+ *flags |= LOG_NEWLINE;
}
/* Strip log level and control flags. */
if (facility == 0) {
u16 prefix_len;
- prefix_len = parse_prefix(text, NULL, NULL);
+ prefix_len = printk_parse_prefix(text, NULL, NULL);
if (prefix_len) {
text_len -= prefix_len;
memmove(text, text + prefix_len, text_len);
@@ -2038,13 +2101,16 @@ int vprintk_store(int facility, int level,
{
const u32 caller_id = printk_caller_id();
struct prb_reserved_entry e;
- enum log_flags lflags = 0;
+ enum printk_info_flags flags = 0;
struct printk_record r;
+ unsigned long irqflags;
u16 trunc_msg_len = 0;
char prefix_buf[8];
+ u8 *recursion_ptr;
u16 reserve_size;
va_list args2;
u16 text_len;
+ int ret = 0;
u64 ts_nsec;
/*
@@ -2055,6 +2121,9 @@ int vprintk_store(int facility, int level,
*/
ts_nsec = local_clock();
+ if (!printk_enter_irqsave(recursion_ptr, irqflags))
+ return 0;
+
/*
* The sprintf needs to come first since the syslog prefix might be
* passed in as a parameter. An extra byte must be reserved so that
@@ -2070,29 +2139,30 @@ int vprintk_store(int facility, int level,
/* Extract log level or control flags. */
if (facility == 0)
- parse_prefix(&prefix_buf[0], &level, &lflags);
+ printk_parse_prefix(&prefix_buf[0], &level, &flags);
if (level == LOGLEVEL_DEFAULT)
level = default_message_loglevel;
if (dev_info)
- lflags |= LOG_NEWLINE;
+ flags |= LOG_NEWLINE;
- if (lflags & LOG_CONT) {
+ if (flags & LOG_CONT) {
prb_rec_init_wr(&r, reserve_size);
if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
- facility, &lflags, fmt, args);
+ facility, &flags, fmt, args);
r.info->text_len += text_len;
- if (lflags & LOG_NEWLINE) {
+ if (flags & LOG_NEWLINE) {
r.info->flags |= LOG_NEWLINE;
prb_final_commit(&e);
} else {
prb_commit(&e);
}
- return text_len;
+ ret = text_len;
+ goto out;
}
}
@@ -2108,29 +2178,32 @@ int vprintk_store(int facility, int level,
prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
if (!prb_reserve(&e, prb, &r))
- return 0;
+ goto out;
}
/* fill message */
- text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &lflags, fmt, args);
+ text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
if (trunc_msg_len)
memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
r.info->text_len = text_len + trunc_msg_len;
r.info->facility = facility;
r.info->level = level & 7;
- r.info->flags = lflags & 0x1f;
+ r.info->flags = flags & 0x1f;
r.info->ts_nsec = ts_nsec;
r.info->caller_id = caller_id;
if (dev_info)
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
/* A message without a trailing newline can be continued. */
- if (!(lflags & LOG_NEWLINE))
+ if (!(flags & LOG_NEWLINE))
prb_commit(&e);
else
prb_final_commit(&e);
- return (text_len + trunc_msg_len);
+ ret = text_len + trunc_msg_len;
+out:
+ printk_exit_irqrestore(recursion_ptr, irqflags);
+ return ret;
}
asmlinkage int vprintk_emit(int facility, int level,
@@ -2139,7 +2212,6 @@ asmlinkage int vprintk_emit(int facility, int level,
{
int printed_len;
bool in_sched = false;
- unsigned long flags;
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
@@ -2153,9 +2225,7 @@ asmlinkage int vprintk_emit(int facility, int level,
boot_delay_msec(level);
printk_delay();
- printk_safe_enter_irqsave(flags);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
- printk_safe_exit_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -2186,28 +2256,7 @@ int vprintk_default(const char *fmt, va_list args)
}
EXPORT_SYMBOL_GPL(vprintk_default);
-/**
- * printk - print a kernel message
- * @fmt: format string
- *
- * This is printk(). It can be called from any context. We want it to work.
- *
- * We try to grab the console_lock. If we succeed, it's easy - we log the
- * output and call the console drivers. If we fail to get the semaphore, we
- * place the output into the log buffer and return. The current holder of
- * the console_sem will notice the new output in console_unlock(); and will
- * send it to the consoles before releasing the lock.
- *
- * One effect of this deferred printing is that code which calls printk() and
- * then changes console_loglevel may break. This is because console_loglevel
- * is inspected when the actual printing occurs.
- *
- * See also:
- * printf(3)
- *
- * See the vsnprintf() documentation for format string extensions over C99.
- */
-asmlinkage __visible int printk(const char *fmt, ...)
+asmlinkage __visible int _printk(const char *fmt, ...)
{
va_list args;
int r;
@@ -2218,7 +2267,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
return r;
}
-EXPORT_SYMBOL(printk);
+EXPORT_SYMBOL(_printk);
#else /* CONFIG_PRINTK */
@@ -2404,6 +2453,18 @@ module_param_named(console_suspend, console_suspend_enabled,
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
" and hibernate operations");
+static bool printk_console_no_auto_verbose;
+
+void console_verbose(void)
+{
+ if (console_loglevel && !printk_console_no_auto_verbose)
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+EXPORT_SYMBOL_GPL(console_verbose);
+
+module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
+MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
+
/**
* suspend_console - suspend the console subsystem
*
@@ -2545,6 +2606,7 @@ void console_unlock(void)
bool do_cond_resched, retry;
struct printk_info info;
struct printk_record r;
+ u64 __maybe_unused next_seq;
if (console_suspended) {
up_console_sem();
@@ -2584,9 +2646,9 @@ again:
for (;;) {
size_t ext_len = 0;
+ int handover;
size_t len;
- printk_safe_enter_irqsave(flags);
skip:
if (!prb_read_valid(prb, console_seq, &r))
break;
@@ -2636,26 +2698,31 @@ skip:
* were to occur on another CPU, it may wait for this one to
* finish. This task can not be preempted if there is a
* waiter waiting to take over.
+ *
+ * Interrupts are disabled because the hand over to a waiter
+ * must not be interrupted until the hand over is completed
+ * (@console_waiter is cleared).
*/
+ printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(ext_text, ext_len, text, len);
start_critical_timings();
- if (console_lock_spinning_disable_and_check()) {
- printk_safe_exit_irqrestore(flags);
- return;
- }
-
+ handover = console_lock_spinning_disable_and_check();
printk_safe_exit_irqrestore(flags);
+ if (handover)
+ return;
if (do_cond_resched)
cond_resched();
}
- console_locked = 0;
+ /* Get consistent value of the next-to-be-used sequence number. */
+ next_seq = console_seq;
+ console_locked = 0;
up_console_sem();
/*
@@ -2664,9 +2731,7 @@ skip:
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
- retry = prb_read_valid(prb, console_seq, NULL);
- printk_safe_exit_irqrestore(flags);
-
+ retry = prb_read_valid(prb, next_seq, NULL);
if (retry && console_trylock())
goto again;
}
@@ -2728,13 +2793,8 @@ void console_flush_on_panic(enum con_flush_mode mode)
console_trylock();
console_may_schedule = 0;
- if (mode == CONSOLE_REPLAY_ALL) {
- unsigned long flags;
-
- printk_safe_enter_irqsave(flags);
+ if (mode == CONSOLE_REPLAY_ALL)
console_seq = prb_first_valid_seq(prb);
- printk_safe_exit_irqrestore(flags);
- }
console_unlock();
}
@@ -2869,7 +2929,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
*/
void register_console(struct console *newcon)
{
- unsigned long flags;
struct console *bcon = NULL;
int err;
@@ -2974,9 +3033,9 @@ void register_console(struct console *newcon)
exclusive_console_stop_seq = console_seq;
/* Get a consistent copy of @syslog_seq. */
- raw_spin_lock_irqsave(&syslog_lock, flags);
+ mutex_lock(&syslog_lock);
console_seq = syslog_seq;
- raw_spin_unlock_irqrestore(&syslog_lock, flags);
+ mutex_unlock(&syslog_lock);
}
console_unlock();
console_sysfs_notify();
@@ -3203,7 +3262,7 @@ int vprintk_deferred(const char *fmt, va_list args)
return r;
}
-int printk_deferred(const char *fmt, ...)
+int _printk_deferred(const char *fmt, ...)
{
va_list args;
int r;
@@ -3386,14 +3445,12 @@ bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
struct printk_info info;
unsigned int line_count;
struct printk_record r;
- unsigned long flags;
size_t l = 0;
bool ret = false;
if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;
- printk_safe_enter_irqsave(flags);
prb_rec_init_rd(&r, &info, line, size);
/* Read text or count text lines? */
@@ -3414,7 +3471,6 @@ bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
iter->cur_seq = r.info->seq + 1;
ret = true;
out:
- printk_safe_exit_irqrestore(flags);
if (len)
*len = l;
return ret;
@@ -3446,7 +3502,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
u64 min_seq = latched_seq_read_nolock(&clear_seq);
struct printk_info info;
struct printk_record r;
- unsigned long flags;
u64 seq;
u64 next_seq;
size_t len = 0;
@@ -3459,7 +3514,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;
- printk_safe_enter_irqsave(flags);
if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
if (info.seq != iter->cur_seq) {
/* messages are gone, move to first available one */
@@ -3468,10 +3522,8 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
}
/* last entry */
- if (iter->cur_seq >= iter->next_seq) {
- printk_safe_exit_irqrestore(flags);
+ if (iter->cur_seq >= iter->next_seq)
goto out;
- }
/*
* Find first record that fits, including all following records,
@@ -3503,7 +3555,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
iter->next_seq = next_seq;
ret = true;
- printk_safe_exit_irqrestore(flags);
out:
if (len_out)
*len_out = len;
@@ -3521,12 +3572,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
*/
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
- unsigned long flags;
-
- printk_safe_enter_irqsave(flags);
iter->cur_seq = latched_seq_read_nolock(&clear_seq);
iter->next_seq = prb_next_seq(prb);
- printk_safe_exit_irqrestore(flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);