From: John Ogness <john.ogness@linutronix.de> Date: Fri, 22 Sep 2023 17:35:04 +0000 Subject: [PATCH 46/48] printk: Add kthread for all legacy consoles The write callback of legacy consoles makes use of spinlocks. This is not permitted with PREEMPT_RT in atomic contexts. For PREEMPT_RT, create a new kthread to handle printing of all the legacy consoles (and nbcon consoles if boot consoles are registered). Since, if running from the kthread, the consoles are printing in a task context, the legacy nbcon printing can use the device_lock(), write_thread(), device_unlock() callbacks for printing. Introduce the macro force_printkthreads() to query if the forced threading of legacy consoles is in effect. These changes only affect CONFIG_PREEMPT_RT. Signed-off-by: John Ogness <john.ogness@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- kernel/printk/internal.h | 20 +++ kernel/printk/nbcon.c | 50 +++++---- kernel/printk/printk.c | 252 +++++++++++++++++++++++++++++++++++++---------- 3 files changed, 251 insertions(+), 71 deletions(-) Index: linux-6.8.2-rt10/kernel/printk/internal.h =================================================================== @ linux-6.8.2-rt10/kernel/printk/internal.h:24 @ int devkmsg_sysctl_set_loglvl(struct ctl (con->flags & CON_BOOT) ? "boot" : "", \ con->name, con->index, ##__VA_ARGS__) +#ifdef CONFIG_PREEMPT_RT +# define force_printkthreads() (true) +#else +# define force_printkthreads() (false) +#endif + #ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK_CALLER @ linux-6.8.2-rt10/kernel/printk/internal.h:99 @ void nbcon_free(struct console *con); enum nbcon_prio nbcon_get_default_prio(void); void nbcon_atomic_flush_pending(void); bool nbcon_legacy_emit_next_record(struct console *con, bool *handover, - int cookie); + int cookie, bool use_atomic); void nbcon_kthread_create(struct console *con); void nbcon_wake_threads(void); +void nbcon_legacy_kthread_create(void); /* * Check if the given console is currently capable and allowed to print @ linux-6.8.2-rt10/kernel/printk/internal.h:189 @ static inline void nbcon_free(struct con static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; } static inline void nbcon_atomic_flush_pending(void) { } static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover, - int cookie) { return false; } + int cookie, bool use_atomic) { return false; } static inline bool console_is_usable(struct console *con, short flags, bool use_atomic) { return false; } @ linux-6.8.2-rt10/kernel/printk/internal.h:197 @ static inline bool console_is_usable(str #endif /* CONFIG_PRINTK */ extern bool have_boot_console; +extern bool have_legacy_console; + +/* + * Specifies if the console lock/unlock dance is needed for console + * printing. If @have_boot_console is true, the nbcon consoles will + * be printed serially along with the legacy consoles because nbcon + * consoles cannot print simultaneously with boot consoles. 
+ */ +#define printing_via_unlock (have_legacy_console || have_boot_console) extern struct printk_buffers printk_shared_pbufs; Index: linux-6.8.2-rt10/kernel/printk/nbcon.c =================================================================== --- linux-6.8.2-rt10.orig/kernel/printk/nbcon.c +++ linux-6.8.2-rt10/kernel/printk/nbcon.c @ linux-6.8.2-rt10/kernel/printk/internal.h:1205 @ static __ref unsigned int *nbcon_get_cpu } /** - * nbcon_atomic_emit_one - Print one record for an nbcon console using the - * write_atomic() callback + * nbcon_emit_one - Print one record for an nbcon console using the + * specified callback * @wctxt: An initialized write context struct to use for this context + * @use_atomic: True if the write_atomic callback is to be used * * Return: False if it is known there are no more records to print, * otherwise true. @ linux-6.8.2-rt10/kernel/printk/internal.h:1216 @ static __ref unsigned int *nbcon_get_cpu * This is an internal helper to handle the locking of the console before * calling nbcon_emit_next_record(). */ -static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt) +static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic) { struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); @ linux-6.8.2-rt10/kernel/printk/internal.h:1228 @ static bool nbcon_atomic_emit_one(struct * handed over or taken over. In both cases the context is no * longer valid. */ - if (!nbcon_emit_next_record(wctxt, true)) + if (!nbcon_emit_next_record(wctxt, use_atomic)) return true; nbcon_context_release(ctxt); @ linux-6.8.2-rt10/kernel/printk/internal.h:1267 @ enum nbcon_prio nbcon_get_default_prio(v * both the console_lock and the SRCU read lock. Otherwise it * is set to false. * @cookie: The cookie from the SRCU read lock. + * @use_atomic: True if the write_atomic callback is to be used * * Context: Any context except NMI. * Return: False if the given console has no next record to print, @ linux-6.8.2-rt10/kernel/printk/internal.h:1278 @ enum nbcon_prio nbcon_get_default_prio(v * Essentially it is the nbcon version of console_emit_next_record(). */ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover, - int cookie) + int cookie, bool use_atomic) { struct nbcon_write_context wctxt = { }; struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt); @ linux-6.8.2-rt10/kernel/printk/internal.h:1287 @ bool nbcon_legacy_emit_next_record(struc *handover = false; - /* Use the same procedure as console_emit_next_record(). */ - printk_safe_enter_irqsave(flags); - console_lock_spinning_enable(); - stop_critical_timings(); - - ctxt->console = con; - ctxt->prio = nbcon_get_default_prio(); - - progress = nbcon_atomic_emit_one(&wctxt); - - start_critical_timings(); - *handover = console_lock_spinning_disable_and_check(cookie); - printk_safe_exit_irqrestore(flags); + ctxt->console = con; + + if (use_atomic) { + /* Use the same procedure as console_emit_next_record(). 
*/ + printk_safe_enter_irqsave(flags); + console_lock_spinning_enable(); + stop_critical_timings(); + + ctxt->prio = nbcon_get_default_prio(); + progress = nbcon_emit_one(&wctxt, use_atomic); + + start_critical_timings(); + *handover = console_lock_spinning_disable_and_check(cookie); + printk_safe_exit_irqrestore(flags); + } else { + con->device_lock(con, &flags); + cant_migrate(); + + ctxt->prio = nbcon_get_default_prio(); + progress = nbcon_emit_one(&wctxt, use_atomic); + + con->device_unlock(con, flags); + } return progress; } @ linux-6.8.2-rt10/kernel/printk/internal.h:1551 @ static int __init printk_setup_threads(v printk_threads_enabled = true; for_each_console(con) nbcon_kthread_create(con); + if (force_printkthreads() && printing_via_unlock) + nbcon_legacy_kthread_create(); console_list_unlock(); return 0; } Index: linux-6.8.2-rt10/kernel/printk/printk.c =================================================================== --- linux-6.8.2-rt10.orig/kernel/printk/printk.c +++ linux-6.8.2-rt10/kernel/printk/printk.c @ linux-6.8.2-rt10/kernel/printk/internal.h:471 @ static DEFINE_MUTEX(syslog_lock); * present, it is necessary to perform the console lock/unlock dance * whenever console flushing should occur. */ -static bool have_legacy_console; +bool have_legacy_console; /* * Specifies if an nbcon console is registered. If nbcon consoles are present, @ linux-6.8.2-rt10/kernel/printk/internal.h:488 @ static bool have_nbcon_console; */ bool have_boot_console; -/* - * Specifies if the console lock/unlock dance is needed for console - * printing. If @have_boot_console is true, the nbcon consoles will - * be printed serially along with the legacy consoles because nbcon - * consoles cannot print simultaneously with boot consoles. - */ -#define printing_via_unlock (have_legacy_console || have_boot_console) - #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); + +static DECLARE_WAIT_QUEUE_HEAD(legacy_wait); + /* All 3 protected by @syslog_lock. 
*/ /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; @ linux-6.8.2-rt10/kernel/printk/internal.h:2356 @ asmlinkage int vprintk_emit(int facility const struct dev_printk_info *dev_info, const char *fmt, va_list args) { - bool do_trylock_unlock = printing_via_unlock; + bool do_trylock_unlock = printing_via_unlock && + !force_printkthreads(); int printed_len; /* Suppress unimportant messages after panic happens */ @ linux-6.8.2-rt10/kernel/printk/internal.h:2480 @ EXPORT_SYMBOL(_printk); static bool pr_flush(int timeout_ms, bool reset_on_progress); static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); +static struct task_struct *nbcon_legacy_kthread; + +static inline void wake_up_legacy_kthread(void) +{ + if (nbcon_legacy_kthread) + wake_up_interruptible(&legacy_wait); +} + #else /* CONFIG_PRINTK */ #define printk_time false @ linux-6.8.2-rt10/kernel/printk/internal.h:2501 @ static u64 syslog_seq; static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } +static inline void nbcon_legacy_kthread_create(void) { } +static inline void wake_up_legacy_kthread(void) { } #endif /* CONFIG_PRINTK */ #ifdef CONFIG_EARLY_PRINTK @ linux-6.8.2-rt10/kernel/printk/internal.h:2748 @ void resume_console(void) } console_srcu_read_unlock(cookie); + wake_up_legacy_kthread(); + pr_flush(1000, true); } @ linux-6.8.2-rt10/kernel/printk/internal.h:2764 @ void resume_console(void) */ static int console_cpu_notify(unsigned int cpu) { - if (!cpuhp_tasks_frozen && printing_via_unlock) { + if (!cpuhp_tasks_frozen && printing_via_unlock && + !force_printkthreads()) { /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); @ linux-6.8.2-rt10/kernel/printk/internal.h:3025 @ static bool console_emit_next_record(str con->dropped = 0; } - /* - * While actively printing out messages, if another printk() - * were to occur on another CPU, it may wait for this one to - * finish. This task can not be preempted if there is a - * waiter waiting to take over. - * - * Interrupts are disabled because the hand over to a waiter - * must not be interrupted until the hand over is completed - * (@console_waiter is cleared). - */ - printk_safe_enter_irqsave(flags); - console_lock_spinning_enable(); + /* Write everything out to the hardware. */ + + if (force_printkthreads()) { + /* + * With forced threading this function is either in a thread + * or panic context. So there is no need for concern about + * printk reentrance or handovers. + */ - /* Do not trace print latency. */ - stop_critical_timings(); + con->write(con, outbuf, pmsg.outbuf_len); + con->seq = pmsg.seq + 1; + } else { + /* + * While actively printing out messages, if another printk() + * were to occur on another CPU, it may wait for this one to + * finish. This task can not be preempted if there is a + * waiter waiting to take over. + * + * Interrupts are disabled because the hand over to a waiter + * must not be interrupted until the hand over is completed + * (@console_waiter is cleared). + */ + printk_safe_enter_irqsave(flags); + console_lock_spinning_enable(); - /* Write everything out to the hardware. */ - con->write(con, outbuf, pmsg.outbuf_len); + /* Do not trace print latency. 
*/ + stop_critical_timings(); - start_critical_timings(); + con->write(con, outbuf, pmsg.outbuf_len); - con->seq = pmsg.seq + 1; + start_critical_timings(); - *handover = console_lock_spinning_disable_and_check(cookie); - printk_safe_exit_irqrestore(flags); + con->seq = pmsg.seq + 1; + + *handover = console_lock_spinning_disable_and_check(cookie); + printk_safe_exit_irqrestore(flags); + } skip: return true; } @ linux-6.8.2-rt10/kernel/printk/internal.h:3125 @ static bool console_flush_all(bool do_co if ((flags & CON_NBCON) && con->kthread) continue; - if (!console_is_usable(con, flags, true)) + if (!console_is_usable(con, flags, !do_cond_resched)) continue; any_usable = true; if (flags & CON_NBCON) { - progress = nbcon_legacy_emit_next_record(con, handover, cookie); + progress = nbcon_legacy_emit_next_record(con, handover, cookie, + !do_cond_resched); printk_seq = nbcon_seq_read(con); } else { progress = console_emit_next_record(con, handover, cookie); @ linux-6.8.2-rt10/kernel/printk/internal.h:3170 @ abandon: return false; } -/** - * console_unlock - unblock the console subsystem from printing - * - * Releases the console_lock which the caller holds to block printing of - * the console subsystem. - * - * While the console_lock was held, console output may have been buffered - * by printk(). If this is the case, console_unlock(); emits - * the output prior to releasing the lock. - * - * console_unlock(); may be called from any context. - */ -void console_unlock(void) +static void console_flush_and_unlock(void) { bool do_cond_resched; bool handover; @ linux-6.8.2-rt10/kernel/printk/internal.h:3214 @ void console_unlock(void) */ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); } + +/** + * console_unlock - unblock the console subsystem from printing + * + * Releases the console_lock which the caller holds to block printing of + * the console subsystem. + * + * While the console_lock was held, console output may have been buffered + * by printk(). If this is the case, console_unlock(); emits + * the output prior to releasing the lock. + * + * console_unlock(); may be called from any context. + */ +void console_unlock(void) +{ + /* + * Forced threading relies on kthread and atomic consoles for + * printing. It never attempts to print from console_unlock(). + */ + if (force_printkthreads()) { + __console_unlock(); + return; + } + + console_flush_and_unlock(); +} EXPORT_SYMBOL(console_unlock); /** @ linux-6.8.2-rt10/kernel/printk/internal.h:3449 @ void console_start(struct console *conso if (flags & CON_NBCON) nbcon_kthread_wake(console); + else + wake_up_legacy_kthread(); __pr_flush(console, 1000, true); } EXPORT_SYMBOL(console_start); +#ifdef CONFIG_PRINTK +static bool printer_should_wake(void) +{ + bool available = false; + struct console *con; + int cookie; + + if (kthread_should_stop()) + return true; + + cookie = console_srcu_read_lock(); + for_each_console_srcu(con) { + short flags = console_srcu_read_flags(con); + u64 printk_seq; + + /* + * The legacy printer thread is only for legacy consoles, + * unless the nbcon console has no kthread printer. + */ + if ((flags & CON_NBCON) && con->kthread) + continue; + + if (!console_is_usable(con, flags, true)) + continue; + + if (flags & CON_NBCON) { + printk_seq = nbcon_seq_read(con); + } else { + /* + * It is safe to read @seq because only this + * thread context updates @seq. 
+ */ + printk_seq = con->seq; + } + + if (prb_read_valid(prb, printk_seq, NULL)) { + available = true; + break; + } + } + console_srcu_read_unlock(cookie); + + return available; +} + +static int nbcon_legacy_kthread_func(void *unused) +{ + int error; + + for (;;) { + error = wait_event_interruptible(legacy_wait, printer_should_wake()); + + if (kthread_should_stop()) + break; + + if (error) + continue; + + console_lock(); + console_flush_and_unlock(); + } + + return 0; +} + +void nbcon_legacy_kthread_create(void) +{ + struct task_struct *kt; + + lockdep_assert_held(&console_mutex); + + if (!force_printkthreads()) + return; + + if (!printk_threads_enabled || nbcon_legacy_kthread) + return; + + kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy"); + if (IS_ERR(kt)) { + pr_err("unable to start legacy printing thread\n"); + return; + } + + nbcon_legacy_kthread = kt; + + /* + * It is important that console printing threads are scheduled + * shortly after a printk call and with generous runtime budgets. + */ + sched_set_normal(nbcon_legacy_kthread, -20); +} +#endif /* CONFIG_PRINTK */ + static int __read_mostly keep_bootcon; static int __init keep_bootcon_setup(char *str) @ linux-6.8.2-rt10/kernel/printk/internal.h:3825 @ void register_console(struct console *ne newcon->seq = 0; } else { have_legacy_console = true; + nbcon_legacy_kthread_create(); } if (newcon->flags & CON_BOOT) @ linux-6.8.2-rt10/kernel/printk/internal.h:3981 @ static int unregister_console_locked(str nbcon_kthread_create(c); } +#ifdef CONFIG_PRINTK + if (!printing_via_unlock && nbcon_legacy_kthread) { + kthread_stop(nbcon_legacy_kthread); + nbcon_legacy_kthread = NULL; + } +#endif + return res; } @ linux-6.8.2-rt10/kernel/printk/internal.h:4146 @ static bool __pr_flush(struct console *c seq = prb_next_reserve_seq(prb); - /* Flush the consoles so that records up to @seq are printed. */ - if (printing_via_unlock) { + /* + * Flush the consoles so that records up to @seq are printed. + * Otherwise this function will just wait for the threaded printers + * to print up to @seq. + */ + if (printing_via_unlock && !force_printkthreads()) { console_lock(); console_unlock(); } @ linux-6.8.2-rt10/kernel/printk/internal.h:4265 @ static void wake_up_klogd_work_func(stru int pending = this_cpu_xchg(printk_pending, 0); if (pending & PRINTK_PENDING_OUTPUT) { - /* If trylock fails, someone else is doing the printing */ - if (console_trylock()) - console_unlock(); + if (force_printkthreads()) { + wake_up_legacy_kthread(); + } else { + /* + * If trylock fails, some other context + * will do the printing. + */ + if (console_trylock()) + console_unlock(); + } } if (pending & PRINTK_PENDING_WAKEUP)
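
[ Editor's note, not part of the patch: a minimal sketch of the printing
  decision described in the commit message. emit_direct() and
  wake_legacy_kthread() are hypothetical stand-ins for the real
  console_trylock()/console_unlock() and wake_up_interruptible(&legacy_wait)
  paths; the sketch only shows how force_printkthreads() and
  printing_via_unlock gate them. ]

#include <stdbool.h>

#ifdef CONFIG_PREEMPT_RT
# define force_printkthreads()	(true)
#else
# define force_printkthreads()	(false)
#endif

static bool have_legacy_console;
static bool have_boot_console;

/* True if the console lock/unlock dance is needed (legacy and/or boot consoles). */
#define printing_via_unlock	(have_legacy_console || have_boot_console)

/* Hypothetical stand-ins for the real printk machinery. */
static void emit_direct(void)		{ /* console_trylock() + console_unlock() */ }
static void wake_legacy_kthread(void)	{ /* wake_up_interruptible(&legacy_wait) */ }

/* Rough shape of how a printk() caller gets its record onto the consoles. */
static void flush_after_printk(void)
{
	if (!printing_via_unlock)
		return;			/* only nbcon consoles: their kthreads print */

	if (force_printkthreads())
		wake_legacy_kthread();	/* PREEMPT_RT: defer to the "pr/legacy" task */
	else
		emit_direct();		/* !PREEMPT_RT: print from the calling context */
}

The effect is that with CONFIG_PREEMPT_RT the legacy write() callbacks, which
take spinlocks, are only ever invoked from the "pr/legacy" task (or from the
panic/atomic flush paths), never from an atomic printk() caller.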