From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 17:35:04 +0000
Subject: [PATCH 44/46] printk: Add kthread for all legacy consoles

The write() callback of legacy consoles usually makes use of
spinlocks. This is not permitted in atomic contexts with
PREEMPT_RT, where spinlock_t is a sleeping lock.

For PREEMPT_RT, create a new kthread to handle printing of all
the legacy consoles (and nbcon consoles if boot consoles are
registered).

Since the kthread is printing in a task context, the legacy
nbcon printing can use the device_lock(), write_thread(),
device_unlock() callbacks for printing.
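
For reference, a driver would typically back device_lock() and
device_unlock() with its normal port lock, which is safe to take
from task context on PREEMPT_RT. A minimal sketch (illustrative
only; foo_port and its lock are hypothetical, not part of this
patch):

  static void foo_device_lock(struct console *con, unsigned long *flags)
  {
  	struct foo_port *port = con->data;	/* hypothetical driver data */

  	/* spinlock_t sleeps on PREEMPT_RT; safe from the printing kthread */
  	spin_lock_irqsave(&port->lock, *flags);
  }

  static void foo_device_unlock(struct console *con, unsigned long flags)
  {
  	struct foo_port *port = con->data;

  	spin_unlock_irqrestore(&port->lock, flags);
  }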

Introduce the macro force_printkthreads() to query whether the
forced threading of legacy consoles is in effect.

These changes only affect CONFIG_PREEMPT_RT.
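
With forced threading, the resulting print path is roughly the
following (a sketch for illustration; all names appear in this
patch):

  printk()
    vprintk_emit()		/* stores the record, no direct printing */
      ... irq_work ...
        wake_up_klogd_work_func()	/* PRINTK_PENDING_OUTPUT */
          wake_up_legacy_kthread()

  "pr/legacy" kthread, nbcon_legacy_kthread_func():
    wait_event_interruptible(legacy_wait, printer_should_wake())
    console_lock()
    console_flush_and_unlock()	/* write() or write_thread() per console */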

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/printk/internal.h |   11 +-
 kernel/printk/nbcon.c    |   52 ++++++----
 kernel/printk/printk.c   |  242 ++++++++++++++++++++++++++++++++++++++---------
 3 files changed, 243 insertions(+), 62 deletions(-)

--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@ kernel/printk/internal.h:24 @ int devkmsg_sysctl_set_loglvl(struct ctl
 		(con->flags & CON_BOOT) ? "boot" : "",		\
 		con->name, con->index, ##__VA_ARGS__)
 
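+/*
+ * On PREEMPT_RT the legacy console write() callbacks may take spinlock_t
+ * locks, which must not be acquired in atomic contexts. Therefore legacy
+ * console printing is always forced into a kthread there.
+ */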
+#ifdef CONFIG_PREEMPT_RT
+# define force_printkthreads()		(true)
+#else
+# define force_printkthreads()		(false)
+#endif
+
 #ifdef CONFIG_PRINTK
 
 #ifdef CONFIG_PRINTK_CALLER
@ kernel/printk/internal.h:99 @ void nbcon_free(struct console *con);
 enum nbcon_prio nbcon_get_default_prio(void);
 void nbcon_atomic_flush_pending(void);
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie);
+				   int cookie, bool use_atomic);
 void nbcon_kthread_create(struct console *con);
 void nbcon_wake_threads(void);
+void nbcon_legacy_kthread_create(void);
 
 /*
  * Check if the given console is currently capable and allowed to print
@ kernel/printk/internal.h:189 @ static inline void nbcon_free(struct con
 static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
 static inline void nbcon_atomic_flush_pending(void) { }
 static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-						 int cookie) { return false; }
+						 int cookie, bool use_atomic) { return false; }
 
 static inline bool console_is_usable(struct console *con, short flags,
 				     bool use_atomic) { return false; }
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@ kernel/printk/nbcon.c:1227 @ enum nbcon_prio nbcon_get_default_prio(v
 }
 
 /*
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- *				write_atomic() callback
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ *			specified callback
  * @wctxt:	An initialized write context struct to use for this context
+ * @use_atomic:	True if the write_atomic callback is to be used
  *
  * Return:	True, when a record has been printed and there are still
  *		pending records. The caller might want to continue flushing.
@ kernel/printk/nbcon.c:1243 @ enum nbcon_prio nbcon_get_default_prio(v
  * This is an internal helper to handle the locking of the console before
  * calling nbcon_emit_next_record().
  */
-static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
 {
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
 
@ kernel/printk/nbcon.c:1258 @ static bool nbcon_atomic_emit_one(struct
 	 * The higher priority printing context takes over responsibility
 	 * to print the pending records.
 	 */
-	if (!nbcon_emit_next_record(wctxt, true))
+	if (!nbcon_emit_next_record(wctxt, use_atomic))
 		return false;
 
 	nbcon_context_release(ctxt);
@ kernel/printk/nbcon.c:1275 @ static bool nbcon_atomic_emit_one(struct
  *		both the console_lock and the SRCU read lock. Otherwise it
  *		is set to false.
  * @cookie:	The cookie from the SRCU read lock.
+ * @use_atomic:	True if the write_atomic callback is to be used
  *
  * Context:	Any context except NMI.
  * Return:	True, when a record has been printed and there are still
@ kernel/printk/nbcon.c:1291 @ static bool nbcon_atomic_emit_one(struct
  * Essentially it is the nbcon version of console_emit_next_record().
  */
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie)
+				   int cookie, bool use_atomic)
 {
 	struct nbcon_write_context wctxt = { };
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
 	unsigned long flags;
 	bool progress;
 
-	/* Use the same procedure as console_emit_next_record(). */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
-	stop_critical_timings();
-
-	ctxt->console	= con;
-	ctxt->prio	= nbcon_get_default_prio();
-
-	progress = nbcon_atomic_emit_one(&wctxt);
-
-	start_critical_timings();
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+	ctxt->console = con;
+
+	if (use_atomic) {
+		/* Use the same procedure as console_emit_next_record(). */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
+		stop_critical_timings();
+
+		ctxt->prio = nbcon_get_default_prio();
+		progress = nbcon_emit_one(&wctxt, use_atomic);
+
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	} else {
+		*handover = false;
+
+		con->device_lock(con, &flags);
+		cant_migrate();
+
+		ctxt->prio = nbcon_get_default_prio();
+		progress = nbcon_emit_one(&wctxt, use_atomic);
+
+		con->device_unlock(con, flags);
+	}
 
 	return progress;
 }
@ kernel/printk/nbcon.c:1639 @ static int __init printk_setup_threads(v
 	printk_threads_enabled = true;
 	for_each_console(con)
 		nbcon_kthread_create(con);
+	if (force_printkthreads() && printing_via_unlock)
+		nbcon_legacy_kthread_create();
 	console_list_unlock();
 	return 0;
 }
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@ kernel/printk/printk.c:490 @ bool have_boot_console;
 
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
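+/* Waitqueue for the legacy console printing kthread. */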
+static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
+
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
@ kernel/printk/printk.c:2351 @ asmlinkage int vprintk_emit(int facility
 			    const struct dev_printk_info *dev_info,
 			    const char *fmt, va_list args)
 {
-	bool do_trylock_unlock = printing_via_unlock;
+	bool do_trylock_unlock = printing_via_unlock &&
+				 !force_printkthreads();
 	int printed_len;
 
 	/* Suppress unimportant messages after panic happens */
@ kernel/printk/printk.c:2475 @ EXPORT_SYMBOL(_printk);
 static bool pr_flush(int timeout_ms, bool reset_on_progress);
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
 
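+/* Task of the "pr/legacy" kthread used when threaded printing is forced. */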
+static struct task_struct *nbcon_legacy_kthread;
+
+static inline void wake_up_legacy_kthread(void)
+{
+	if (nbcon_legacy_kthread)
+		wake_up_interruptible(&legacy_wait);
+}
+
 #else /* CONFIG_PRINTK */
 
 #define printk_time		false
@ kernel/printk/printk.c:2496 @ static u64 syslog_seq;
 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
 
+static inline void nbcon_legacy_kthread_create(void) { }
+static inline void wake_up_legacy_kthread(void) { }
 #endif /* CONFIG_PRINTK */
 
 #ifdef CONFIG_EARLY_PRINTK
@ kernel/printk/printk.c:2743 @ void resume_console(void)
 	}
 	console_srcu_read_unlock(cookie);
 
+	wake_up_legacy_kthread();
+
 	pr_flush(1000, true);
 }
 
@ kernel/printk/printk.c:2759 @ void resume_console(void)
  */
 static int console_cpu_notify(unsigned int cpu)
 {
-	if (!cpuhp_tasks_frozen && printing_via_unlock) {
+	if (!cpuhp_tasks_frozen && printing_via_unlock &&
+	    !force_printkthreads()) {
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
 			console_unlock();
@ kernel/printk/printk.c:3020 @ static bool console_emit_next_record(str
 		con->dropped = 0;
 	}
 
-	/*
-	 * While actively printing out messages, if another printk()
-	 * were to occur on another CPU, it may wait for this one to
-	 * finish. This task can not be preempted if there is a
-	 * waiter waiting to take over.
-	 *
-	 * Interrupts are disabled because the hand over to a waiter
-	 * must not be interrupted until the hand over is completed
-	 * (@console_waiter is cleared).
-	 */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
+	/* Write everything out to the hardware. */
 
-	/* Do not trace print latency. */
-	stop_critical_timings();
+	if (force_printkthreads()) {
+		/*
+		 * With forced threading this function is either in a thread
+		 * or panic context. So there is no need for concern about
+		 * printk reentrance or handovers.
+		 */
 
-	/* Write everything out to the hardware. */
-	con->write(con, outbuf, pmsg.outbuf_len);
+		con->write(con, outbuf, pmsg.outbuf_len);
+		con->seq = pmsg.seq + 1;
+	} else {
+		/*
+		 * While actively printing out messages, if another printk()
+		 * were to occur on another CPU, it may wait for this one to
+		 * finish. This task can not be preempted if there is a
+		 * waiter waiting to take over.
+		 *
+		 * Interrupts are disabled because the hand over to a waiter
+		 * must not be interrupted until the hand over is completed
+		 * (@console_waiter is cleared).
+		 */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
 
-	start_critical_timings();
+		/* Do not trace print latency. */
+		stop_critical_timings();
 
-	con->seq = pmsg.seq + 1;
+		con->write(con, outbuf, pmsg.outbuf_len);
 
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+		start_critical_timings();
+
+		con->seq = pmsg.seq + 1;
+
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	}
 skip:
 	return true;
 }
@ kernel/printk/printk.c:3120 @ static bool console_flush_all(bool do_co
 			if ((flags & CON_NBCON) && con->kthread)
 				continue;
 
-			if (!console_is_usable(con, flags, true))
+			if (!console_is_usable(con, flags, !do_cond_resched))
 				continue;
 			any_usable = true;
 
 			if (flags & CON_NBCON) {
-				progress = nbcon_legacy_emit_next_record(con, handover, cookie);
+				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+									 !do_cond_resched);
 				printk_seq = nbcon_seq_read(con);
 			} else {
 				progress = console_emit_next_record(con, handover, cookie);
@ kernel/printk/printk.c:3165 @ static bool console_flush_all(bool do_co
 	return false;
 }
 
-/**
- * console_unlock - unblock the console subsystem from printing
- *
- * Releases the console_lock which the caller holds to block printing of
- * the console subsystem.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk().  If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * console_unlock(); may be called from any context.
- */
-void console_unlock(void)
+static void console_flush_and_unlock(void)
 {
 	bool do_cond_resched;
 	bool handover;
@ kernel/printk/printk.c:3209 @ void console_unlock(void)
 		 */
 	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
 }
+
+/**
+ * console_unlock - unblock the console subsystem from printing
+ *
+ * Releases the console_lock which the caller holds to block printing of
+ * the console subsystem.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk().  If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+	/*
+	 * Forced threading relies on kthread and atomic consoles for
+	 * printing. It never attempts to print from console_unlock().
+	 */
+	if (force_printkthreads()) {
+		__console_unlock();
+		return;
+	}
+
+	console_flush_and_unlock();
+}
 EXPORT_SYMBOL(console_unlock);
 
 /**
@ kernel/printk/printk.c:3444 @ void console_start(struct console *conso
 
 	if (flags & CON_NBCON)
 		nbcon_kthread_wake(console);
+	else
+		wake_up_legacy_kthread();
 
 	__pr_flush(console, 1000, true);
 }
 EXPORT_SYMBOL(console_start);
 
+#ifdef CONFIG_PRINTK
+static bool printer_should_wake(void)
+{
+	bool available = false;
+	struct console *con;
+	int cookie;
+
+	if (kthread_should_stop())
+		return true;
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
+
+		/*
+		 * The legacy printer thread is only for legacy consoles,
+		 * unless the nbcon console has no kthread printer.
+		 */
+		if ((flags & CON_NBCON) && con->kthread)
+			continue;
+
+		if (!console_is_usable(con, flags, true))
+			continue;
+
+		if (flags & CON_NBCON) {
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			/*
+			 * It is safe to read @seq because only this
+			 * thread context updates @seq.
+			 */
+			printk_seq = con->seq;
+		}
+
+		if (prb_read_valid(prb, printk_seq, NULL)) {
+			available = true;
+			break;
+		}
+	}
+	console_srcu_read_unlock(cookie);
+
+	return available;
+}
+
+static int nbcon_legacy_kthread_func(void *unused)
+{
+	int error;
+
+	for (;;) {
+		error = wait_event_interruptible(legacy_wait, printer_should_wake());
+
+		if (kthread_should_stop())
+			break;
+
+		if (error)
+			continue;
+
+		console_lock();
+		console_flush_and_unlock();
+	}
+
+	return 0;
+}
+
+void nbcon_legacy_kthread_create(void)
+{
+	struct task_struct *kt;
+
+	lockdep_assert_held(&console_mutex);
+
+	if (!force_printkthreads())
+		return;
+
+	if (!printk_threads_enabled || nbcon_legacy_kthread)
+		return;
+
+	kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy");
+	if (IS_ERR(kt)) {
+		pr_err("unable to start legacy printing thread\n");
+		return;
+	}
+
+	nbcon_legacy_kthread = kt;
+
+	/*
+	 * It is important that console printing threads are scheduled
+	 * shortly after a printk call and with generous runtime budgets.
+	 */
+	sched_set_normal(nbcon_legacy_kthread, -20);
+}
+#endif /* CONFIG_PRINTK */
+
 static int __read_mostly keep_bootcon;
 
 static int __init keep_bootcon_setup(char *str)
@ kernel/printk/printk.c:3832 @ void register_console(struct console *ne
 	} else {
 		have_legacy_console = true;
 		newcon->seq = init_seq;
+		nbcon_legacy_kthread_create();
 	}
 
 	if (newcon->flags & CON_BOOT)
@ kernel/printk/printk.c:3999 @ static int unregister_console_locked(str
 			nbcon_kthread_create(c);
 	}
 
+#ifdef CONFIG_PRINTK
+	if (!printing_via_unlock && nbcon_legacy_kthread) {
+		kthread_stop(nbcon_legacy_kthread);
+		nbcon_legacy_kthread = NULL;
+	}
+#endif
+
 	return res;
 }
 
@ kernel/printk/printk.c:4164 @ static bool __pr_flush(struct console *c
 
 	seq = prb_next_reserve_seq(prb);
 
-	/* Flush the consoles so that records up to @seq are printed. */
-	if (printing_via_unlock) {
+	/*
+	 * Flush the consoles so that records up to @seq are printed.
+	 * Otherwise this function will just wait for the threaded printers
+	 * to print up to @seq.
+	 */
+	if (printing_via_unlock && !force_printkthreads()) {
 		console_lock();
 		console_unlock();
 	}
@ kernel/printk/printk.c:4283 @ static void wake_up_klogd_work_func(stru
 	int pending = this_cpu_xchg(printk_pending, 0);
 
 	if (pending & PRINTK_PENDING_OUTPUT) {
-		/* If trylock fails, someone else is doing the printing */
-		if (console_trylock())
-			console_unlock();
+		if (force_printkthreads()) {
+			wake_up_legacy_kthread();
+		} else {
+			/*
+			 * If trylock fails, some other context
+			 * will do the printing.
+			 */
+			if (console_trylock())
+				console_unlock();
+		}
 	}
 
 	if (pending & PRINTK_PENDING_WAKEUP)