Index: linux-6.1.90-rt30/arch/arm/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/Kconfig +++ linux-6.1.90-rt30/arch/arm/Kconfig @@ -33,6 +33,7 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE + select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_MEMTEST @@ -71,7 +72,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL @@ -94,7 +95,7 @@ config ARM select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU select HAVE_EXIT_THREAD - select HAVE_FAST_GUP if ARM_LPAE + select HAVE_FAST_GUP if ARM_LPAE && !(PREEMPT_RT && HIGHPTE) select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER if !XIP_KERNEL @@ -115,6 +116,8 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM + select HAVE_PREEMPT_LAZY select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ Index: linux-6.1.90-rt30/arch/arm/include/asm/thread_info.h =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/include/asm/thread_info.h +++ linux-6.1.90-rt30/arch/arm/include/asm/thread_info.h @@ -62,6 +62,7 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ __u32 cpu; /* cpu */ __u32 cpu_domain; /* cpu domain */ struct cpu_context_save cpu_context; /* cpu context */ @@ -129,6 +130,7 @@ extern int vfp_restore_user_hwstate(stru #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ #define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ +#define TIF_NEED_RESCHED_LAZY 5 #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -148,6 +150,7 @@ extern int vfp_restore_user_hwstate(stru #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ @@ -157,7 +160,8 @@ extern int vfp_restore_user_hwstate(stru /* * Change these and you break ASM code in entry-common.S */ -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ + _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ _TIF_NOTIFY_SIGNAL) Index: linux-6.1.90-rt30/arch/arm/kernel/asm-offsets.c =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/kernel/asm-offsets.c +++ 
linux-6.1.90-rt30/arch/arm/kernel/asm-offsets.c @@ -43,6 +43,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); Index: linux-6.1.90-rt30/arch/arm/kernel/entry-armv.S =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/kernel/entry-armv.S +++ linux-6.1.90-rt30/arch/arm/kernel/entry-armv.S @@ -222,11 +222,18 @@ __irq_svc: #ifdef CONFIG_PREEMPTION ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 + bne 1f @ return from exception + ldr r0, [tsk, #TI_FLAGS] @ get flags + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set + blne svc_preempt @ preempt! + + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r8, #0 @ if preempt lazy count != 0 movne r0, #0 @ force flags to 0 - tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED_LAZY blne svc_preempt +1: #endif svc_exit r5, irq = 1 @ return from exception @@ -241,8 +248,14 @@ svc_preempt: 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED + bne 1b + tst r0, #_TIF_NEED_RESCHED_LAZY reteq r8 @ go again - b 1b + ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r0, #0 @ if preempt lazy count != 0 + beq 1b + ret r8 @ go again + #endif __und_fault: Index: linux-6.1.90-rt30/arch/arm/kernel/signal.c =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/kernel/signal.c +++ linux-6.1.90-rt30/arch/arm/kernel/signal.c @@ -607,7 +607,8 @@ do_work_pending(struct pt_regs *regs, un */ trace_hardirqs_off(); do { - if (likely(thread_flags & _TIF_NEED_RESCHED)) { + if (likely(thread_flags & (_TIF_NEED_RESCHED | + _TIF_NEED_RESCHED_LAZY))) { schedule(); } else { if (unlikely(!user_mode(regs))) Index: linux-6.1.90-rt30/arch/arm/mm/fault.c =================================================================== --- linux-6.1.90-rt30.orig/arch/arm/mm/fault.c +++ linux-6.1.90-rt30/arch/arm/mm/fault.c @@ -400,6 +400,9 @@ do_translation_fault(unsigned long addr, if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); + if (interrupts_enabled(regs)) + local_irq_enable(); + if (user_mode(regs)) goto bad_area; @@ -470,6 +473,9 @@ do_translation_fault(unsigned long addr, static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + if (interrupts_enabled(regs)) + local_irq_enable(); + do_bad_area(addr, fsr, regs); return 0; } Index: linux-6.1.90-rt30/arch/arm64/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/arch/arm64/Kconfig +++ linux-6.1.90-rt30/arch/arm64/Kconfig @@ -93,6 +93,7 @@ config ARM64 select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_SUPPORTS_PAGE_TABLE_CHECK + select ARCH_SUPPORTS_RT select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT @@ -199,6 +200,7 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_PREEMPT_LAZY select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_FUNCTION_ARG_ACCESS_API select MMU_GATHER_RCU_TABLE_FREE
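Before the arm64 preempt.h changes that follow, it helps to keep the PREEMPT_LAZY model in mind: the scheduler gains a second, weaker reschedule request alongside TIF_NEED_RESCHED. A minimal sketch of the idea, using the flag and counter names this patch introduces (the scheduler-core side of the series is not shown in this section, so this is an illustration, not the patch's literal code):

/*
 * Sketch only: RT/DL tasks must preempt immediately, so they get
 * TIF_NEED_RESCHED; fair-class tasks get TIF_NEED_RESCHED_LAZY, which
 * the architecture hunks below honor only once both preempt_count and
 * preempt_lazy_count have dropped to zero.
 */
static void post_resched_request(struct task_struct *curr, bool immediate)
{
	if (immediate)
		set_tsk_thread_flag(curr, TIF_NEED_RESCHED);
	else
		set_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY);
}

This is what keeps spurious preemption of SCHED_OTHER tasks down on RT kernels while preserving preemption latency for the realtime classes.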
Index: linux-6.1.90-rt30/arch/arm64/include/asm/preempt.h =================================================================== --- linux-6.1.90-rt30.orig/arch/arm64/include/asm/preempt.h +++ linux-6.1.90-rt30/arch/arm64/include/asm/preempt.h @@ -71,13 +71,36 @@ static inline bool __preempt_count_dec_a * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE * pair. */ - return !pc || !READ_ONCE(ti->preempt_count); + if (!pc || !READ_ONCE(ti->preempt_count)) + return true; +#ifdef CONFIG_PREEMPT_LAZY + if ((pc & ~PREEMPT_NEED_RESCHED)) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return false; +#endif } static inline bool should_resched(int preempt_offset) { +#ifdef CONFIG_PREEMPT_LAZY + u64 pc = READ_ONCE(current_thread_info()->preempt_count); + if (pc == preempt_offset) + return true; + + if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset) + return false; + + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else u64 pc = READ_ONCE(current_thread_info()->preempt_count); return pc == preempt_offset; +#endif } #ifdef CONFIG_PREEMPTION Index: linux-6.1.90-rt30/arch/arm64/include/asm/thread_info.h =================================================================== --- linux-6.1.90-rt30.orig/arch/arm64/include/asm/thread_info.h +++ linux-6.1.90-rt30/arch/arm64/include/asm/thread_info.h @@ -26,6 +26,7 @@ struct thread_info { #ifdef CONFIG_ARM64_SW_TTBR0_PAN u64 ttbr0; /* saved TTBR0_EL1 */ #endif + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ union { u64 preempt_count; /* 0 => preemptible, <0 => bug */ struct { @@ -68,6 +69,7 @@ int arch_dup_task_struct(struct task_str #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ +#define TIF_NEED_RESCHED_LAZY 7 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -100,8 +102,10 @@ int arch_dup_task_struct(struct task_str #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ + _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ _TIF_NOTIFY_SIGNAL) @@ -110,6 +114,8 @@ int arch_dup_task_struct(struct task_str _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + #ifdef CONFIG_SHADOW_CALL_STACK #define INIT_SCS \ .scs_base = init_shadow_call_stack, \ Index: linux-6.1.90-rt30/arch/arm64/kernel/asm-offsets.c =================================================================== --- linux-6.1.90-rt30.orig/arch/arm64/kernel/asm-offsets.c +++ linux-6.1.90-rt30/arch/arm64/kernel/asm-offsets.c @@ -32,6 +32,7 @@ int main(void) DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu)); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); +
DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif Index: linux-6.1.90-rt30/arch/arm64/kernel/signal.c =================================================================== --- linux-6.1.90-rt30.orig/arch/arm64/kernel/signal.c +++ linux-6.1.90-rt30/arch/arm64/kernel/signal.c @@ -1108,7 +1108,7 @@ static void do_signal(struct pt_regs *re void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { - if (thread_flags & _TIF_NEED_RESCHED) { + if (thread_flags & _TIF_NEED_RESCHED_MASK) { /* Unmask Debug and SError for the next task */ local_daif_restore(DAIF_PROCCTX_NOIRQ); Index: linux-6.1.90-rt30/arch/powerpc/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/Kconfig +++ linux-6.1.90-rt30/arch/powerpc/Kconfig @@ -151,6 +151,7 @@ config PPC select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x + select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_USE_MEMTEST @@ -242,8 +243,10 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE + select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM select HAVE_RSEQ select HAVE_SETUP_PER_CPU_AREA if PPC64 select HAVE_SOFTIRQ_ON_OWN_STACK Index: linux-6.1.90-rt30/arch/powerpc/include/asm/stackprotector.h =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/include/asm/stackprotector.h +++ linux-6.1.90-rt30/arch/powerpc/include/asm/stackprotector.h @@ -24,7 +24,11 @@ static __always_inline void boot_init_st unsigned long canary; /* Try to get a semi random initial value. 
*/ +#ifdef CONFIG_PREEMPT_RT + canary = (unsigned long)&canary; +#else canary = get_random_canary(); +#endif canary ^= mftb(); canary ^= LINUX_VERSION_CODE; canary &= CANARY_MASK; Index: linux-6.1.90-rt30/arch/powerpc/include/asm/thread_info.h =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/include/asm/thread_info.h +++ linux-6.1.90-rt30/arch/powerpc/include/asm/thread_info.h @@ -53,6 +53,8 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_lazy_count; /* 0 => preemptable, + <0 => BUG */ #ifdef CONFIG_SMP unsigned int cpu; #endif @@ -77,6 +79,7 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ .preempt_count = INIT_PREEMPT_COUNT, \ + .preempt_lazy_count = 0, \ .flags = 0, \ } @@ -102,6 +105,7 @@ void arch_setup_new_exec(void); #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -117,6 +121,7 @@ void arch_setup_new_exec(void); #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 20 /* 32 bit binary */ + /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) @@ -131,6 +136,7 @@ void arch_setup_new_exec(void); #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) @@ -146,6 +152,8 @@ void arch_setup_new_exec(void); _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NEED_RESCHED_LAZY | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ _TIF_NOTIFY_SIGNAL) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) Index: linux-6.1.90-rt30/arch/powerpc/kernel/interrupt.c =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/kernel/interrupt.c +++ linux-6.1.90-rt30/arch/powerpc/kernel/interrupt.c @@ -186,7 +186,7 @@ interrupt_exit_user_prepare_main(unsigne ti_flags = read_thread_flags(); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); - if (ti_flags & _TIF_NEED_RESCHED) { + if (ti_flags & _TIF_NEED_RESCHED_MASK) { schedule(); } else { @@ -396,11 +396,15 @@ notrace unsigned long interrupt_exit_ker /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: - if (IS_ENABLED(CONFIG_PREEMPT)) { + if (IS_ENABLED(CONFIG_PREEMPTION)) { /* Return to preemptible kernel context */ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) preempt_schedule_irq(); + } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) { + if ((preempt_count() == 0) && + (current_thread_info()->preempt_lazy_count == 0)) + preempt_schedule_irq(); } } Index: linux-6.1.90-rt30/arch/powerpc/kernel/traps.c =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/kernel/traps.c +++ linux-6.1.90-rt30/arch/powerpc/kernel/traps.c @@ -261,12 +261,17 @@ static char *get_mmu_str(void) static int __die(const char *str, struct pt_regs *regs, long err) { + const char *pr = ""; + printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); + if (IS_ENABLED(CONFIG_PREEMPTION)) + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", PAGE_SIZE / 1024, get_mmu_str(), - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", + pr, IS_ENABLED(CONFIG_SMP) ? " SMP" : "", IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", Index: linux-6.1.90-rt30/arch/powerpc/kvm/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/kvm/Kconfig +++ linux-6.1.90-rt30/arch/powerpc/kvm/Kconfig @@ -225,6 +225,7 @@ config KVM_E500MC config KVM_MPIC bool "KVM in-kernel MPIC emulation" depends on KVM && PPC_E500 + depends on !PREEMPT_RT select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING Index: linux-6.1.90-rt30/arch/powerpc/platforms/pseries/iommu.c =================================================================== --- linux-6.1.90-rt30.orig/arch/powerpc/platforms/pseries/iommu.c +++ linux-6.1.90-rt30/arch/powerpc/platforms/pseries/iommu.c @@ -24,6 +24,7 @@ #include <linux/of.h> #include <linux/iommu.h> #include <linux/rculist.h> +#include <linux/local_lock.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -200,7 +201,13 @@ static int tce_build_pSeriesLP(unsigned return ret; } -static DEFINE_PER_CPU(__be64 *, tce_page); +struct tce_page { + __be64 * page; + local_lock_t lock; +}; +static DEFINE_PER_CPU(struct tce_page, tce_page) = { + .lock = INIT_LOCAL_LOCK(lock), +}; static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, @@ -223,9 +230,10 @@ static int tce_buildmulti_pSeriesLP(stru direction, attrs); } - local_irq_save(flags); /* to protect tcep and the page behind it */ + /* to protect tcep and the page behind it */ + local_lock_irqsave(&tce_page.lock, flags); - tcep = __this_cpu_read(tce_page); + tcep = __this_cpu_read(tce_page.page); /* This is safe to do since interrupts are off when we're called * from iommu_alloc{,_sg}() */ @@ -234,12 +242,12 @@ tcep = (__be64 *)__get_free_page(GFP_ATOMIC); /* If allocation fails, fall back to the loop implementation */ if (!tcep) { - local_irq_restore(flags); + local_unlock_irqrestore(&tce_page.lock, flags); return tce_build_pSeriesLP(tbl->it_index, tcenum, tceshift, npages, uaddr, direction, attrs); } - __this_cpu_write(tce_page, tcep); + __this_cpu_write(tce_page.page, tcep); } rpn = __pa(uaddr) >> tceshift; @@ -269,7 +277,7 @@ static int tce_buildmulti_pSeriesLP(stru tcenum += limit; } while (npages > 0 && !rc); - local_irq_restore(flags); + local_unlock_irqrestore(&tce_page.lock, flags); if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { ret = (int)rc; @@ -454,16 +462,17 @@ static int tce_setrange_multi_pSeriesLP( DMA_BIDIRECTIONAL, 0); } - local_irq_disable(); /* to protect tcep and the page behind it */ - tcep = __this_cpu_read(tce_page); + /* to protect tcep and the page behind it */ + local_lock_irq(&tce_page.lock); + tcep = __this_cpu_read(tce_page.page); if (!tcep) { tcep = (__be64 *)__get_free_page(GFP_ATOMIC); if (!tcep) { - local_irq_enable(); + local_unlock_irq(&tce_page.lock); return -ENOMEM; } - __this_cpu_write(tce_page, tcep); + __this_cpu_write(tce_page.page, tcep); } proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; @@ -506,7 +515,7 @@ static int tce_setrange_multi_pSeriesLP( /* error cleanup: caller will clear whole range */ - local_irq_enable(); + local_unlock_irq(&tce_page.lock); return rc; }
Index: linux-6.1.90-rt30/arch/x86/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/arch/x86/Kconfig +++ linux-6.1.90-rt30/arch/x86/Kconfig @@ -114,6 +114,7 @@ config X86 select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_LTO_CLANG_THIN + select ARCH_SUPPORTS_RT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS @@ -251,6 +252,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT select MMU_GATHER_MERGE_VMAS select HAVE_POSIX_CPU_TIMERS_TASK_WORK Index: linux-6.1.90-rt30/arch/x86/include/asm/preempt.h =================================================================== --- linux-6.1.90-rt30.orig/arch/x86/include/asm/preempt.h +++ linux-6.1.90-rt30/arch/x86/include/asm/preempt.h @@ -90,17 +90,48 @@ static __always_inline void __preempt_co * a decrement which hits zero means we have no preempt_count and should * reschedule. */ -static __always_inline bool __preempt_count_dec_and_test(void) +static __always_inline bool ____preempt_count_dec_and_test(void) { return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); } +static __always_inline bool __preempt_count_dec_and_test(void) +{ + if (____preempt_count_dec_and_test()) + return true; +#ifdef CONFIG_PREEMPT_LAZY + if (preempt_count()) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return false; +#endif +} + /* * Returns true when we need to resched and can (barring IRQ state). */ static __always_inline bool should_resched(int preempt_offset) { +#ifdef CONFIG_PREEMPT_LAZY + u32 tmp; + tmp = raw_cpu_read_4(__preempt_count); + if (tmp == preempt_offset) + return true; + + /* preempt count == 0 ? */ + tmp &= ~PREEMPT_NEED_RESCHED; + if (tmp != preempt_offset) + return false; + /* XXX PREEMPT_LOCK_OFFSET */ + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); +#endif } #ifdef CONFIG_PREEMPTION Index: linux-6.1.90-rt30/arch/x86/include/asm/thread_info.h =================================================================== --- linux-6.1.90-rt30.orig/arch/x86/include/asm/thread_info.h +++ linux-6.1.90-rt30/arch/x86/include/asm/thread_info.h @@ -57,6 +57,8 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long syscall_work; /* SYSCALL_WORK_ flags */ u32 status; /* thread synchronous flags */ + int preempt_lazy_count; /* 0 => lazy preemptable + <0 => BUG */ #ifdef CONFIG_SMP u32 cpu; /* current CPU */ #endif @@ -65,6 +67,7 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ .flags = 0, \ + .preempt_lazy_count = 0, \ } #else /* !__ASSEMBLY__ */ @@ -92,6 +95,7 @@ struct thread_info { #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */ +#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ @@ -115,6 +119,7 @@ struct thread_info { #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
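A note on the pseries/iommu.c conversion above: replacing local_irq_save() around per-CPU scratch data with a local_lock_t is the standard RT idiom. A generic, self-contained sketch of that pattern (the names here are illustrative, not from the patch):

#include <linux/local_lock.h>

struct pcpu_scratch {
	void *page;
	local_lock_t lock;
};
static DEFINE_PER_CPU(struct pcpu_scratch, pcpu_scratch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void touch_scratch(void)
{
	unsigned long flags;

	/* On !RT this compiles down to local_irq_save() as before; on
	 * RT it is a per-CPU sleeping lock, so the section stays
	 * preemptible and has a real owner for priority inheritance. */
	local_lock_irqsave(&pcpu_scratch.lock, flags);
	/* this_cpu data can be used safely here */
	local_unlock_irqrestore(&pcpu_scratch.lock, flags);
}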
Index: linux-6.1.90-rt30/drivers/block/zram/zram_drv.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/block/zram/zram_drv.c +++ linux-6.1.90-rt30/drivers/block/zram/zram_drv.c @@ -57,6 +57,40 @@ static void zram_free_page(struct zram * static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, u32 index, int offset, struct bio *bio); +#ifdef CONFIG_PREEMPT_RT +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) +{ + size_t index; + + for (index = 0; index < num_pages; index++) + spin_lock_init(&zram->table[index].lock); +} + +static int zram_slot_trylock(struct zram *zram, u32 index) +{ + int ret; + + ret = spin_trylock(&zram->table[index].lock); + if (ret) + __set_bit(ZRAM_LOCK, &zram->table[index].flags); + return ret; +} + +static void zram_slot_lock(struct zram *zram, u32 index) +{ + spin_lock(&zram->table[index].lock); + __set_bit(ZRAM_LOCK, &zram->table[index].flags); +} + +static void zram_slot_unlock(struct zram *zram, u32 index) +{ + __clear_bit(ZRAM_LOCK, &zram->table[index].flags); + spin_unlock(&zram->table[index].lock); +} + +#else + +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } static int zram_slot_trylock(struct zram *zram, u32 index) { @@ -72,6 +106,7 @@ static void zram_slot_unlock(struct zram { bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); } +#endif static inline bool init_done(struct zram *zram) { @@ -1187,6 +1222,7 @@ static bool zram_meta_alloc(struct zram if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); + zram_meta_init_table_locks(zram, num_pages); return true; } Index: linux-6.1.90-rt30/drivers/block/zram/zram_drv.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/block/zram/zram_drv.h +++ linux-6.1.90-rt30/drivers/block/zram/zram_drv.h @@ -62,6 +62,9 @@ struct zram_table_entry { unsigned long element; }; unsigned long flags; +#ifdef CONFIG_PREEMPT_RT + spinlock_t lock; +#endif #ifdef CONFIG_ZRAM_MEMORY_TRACKING ktime_t ac_time; #endif
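Why the zram table lock needed the treatment above: bit_spin_lock() is a true busy-wait lock with no owner field, so on RT it can neither sleep nor participate in priority inheritance. A condensed sketch of its !RT behavior (paraphrasing include/linux/bit_spinlock.h, SMP case):

static inline void bit_spin_lock_sketch(int bitnum, unsigned long *addr)
{
	preempt_disable();			/* holder becomes non-preemptible */
	while (test_and_set_bit_lock(bitnum, addr)) {
		preempt_enable();
		do {
			cpu_relax();		/* waiters burn the CPU */
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
}

Swapping in a per-entry spinlock_t (an rtmutex-backed sleeping lock on RT) while still setting the ZRAM_LOCK bit keeps existing readers of zram->table[index].flags working unchanged.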
Index: linux-6.1.90-rt30/drivers/char/tpm/tpm_tis.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/char/tpm/tpm_tis.c +++ linux-6.1.90-rt30/drivers/char/tpm/tpm_tis.c @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to return container_of(data, struct tpm_tis_tcg_phy, priv); } +#ifdef CONFIG_PREEMPT_RT +/* + * Flushes previous write operations to chip so that subsequent + * ioread*()s won't stall a CPU. + */ +static inline void tpm_tis_flush(void __iomem *iobase) +{ + ioread8(iobase + TPM_ACCESS(0)); +} +#else +#define tpm_tis_flush(iobase) do { } while (0) +#endif + +static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) +{ + iowrite8(b, iobase + addr); + tpm_tis_flush(iobase); +} + +static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) +{ + iowrite32(b, iobase + addr); + tpm_tis_flush(iobase); +} + static int interrupts = -1; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); @@ -202,12 +227,12 @@ static int tpm_tcg_write_bytes(struct tp switch (io_mode) { case TPM_TIS_PHYS_8: while (len--) - iowrite8(*value++, phy->iobase + addr); + tpm_tis_iowrite8(*value++, phy->iobase, addr); break; case TPM_TIS_PHYS_16: return -EINVAL; case TPM_TIS_PHYS_32: - iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr); + tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr); break; } Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/Kconfig +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/Kconfig @@ -3,7 +3,6 @@ config DRM_I915 tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" depends on DRM depends on X86 && PCI - depends on !PREEMPT_RT select INTEL_GTT if X86 select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/display/intel_crtc.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/display/intel_crtc.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/display/intel_crtc.c @@ -521,7 +521,8 @@ void intel_pipe_update_start(struct inte */ intel_psr_wait_for_idle_locked(new_crtc_state); - local_irq_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_disable(); crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; @@ -546,11 +547,13 @@ void intel_pipe_update_start(struct inte break; } - local_irq_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_enable(); timeout = schedule_timeout(timeout); - local_irq_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_disable(); } finish_wait(wq, &wait); @@ -583,7 +586,8 @@ void intel_pipe_update_start(struct inte return; irq_disable: - local_irq_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_disable(); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) @@ -684,7 +688,8 @@ void intel_pipe_update_end(struct intel_ */ intel_vrr_send_push(new_crtc_state); - local_irq_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_enable(); if (intel_vgpu_active(dev_priv)) return; Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -315,7 +315,12 @@ void __intel_breadcrumbs_park(struct int return; /* Kick the work once more to drain the signalers, and disarm the irq */ - irq_work_queue(&b->irq_work); + irq_work_sync(&b->irq_work); + while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) { + irq_work_queue(&b->irq_work); + cond_resched(); + irq_work_sync(&b->irq_work); + } } void intel_breadcrumbs_free(struct kref *kref) Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_execlists_submission.c =================================================================== ---
linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -1302,7 +1302,7 @@ static void execlists_dequeue(struct int * and context switches) submission. */ - spin_lock(&sched_engine->lock); + spin_lock_irq(&sched_engine->lock); /* * If the queue is higher priority than the last @@ -1402,7 +1402,7 @@ static void execlists_dequeue(struct int * Even if ELSP[1] is occupied and not worthy * of timeslices, our queue might be. */ - spin_unlock(&sched_engine->lock); + spin_unlock_irq(&sched_engine->lock); return; } } @@ -1428,7 +1428,7 @@ static void execlists_dequeue(struct int if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.sched_engine->lock); - spin_unlock(&engine->sched_engine->lock); + spin_unlock_irq(&engine->sched_engine->lock); return; /* leave this for another sibling */ } @@ -1590,7 +1590,7 @@ done: */ sched_engine->queue_priority_hint = queue_prio(sched_engine); i915_sched_engine_reset_on_empty(sched_engine); - spin_unlock(&sched_engine->lock); + spin_unlock_irq(&sched_engine->lock); /* * We can skip poking the HW if we ended up with exactly the same set @@ -1616,13 +1616,6 @@ done: } } -static void execlists_dequeue_irq(struct intel_engine_cs *engine) -{ - local_irq_disable(); /* Suspend interrupts across request submission */ - execlists_dequeue(engine); - local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */ -} - static void clear_ports(struct i915_request **ports, int count) { memset_p((void **)ports, NULL, count); @@ -2476,7 +2469,7 @@ static void execlists_submission_tasklet } if (!engine->execlists.pending[0]) { - execlists_dequeue_irq(engine); + execlists_dequeue(engine); start_timeslice(engine); } Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_reset.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/gt/intel_reset.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/gt/intel_reset.c @@ -174,13 +174,13 @@ static int i915_do_reset(struct intel_gt /* Assert reset for at least 20 usec, and wait for acknowledgement. */ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); udelay(50); - err = wait_for_atomic(i915_in_reset(pdev), 50); + err = _wait_for_atomic(i915_in_reset(pdev), 50, 0); /* Clear the reset request. 
*/ pci_write_config_byte(pdev, I915_GDRST, 0); udelay(50); if (!err) - err = wait_for_atomic(!i915_in_reset(pdev), 50); + err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0); return err; } @@ -200,7 +200,7 @@ static int g33_do_reset(struct intel_gt struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for_atomic(g4x_reset_complete(pdev), 50); + return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); } static int g4x_do_reset(struct intel_gt *gt, @@ -217,7 +217,7 @@ static int g4x_do_reset(struct intel_gt pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); if (ret) { GT_TRACE(gt, "Wait for media reset failed\n"); goto out; @@ -225,7 +225,7 @@ static int g4x_do_reset(struct intel_gt pci_write_config_byte(pdev, I915_GDRST, GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); if (ret) { GT_TRACE(gt, "Wait for render reset failed\n"); goto out; @@ -718,9 +718,7 @@ int __intel_gt_reset(struct intel_gt *gt intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { GT_TRACE(gt, "engine_mask=%x\n", engine_mask); - preempt_disable(); ret = reset(gt, engine_mask, retry); - preempt_enable(); } intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_irq.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/i915_irq.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_irq.c @@ -917,7 +917,8 @@ static bool i915_get_crtc_scanoutpos(str */ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); /* Get optional system timestamp before query. */ if (stime) @@ -981,7 +982,8 @@ static bool i915_get_crtc_scanoutpos(str if (etime) *etime = ktime_get(); - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_request.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/i915_request.c +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_request.c @@ -609,7 +609,6 @@ bool __i915_request_submit(struct i915_r RQ_TRACE(request, "\n"); - GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->sched_engine->lock); /* @@ -718,7 +717,6 @@ void __i915_request_unsubmit(struct i915 */ RQ_TRACE(request, "\n"); - GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->sched_engine->lock); /* Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_trace.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/i915_trace.h +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_trace.h @@ -6,6 +6,10 @@ #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _I915_TRACE_H_ +#ifdef CONFIG_PREEMPT_RT +#define NOTRACE +#endif + #include <linux/stringify.h> #include <linux/types.h> #include <linux/tracepoint.h> @@ -323,7 +327,7 @@ DEFINE_EVENT(i915_request, i915_request_ TP_ARGS(rq) ); -#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) +#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE) DEFINE_EVENT(i915_request, i915_request_guc_submit, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) Index: linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_utils.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/gpu/drm/i915/i915_utils.h +++ linux-6.1.90-rt30/drivers/gpu/drm/i915/i915_utils.h @@ -294,7 +294,7 @@ wait_remaining_ms_from_jiffies(unsigned #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ -#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) +#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT) # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
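The i915_utils.h change above disables a debug assertion that is only meaningful when acquiring a spinlock raises the preempt count. A compact illustration of the false positive it avoids on PREEMPT_RT (illustrative code, not from the driver):

static DEFINE_SPINLOCK(demo_lock);

static void demo(void)
{
	spin_lock(&demo_lock);
	/* On !RT this context is atomic, in_atomic() is true and the
	 * check below stays quiet. On RT, spinlock_t sections remain
	 * preemptible and do not bump preempt_count, so in_atomic()
	 * returns false and the same assertion would fire spuriously. */
	WARN_ON_ONCE(!in_atomic());	/* ~ _WAIT_FOR_ATOMIC_CHECK(true) */
	spin_unlock(&demo_lock);
}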
Index: linux-6.1.90-rt30/drivers/net/ethernet/alacritech/slic.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/alacritech/slic.h +++ linux-6.1.90-rt30/drivers/net/ethernet/alacritech/slic.h @@ -288,13 +288,13 @@ do { \ u64_stats_update_end(&(st)->syncp); \ } while (0) -#define SLIC_GET_STATS_COUNTER(newst, st, counter) \ -{ \ - unsigned int start; \ +#define SLIC_GET_STATS_COUNTER(newst, st, counter) \ +{ \ + unsigned int start; \ do { \ - start = u64_stats_fetch_begin_irq(&(st)->syncp); \ - newst = (st)->counter; \ - } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \ + start = u64_stats_fetch_begin(&(st)->syncp); \ + newst = (st)->counter; \ + } while (u64_stats_fetch_retry(&(st)->syncp, start)); \ } struct slic_upr { Index: linux-6.1.90-rt30/drivers/net/ethernet/amazon/ena/ena_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -118,9 +118,9 @@ static void ena_safe_update_stat(u64 *sr unsigned int start; do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); *(dst) = *src; - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); } static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) Index: linux-6.1.90-rt30/drivers/net/ethernet/amazon/ena/ena_netdev.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ linux-6.1.90-rt30/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3345,10 +3345,10 @@ static void ena_get_stats64(struct net_d tx_ring = &adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + start = u64_stats_fetch_begin(&tx_ring->syncp); packets = tx_ring->tx_stats.cnt; bytes = tx_ring->tx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; @@ -3356,20 +3356,20 @@ static void ena_get_stats64(struct net_d rx_ring = &adapter->rx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + start = u64_stats_fetch_begin(&rx_ring->syncp); packets = rx_ring->rx_stats.cnt; bytes = rx_ring->rx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } do { - start = u64_stats_fetch_begin_irq(&adapter->syncp); + start = u64_stats_fetch_begin(&adapter->syncp); rx_drops = adapter->dev_stats.rx_drops; tx_drops = adapter->dev_stats.tx_drops; - } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); + } while (u64_stats_fetch_retry(&adapter->syncp, start)); stats->rx_dropped = rx_drops; stats->tx_dropped = tx_drops;
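The long run of driver hunks before and after this point is one mechanical conversion: u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() become the plain variants, which in this tree provide the same guarantees without disabling interrupts on the reader side. The reader pattern itself, a seqcount-style retry loop, is unchanged; a self-contained sketch with illustrative names:

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;	/* a torn 64-bit read here is */
		*bytes = s->bytes;	/* detected and retried below */
	} while (u64_stats_fetch_retry(&s->syncp, start));
}

On 64-bit kernels the begin/retry pair compiles away entirely; on 32-bit (and with PREEMPT_RT) it falls back to the seqcount or lock based protection supplied by the writer side.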
Index: linux-6.1.90-rt30/drivers/net/ethernet/aquantia/atlantic/aq_ring.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ linux-6.1.90-rt30/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -941,7 +941,7 @@ unsigned int aq_ring_fill_stats_data(str /* This data should mimic aq_ethtool_queue_rx_stat_names structure */ do { count = 0; - start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp); + start = u64_stats_fetch_begin(&self->stats.rx.syncp); data[count] = self->stats.rx.packets; data[++count] = self->stats.rx.jumbo_packets; data[++count] = self->stats.rx.lro_packets; @@ -958,15 +958,15 @@ unsigned int aq_ring_fill_stats_data(str data[++count] = self->stats.rx.xdp_tx; data[++count] = self->stats.rx.xdp_invalid; data[++count] = self->stats.rx.xdp_redirect; - } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); + } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ do { count = 0; - start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp); + start = u64_stats_fetch_begin(&self->stats.tx.syncp); data[count] = self->stats.tx.packets; data[++count] = self->stats.tx.queue_restarts; - } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start)); + } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start)); } return ++count; Index: linux-6.1.90-rt30/drivers/net/ethernet/asix/ax88796c_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/asix/ax88796c_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/asix/ax88796c_main.c @@ -662,12 +662,12 @@ static void ax88796c_get_stats64(struct s = per_cpu_ptr(ax_local->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&s->syncp); + start = u64_stats_fetch_begin(&s->syncp); rx_packets = u64_stats_read(&s->rx_packets); rx_bytes = u64_stats_read(&s->rx_bytes); tx_packets = u64_stats_read(&s->tx_packets); tx_bytes = u64_stats_read(&s->tx_bytes); - } while (u64_stats_fetch_retry_irq(&s->syncp, start)); + } while (u64_stats_fetch_retry(&s->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/broadcom/b44.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/broadcom/b44.c +++ linux-6.1.90-rt30/drivers/net/ethernet/broadcom/b44.c @@ -1680,7 +1680,7 @@ static void b44_get_stats64(struct net_d unsigned int start; do { - start = u64_stats_fetch_begin_irq(&hwstat->syncp); + start = u64_stats_fetch_begin(&hwstat->syncp); /* Convert HW stats into rtnl_link_stats64 stats. 
*/ nstat->rx_packets = hwstat->rx_pkts; @@ -1714,7 +1714,7 @@ static void b44_get_stats64(struct net_d /* Carrier lost counter seems to be broken for some devices */ nstat->tx_carrier_errors = hwstat->tx_carrier_lost; #endif - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); } @@ -2084,12 +2084,12 @@ static void b44_get_ethtool_stats(struct do { data_src = &hwstat->tx_good_octets; data_dst = data; - start = u64_stats_fetch_begin_irq(&hwstat->syncp); + start = u64_stats_fetch_begin(&hwstat->syncp); for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) *data_dst++ = *data_src++; - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); } static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) Index: linux-6.1.90-rt30/drivers/net/ethernet/broadcom/bcmsysport.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/broadcom/bcmsysport.c +++ linux-6.1.90-rt30/drivers/net/ethernet/broadcom/bcmsysport.c @@ -457,10 +457,10 @@ static void bcm_sysport_update_tx_stats( for (q = 0; q < priv->netdev->num_tx_queues; q++) { ring = &priv->tx_rings[q]; do { - start = u64_stats_fetch_begin_irq(&priv->syncp); + start = u64_stats_fetch_begin(&priv->syncp); bytes = ring->bytes; packets = ring->packets; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + } while (u64_stats_fetch_retry(&priv->syncp, start)); *tx_bytes += bytes; *tx_packets += packets; @@ -504,9 +504,9 @@ static void bcm_sysport_get_stats(struct if (s->stat_sizeof == sizeof(u64) && s->type == BCM_SYSPORT_STAT_NETDEV64) { do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); data[i] = *(u64 *)p; - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); } else data[i] = *(u32 *)p; j++; @@ -1878,10 +1878,10 @@ static void bcm_sysport_get_stats64(stru &stats->tx_packets); do { - start = u64_stats_fetch_begin_irq(&priv->syncp); + start = u64_stats_fetch_begin(&priv->syncp); stats->rx_packets = stats64->rx_packets; stats->rx_bytes = stats64->rx_bytes; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + } while (u64_stats_fetch_retry(&priv->syncp, start)); } static void bcm_sysport_netif_start(struct net_device *dev) Index: linux-6.1.90-rt30/drivers/net/ethernet/cortina/gemini.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/cortina/gemini.c +++ linux-6.1.90-rt30/drivers/net/ethernet/cortina/gemini.c @@ -1949,7 +1949,7 @@ static void gmac_get_stats64(struct net_ /* Racing with RX NAPI */ do { - start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); + start = u64_stats_fetch_begin(&port->rx_stats_syncp); stats->rx_packets = port->stats.rx_packets; stats->rx_bytes = port->stats.rx_bytes; @@ -1961,11 +1961,11 @@ static void gmac_get_stats64(struct net_ stats->rx_crc_errors = port->stats.rx_crc_errors; stats->rx_frame_errors = port->stats.rx_frame_errors; - } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); /* Racing with MIB and TX completion interrupts */ do { - start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); + start = u64_stats_fetch_begin(&port->ir_stats_syncp); stats->tx_errors = port->stats.tx_errors; stats->tx_packets = port->stats.tx_packets; @@ -1975,15 +1975,15 @@ static void 
gmac_get_stats64(struct net_ stats->rx_missed_errors = port->stats.rx_missed_errors; stats->rx_fifo_errors = port->stats.rx_fifo_errors; - } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); /* Racing with hard_start_xmit */ do { - start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); + start = u64_stats_fetch_begin(&port->tx_stats_syncp); stats->tx_dropped = port->stats.tx_dropped; - } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); stats->rx_dropped += stats->rx_missed_errors; } @@ -2052,18 +2052,18 @@ static void gmac_get_ethtool_stats(struc /* Racing with MIB interrupt */ do { p = values; - start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); + start = u64_stats_fetch_begin(&port->ir_stats_syncp); for (i = 0; i < RX_STATS_NUM; i++) *p++ = port->hw_stats[i]; - } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); values = p; /* Racing with RX NAPI */ do { p = values; - start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); + start = u64_stats_fetch_begin(&port->rx_stats_syncp); for (i = 0; i < RX_STATUS_NUM; i++) *p++ = port->rx_stats[i]; @@ -2071,13 +2071,13 @@ static void gmac_get_ethtool_stats(struc *p++ = port->rx_csum_stats[i]; *p++ = port->rx_napi_exits; - } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); values = p; /* Racing with TX start_xmit */ do { p = values; - start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); + start = u64_stats_fetch_begin(&port->tx_stats_syncp); for (i = 0; i < TX_MAX_FRAGS; i++) { *values++ = port->tx_frag_stats[i]; @@ -2086,7 +2086,7 @@ static void gmac_get_ethtool_stats(struc *values++ = port->tx_frags_linearized; *values++ = port->tx_hw_csummed; - } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); } static int gmac_get_ksettings(struct net_device *netdev, Index: linux-6.1.90-rt30/drivers/net/ethernet/emulex/benet/be_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct struct be_rx_stats *stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_irq(&stats->sync); + start = u64_stats_fetch_begin(&stats->sync); data[base] = stats->rx_bytes; data[base + 1] = stats->rx_pkts; - } while (u64_stats_fetch_retry_irq(&stats->sync, start)); + } while (u64_stats_fetch_retry(&stats->sync, start)); for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { p = (u8 *)stats + et_rx_stats[i].offset; @@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct struct be_tx_stats *stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_irq(&stats->sync_compl); + start = u64_stats_fetch_begin(&stats->sync_compl); data[base] = stats->tx_compl; - } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); + } while (u64_stats_fetch_retry(&stats->sync_compl, start)); do { - start = u64_stats_fetch_begin_irq(&stats->sync); + start = u64_stats_fetch_begin(&stats->sync); for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { p = (u8 *)stats + et_tx_stats[i].offset; data[base + i] = (et_tx_stats[i].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - } while (u64_stats_fetch_retry_irq(&stats->sync, start)); + } while (u64_stats_fetch_retry(&stats->sync, start)); base += ETHTOOL_TXSTATS_NUM; } } Index: linux-6.1.90-rt30/drivers/net/ethernet/emulex/benet/be_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/emulex/benet/be_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/emulex/benet/be_main.c @@ -665,10 +665,10 @@ static void be_get_stats64(struct net_de const struct be_rx_stats *rx_stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_irq(&rx_stats->sync); + start = u64_stats_fetch_begin(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; bytes = rx_stats(rxo)->rx_bytes; - } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); + } while (u64_stats_fetch_retry(&rx_stats->sync, start)); stats->rx_packets += pkts; stats->rx_bytes += bytes; stats->multicast += rx_stats(rxo)->rx_mcast_pkts; @@ -680,10 +680,10 @@ static void be_get_stats64(struct net_de const struct be_tx_stats *tx_stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_irq(&tx_stats->sync); + start = u64_stats_fetch_begin(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; bytes = tx_stats(txo)->tx_bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); + } while (u64_stats_fetch_retry(&tx_stats->sync, start)); stats->tx_packets += pkts; stats->tx_bytes += bytes; } @@ -2156,16 +2156,16 @@ static int be_get_new_eqd(struct be_eq_o for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { do { - start = u64_stats_fetch_begin_irq(&rxo->stats.sync); + start = u64_stats_fetch_begin(&rxo->stats.sync); rx_pkts += rxo->stats.rx_pkts; - } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); + } while (u64_stats_fetch_retry(&rxo->stats.sync, start)); } for_all_tx_queues_on_eq(adapter, eqo, txo, i) { do { - start = u64_stats_fetch_begin_irq(&txo->stats.sync); + start = u64_stats_fetch_begin(&txo->stats.sync); tx_pkts += txo->stats.tx_reqs; - } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); + } while (u64_stats_fetch_retry(&txo->stats.sync, start)); } /* Skip, if wrapped around or first calculation */ Index: linux-6.1.90-rt30/drivers/net/ethernet/fungible/funeth/funeth_txrx.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/fungible/funeth/funeth_txrx.h +++ linux-6.1.90-rt30/drivers/net/ethernet/fungible/funeth/funeth_txrx.h @@ -206,9 +206,9 @@ struct funeth_rxq { #define FUN_QSTAT_READ(q, seq, stats_copy) \ do { \ - seq = u64_stats_fetch_begin_irq(&(q)->syncp); \ + seq = u64_stats_fetch_begin(&(q)->syncp); \ stats_copy = (q)->stats; \ - } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq))) + } while (u64_stats_fetch_retry(&(q)->syncp, (seq))) #define FUN_INT_NAME_LEN (IFNAMSIZ + 16) Index: linux-6.1.90-rt30/drivers/net/ethernet/google/gve/gve_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/google/gve/gve_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device struct gve_rx_ring *rx = &priv->rx[ring]; start = - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); + u64_stats_fetch_begin(&priv->rx[ring].statss); tmp_rx_pkts = rx->rpackets; tmp_rx_bytes = rx->rbytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; - } while 
(u64_stats_fetch_retry_irq(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); rx_pkts += tmp_rx_pkts; rx_bytes += tmp_rx_bytes; @@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device if (priv->tx) { do { start = - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); + u64_stats_fetch_begin(&priv->tx[ring].statss); tmp_tx_pkts = priv->tx[ring].pkt_done; tmp_tx_bytes = priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, start)); tx_pkts += tmp_tx_pkts; tx_bytes += tmp_tx_bytes; @@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device data[i++] = rx->fill_cnt - rx->cnt; do { start = - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); + u64_stats_fetch_begin(&priv->rx[ring].statss); tmp_rx_bytes = rx->rbytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; - } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); data[i++] = tmp_rx_bytes; data[i++] = rx->rx_cont_packet_cnt; @@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device } do { start = - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); + u64_stats_fetch_begin(&priv->tx[ring].statss); tmp_tx_bytes = tx->bytes_done; - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, start)); data[i++] = tmp_tx_bytes; data[i++] = tx->wake_queue; Index: linux-6.1.90-rt30/drivers/net/ethernet/google/gve/gve_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/google/gve/gve_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/google/gve/gve_main.c @@ -51,10 +51,10 @@ static void gve_get_stats(struct net_dev for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { do { start = - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); + u64_stats_fetch_begin(&priv->rx[ring].statss); packets = priv->rx[ring].rpackets; bytes = priv->rx[ring].rbytes; - } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); s->rx_packets += packets; s->rx_bytes += bytes; @@ -64,10 +64,10 @@ static void gve_get_stats(struct net_dev for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { do { start = - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); + u64_stats_fetch_begin(&priv->tx[ring].statss); packets = priv->tx[ring].pkt_done; bytes = priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, start)); s->tx_packets += packets; s->tx_bytes += bytes; @@ -1260,9 +1260,9 @@ void gve_handle_report_stats(struct gve_ } do { - start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss); + start = u64_stats_fetch_begin(&priv->tx[idx].statss); tx_bytes = priv->tx[idx].bytes_done; - } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start)); + } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); stats[stats_idx++] = (struct stats) { .stat_name = cpu_to_be32(TX_WAKE_CNT), .value = cpu_to_be64(priv->tx[idx].wake_queue), Index: linux-6.1.90-rt30/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ 
linux-6.1.90-rt30/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2496,7 +2496,7 @@ static void hns3_fetch_stats(struct rtnl unsigned int start; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); if (is_tx) { stats->tx_bytes += ring->stats.tx_bytes; stats->tx_packets += ring->stats.tx_pkts; @@ -2530,7 +2530,7 @@ static void hns3_fetch_stats(struct rtnl stats->multicast += ring->stats.rx_multicast; stats->rx_length_errors += ring->stats.err_pkt_len; } - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); } static void hns3_nic_get_stats64(struct net_device *netdev, Index: linux-6.1.90-rt30/drivers/net/ethernet/huawei/hinic/hinic_rx.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ linux-6.1.90-rt30/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rx unsigned int start; do { - start = u64_stats_fetch_begin_irq(&rxq_stats->syncp); + start = u64_stats_fetch_begin(&rxq_stats->syncp); stats->pkts = rxq_stats->pkts; stats->bytes = rxq_stats->bytes; stats->errors = rxq_stats->csum_errors + rxq_stats->other_errors; stats->csum_errors = rxq_stats->csum_errors; stats->other_errors = rxq_stats->other_errors; - } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); } /** Index: linux-6.1.90-rt30/drivers/net/ethernet/huawei/hinic/hinic_tx.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ linux-6.1.90-rt30/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_tx unsigned int start; do { - start = u64_stats_fetch_begin_irq(&txq_stats->syncp); + start = u64_stats_fetch_begin(&txq_stats->syncp); stats->pkts = txq_stats->pkts; stats->bytes = txq_stats->bytes; stats->tx_busy = txq_stats->tx_busy; stats->tx_wake = txq_stats->tx_wake; stats->tx_dropped = txq_stats->tx_dropped; stats->big_frags_pkts = txq_stats->big_frags_pkts; - } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start)); + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); } /** Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net continue; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; @@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net continue; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/i40e/i40e_ethtool.c 
=================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, voi * @ring: the ring to copy * * Queue statistics must be copied while protected by - * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats. * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the * ring pointer is null, zero out the queue stat values and update the data * pointer. Otherwise safely copy the stats from the ring into the supplied @@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct /* To avoid invalid statistics values, ensure that we keep retrying * the copy until we get a consistent value according to - * u64_stats_fetch_retry_irq. But first, make sure our ring is + * u64_stats_fetch_retry. But first, make sure our ring is * non-null before attempting to access its syncp. */ do { - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); + start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); for (i = 0; i < size; i++) { i40e_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); } - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/i40e/i40e_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/i40e/i40e_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -425,10 +425,10 @@ static void i40e_get_netdev_stats_struct unsigned int start; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; @@ -478,10 +478,10 @@ static void i40e_get_netdev_stats_struct if (!ring) continue; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; @@ -903,10 +903,10 @@ static void i40e_update_vsi_stats(struct continue; do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; @@ -921,10 +921,10 @@ static void i40e_update_vsi_stats(struct continue; do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; @@ -941,10 +941,10 @@ static void i40e_update_vsi_stats(struct continue; do { - start = 
u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/iavf/iavf_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, voi * @ring: the ring to copy * * Queue statistics must be copied while protected by - * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. + * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats. * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the * ring pointer is null, zero out the queue stat values and update the data * pointer. Otherwise safely copy the stats from the ring into the supplied @@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct /* To avoid invalid statistics values, ensure that we keep retrying * the copy until we get a consistent value according to - * u64_stats_fetch_retry_irq. But first, make sure our ring is + * u64_stats_fetch_retry. But first, make sure our ring is * non-null before attempting to access its syncp. */ do { - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); + start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); for (i = 0; i < size; i++) iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/ice/ice_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/ice/ice_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/ice/ice_main.c @@ -6419,10 +6419,10 @@ ice_fetch_u64_stats_per_ring(struct u64_ unsigned int start; do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); *pkts = stats.pkts; *bytes = stats.bytes; - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); } /** Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/igb/igb_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2316,15 +2316,15 @@ static void igb_get_ethtool_stats(struct ring = adapter->tx_ring[j]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); data[i] = ring->tx_stats.packets; data[i+1] = ring->tx_stats.bytes; data[i+2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + start = u64_stats_fetch_begin(&ring->tx_syncp2); restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); data[i+2] += restart2; i += IGB_TX_QUEUE_STATS_LEN; @@ 
-2332,13 +2332,13 @@ static void igb_get_ethtool_stats(struct for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); data[i] = ring->rx_stats.packets; data[i+1] = ring->rx_stats.bytes; data[i+2] = ring->rx_stats.drops; data[i+3] = ring->rx_stats.csum_err; data[i+4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); i += IGB_RX_QUEUE_STATS_LEN; } spin_unlock(&adapter->stats64_lock); Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/igb/igb_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/igb/igb_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/igb/igb_main.c @@ -6660,10 +6660,10 @@ void igb_update_stats(struct igb_adapter } do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -6676,10 +6676,10 @@ void igb_update_stats(struct igb_adapter for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/igc/igc_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -840,15 +840,15 @@ static void igc_ethtool_get_stats(struct ring = adapter->tx_ring[j]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); data[i] = ring->tx_stats.packets; data[i + 1] = ring->tx_stats.bytes; data[i + 2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + start = u64_stats_fetch_begin(&ring->tx_syncp2); restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); data[i + 2] += restart2; i += IGC_TX_QUEUE_STATS_LEN; @@ -856,13 +856,13 @@ static void igc_ethtool_get_stats(struct for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); data[i] = ring->rx_stats.packets; data[i + 1] = ring->rx_stats.bytes; data[i + 2] = ring->rx_stats.drops; data[i + 3] = ring->rx_stats.csum_err; data[i + 4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); i += IGC_RX_QUEUE_STATS_LEN; } spin_unlock(&adapter->stats64_lock); Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/igc/igc_main.c 
=================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/igc/igc_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/igc/igc_main.c @@ -4868,10 +4868,10 @@ void igc_update_stats(struct igc_adapter } do { - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + start = u64_stats_fetch_begin(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -4885,10 +4885,10 @@ void igc_update_stats(struct igc_adapter struct igc_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + start = u64_stats_fetch_begin(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1335,10 +1335,10 @@ static void ixgbe_get_ethtool_stats(stru } do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { @@ -1351,10 +1351,10 @@ static void ixgbe_get_ethtool_stats(stru } do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9047,10 +9047,10 @@ static void ixgbe_get_ring_stats64(struc if (ring) { do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } @@ -9070,10 +9070,10 @@ static void ixgbe_get_stats64(struct net if (ring) { do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbevf/ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(st } do { - 
start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } @@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(st } do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } @@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(st } do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } } Index: linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4350,10 +4350,10 @@ static void ixgbevf_get_tx_ring_stats(st if (ring) { do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } @@ -4376,10 +4376,10 @@ static void ixgbevf_get_stats(struct net for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); + start = u64_stats_fetch_begin(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } Index: linux-6.1.90-rt30/drivers/net/ethernet/marvell/mvneta.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/marvell/mvneta.c +++ linux-6.1.90-rt30/drivers/net/ethernet/marvell/mvneta.c @@ -813,14 +813,14 @@ mvneta_get_stats64(struct net_device *de cpu_stats = per_cpu_ptr(pp->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); rx_packets = cpu_stats->es.ps.rx_packets; rx_bytes = cpu_stats->es.ps.rx_bytes; rx_dropped = cpu_stats->rx_dropped; rx_errors = cpu_stats->rx_errors; tx_packets = cpu_stats->es.ps.tx_packets; tx_bytes = cpu_stats->es.ps.tx_bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -4765,7 +4765,7 @@ mvneta_ethtool_update_pcpu_stats(struct stats = per_cpu_ptr(pp->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); skb_alloc_error = stats->es.skb_alloc_error; refill_error = stats->es.refill_error; xdp_redirect = stats->es.ps.xdp_redirect; @@ -4775,7 +4775,7 @@ mvneta_ethtool_update_pcpu_stats(struct xdp_xmit_err = stats->es.ps.xdp_xmit_err; xdp_tx = stats->es.ps.xdp_tx; xdp_tx_err = 
stats->es.ps.xdp_tx_err; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); es->skb_alloc_error += skb_alloc_error; es->refill_error += refill_error; Index: linux-6.1.90-rt30/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2033,7 +2033,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *p cpu_stats = per_cpu_ptr(port->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); xdp_redirect = cpu_stats->xdp_redirect; xdp_pass = cpu_stats->xdp_pass; xdp_drop = cpu_stats->xdp_drop; @@ -2041,7 +2041,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *p xdp_xmit_err = cpu_stats->xdp_xmit_err; xdp_tx = cpu_stats->xdp_tx; xdp_tx_err = cpu_stats->xdp_tx_err; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); xdp_stats->xdp_redirect += xdp_redirect; xdp_stats->xdp_pass += xdp_pass; @@ -5140,12 +5140,12 @@ mvpp2_get_stats64(struct net_device *dev cpu_stats = per_cpu_ptr(port->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/marvell/sky2.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/marvell/sky2.c +++ linux-6.1.90-rt30/drivers/net/ethernet/marvell/sky2.c @@ -3894,19 +3894,19 @@ static void sky2_get_stats(struct net_de u64 _bytes, _packets; do { - start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); + start = u64_stats_fetch_begin(&sky2->rx_stats.syncp); _bytes = sky2->rx_stats.bytes; _packets = sky2->rx_stats.packets; - } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start)); stats->rx_packets = _packets; stats->rx_bytes = _bytes; do { - start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); + start = u64_stats_fetch_begin(&sky2->tx_stats.syncp); _bytes = sky2->tx_stats.bytes; _packets = sky2->tx_stats.packets; - } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start)); stats->tx_packets = _packets; stats->tx_bytes = _bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/mediatek/mtk_eth_soc.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ linux-6.1.90-rt30/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -865,7 +865,7 @@ static void mtk_get_stats64(struct net_d } do { - start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + start = u64_stats_fetch_begin(&hw_stats->syncp); storage->rx_packets = hw_stats->rx_packets; storage->tx_packets = hw_stats->tx_packets; storage->rx_bytes = hw_stats->rx_bytes; @@ -877,7 +877,7 @@ static void mtk_get_stats64(struct net_d storage->rx_crc_errors = hw_stats->rx_fcs_errors; storage->rx_errors = 
hw_stats->rx_checksum_errors; storage->tx_aborted_errors = hw_stats->tx_skip; - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + } while (u64_stats_fetch_retry(&hw_stats->syncp, start)); storage->tx_errors = dev->stats.tx_errors; storage->rx_dropped = dev->stats.rx_dropped; @@ -3693,13 +3693,13 @@ static void mtk_get_ethtool_stats(struct do { data_dst = data; - start = u64_stats_fetch_begin_irq(&hwstats->syncp); + start = u64_stats_fetch_begin(&hwstats->syncp); for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); if (mtk_page_pool_enabled(mac->hw)) mtk_ethtool_pp_stats(mac->hw, data_dst); - } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); + } while (u64_stats_fetch_retry(&hwstats->syncp, start)); } static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, Index: linux-6.1.90-rt30/drivers/net/ethernet/mellanox/mlxsw/spectrum.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ linux-6.1.90-rt30/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -827,12 +827,12 @@ mlxsw_sp_port_get_sw_stats64(const struc for_each_possible_cpu(i) { p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); rx_packets = p->rx_packets; rx_bytes = p->rx_bytes; tx_packets = p->tx_packets; tx_bytes = p->tx_bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/microsoft/mana/mana_en.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/microsoft/mana/mana_en.c +++ linux-6.1.90-rt30/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -316,10 +316,10 @@ static void mana_get_stats64(struct net_ rx_stats = &apc->rxqs[q]->stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + start = u64_stats_fetch_begin(&rx_stats->syncp); packets = rx_stats->packets; bytes = rx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); st->rx_packets += packets; st->rx_bytes += bytes; @@ -329,10 +329,10 @@ static void mana_get_stats64(struct net_ tx_stats = &apc->tx_qp[q].txq.stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + start = u64_stats_fetch_begin(&tx_stats->syncp); packets = tx_stats->packets; bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); st->tx_packets += packets; st->tx_bytes += bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/microsoft/mana/mana_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -90,13 +90,13 @@ static void mana_get_ethtool_stats(struc rx_stats = &apc->rxqs[q]->stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + start = u64_stats_fetch_begin(&rx_stats->syncp); packets = rx_stats->packets; bytes = rx_stats->bytes; xdp_drop = rx_stats->xdp_drop; xdp_tx = rx_stats->xdp_tx; xdp_redirect = rx_stats->xdp_redirect; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); + } while 
(u64_stats_fetch_retry(&rx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; @@ -109,11 +109,11 @@ static void mana_get_ethtool_stats(struc tx_stats = &apc->tx_qp[q].txq.stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + start = u64_stats_fetch_begin(&tx_stats->syncp); packets = tx_stats->packets; bytes = tx_stats->bytes; xdp_xmit = tx_stats->xdp_xmit; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_common.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1631,21 +1631,21 @@ static void nfp_net_stat64(struct net_de unsigned int start; do { - start = u64_stats_fetch_begin_irq(&r_vec->rx_sync); + start = u64_stats_fetch_begin(&r_vec->rx_sync); data[0] = r_vec->rx_pkts; data[1] = r_vec->rx_bytes; data[2] = r_vec->rx_drops; - } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start)); + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); stats->rx_packets += data[0]; stats->rx_bytes += data[1]; stats->rx_dropped += data[2]; do { - start = u64_stats_fetch_begin_irq(&r_vec->tx_sync); + start = u64_stats_fetch_begin(&r_vec->tx_sync); data[0] = r_vec->tx_pkts; data[1] = r_vec->tx_bytes; data[2] = r_vec->tx_errors; - } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start)); + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); stats->tx_packets += data[0]; stats->tx_bytes += data[1]; stats->tx_errors += data[2]; Index: linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -881,7 +881,7 @@ static u64 *nfp_vnic_get_sw_stats(struct unsigned int start; do { - start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync); + start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); data[0] = nn->r_vecs[i].rx_pkts; tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; @@ -889,10 +889,10 @@ static u64 *nfp_vnic_get_sw_stats(struct tmp[3] = nn->r_vecs[i].hw_csum_rx_error; tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; tmp[5] = nn->r_vecs[i].hw_tls_rx; - } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start)); + } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); do { - start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync); + start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); data[1] = nn->r_vecs[i].tx_pkts; data[2] = nn->r_vecs[i].tx_busy; tmp[6] = nn->r_vecs[i].hw_csum_tx; @@ -902,7 +902,7 @@ static u64 *nfp_vnic_get_sw_stats(struct tmp[10] = nn->r_vecs[i].hw_tls_tx; tmp[11] = nn->r_vecs[i].tls_tx_fallback; tmp[12] = nn->r_vecs[i].tls_tx_no_fallback; - } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start)); + } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); data += NN_RVEC_PER_Q_STATS; Index: linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ linux-6.1.90-rt30/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -134,13 
+134,13 @@ nfp_repr_get_host_stats64(const struct n repr_stats = per_cpu_ptr(repr->stats, i); do { - start = u64_stats_fetch_begin_irq(&repr_stats->syncp); + start = u64_stats_fetch_begin(&repr_stats->syncp); tbytes = repr_stats->tx_bytes; tpkts = repr_stats->tx_packets; tdrops = repr_stats->tx_drops; rbytes = repr_stats->rx_bytes; rpkts = repr_stats->rx_packets; - } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start)); + } while (u64_stats_fetch_retry(&repr_stats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; Index: linux-6.1.90-rt30/drivers/net/ethernet/nvidia/forcedeth.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/nvidia/forcedeth.c +++ linux-6.1.90-rt30/drivers/net/ethernet/nvidia/forcedeth.c @@ -1734,12 +1734,12 @@ static void nv_get_stats(int cpu, struct u64 tx_packets, tx_bytes, tx_dropped; do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); + syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp); rx_packets = src->stat_rx_packets; rx_bytes = src->stat_rx_bytes; rx_dropped = src->stat_rx_dropped; rx_missed_errors = src->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); + } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start)); storage->rx_packets += rx_packets; storage->rx_bytes += rx_bytes; @@ -1747,11 +1747,11 @@ static void nv_get_stats(int cpu, struct storage->rx_missed_errors += rx_missed_errors; do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); + syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp); tx_packets = src->stat_tx_packets; tx_bytes = src->stat_tx_bytes; tx_dropped = src->stat_tx_dropped; - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start)); storage->tx_packets += tx_packets; storage->tx_bytes += tx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ linux-6.1.90-rt30/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -135,9 +135,9 @@ static void rmnet_get_stats64(struct net pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); do { - start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); + start = u64_stats_fetch_begin(&pcpu_ptr->syncp); snapshot = pcpu_ptr->stats; /* struct assignment */ - } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); + } while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start)); total_stats.rx_pkts += snapshot.rx_pkts; total_stats.rx_bytes += snapshot.rx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/realtek/8139too.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/realtek/8139too.c +++ linux-6.1.90-rt30/drivers/net/ethernet/realtek/8139too.c @@ -2532,16 +2532,16 @@ rtl8139_get_stats64(struct net_device *d netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + start = u64_stats_fetch_begin(&tp->rx_stats.syncp); stats->rx_packets = tp->rx_stats.packets; stats->rx_bytes = tp->rx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&tp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + start = u64_stats_fetch_begin(&tp->tx_stats.syncp); 
stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start)); } /* Set or clear the multicast filter for this adaptor. Index: linux-6.1.90-rt30/drivers/net/ethernet/socionext/sni_ave.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/socionext/sni_ave.c +++ linux-6.1.90-rt30/drivers/net/ethernet/socionext/sni_ave.c @@ -1508,16 +1508,16 @@ static void ave_get_stats64(struct net_d unsigned int start; do { - start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp); + start = u64_stats_fetch_begin(&priv->stats_rx.syncp); stats->rx_packets = priv->stats_rx.packets; stats->rx_bytes = priv->stats_rx.bytes; - } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start)); + } while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start)); do { - start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp); + start = u64_stats_fetch_begin(&priv->stats_tx.syncp); stats->tx_packets = priv->stats_tx.packets; stats->tx_bytes = priv->stats_tx.bytes; - } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start)); + } while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start)); stats->rx_errors = priv->stats_rx.errors; stats->tx_errors = priv->stats_tx.errors; Index: linux-6.1.90-rt30/drivers/net/ethernet/ti/am65-cpsw-nuss.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ linux-6.1.90-rt30/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1376,12 +1376,12 @@ static void am65_cpsw_nuss_ndo_get_stats cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/ti/netcp_core.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/ti/netcp_core.c +++ linux-6.1.90-rt30/drivers/net/ethernet/ti/netcp_core.c @@ -1916,16 +1916,16 @@ netcp_get_stats(struct net_device *ndev, unsigned int start; do { - start = u64_stats_fetch_begin_irq(&p->syncp_rx); + start = u64_stats_fetch_begin(&p->syncp_rx); rxpackets = p->rx_packets; rxbytes = p->rx_bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); + } while (u64_stats_fetch_retry(&p->syncp_rx, start)); do { - start = u64_stats_fetch_begin_irq(&p->syncp_tx); + start = u64_stats_fetch_begin(&p->syncp_tx); txpackets = p->tx_packets; txbytes = p->tx_bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); + } while (u64_stats_fetch_retry(&p->syncp_tx, start)); stats->rx_packets = rxpackets; stats->rx_bytes = rxbytes; Index: linux-6.1.90-rt30/drivers/net/ethernet/via/via-rhine.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/via/via-rhine.c +++ linux-6.1.90-rt30/drivers/net/ethernet/via/via-rhine.c @@ -2217,16 +2217,16 @@ rhine_get_stats64(struct net_device *dev netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); 
+ start = u64_stats_fetch_begin(&rp->rx_stats.syncp); stats->rx_packets = rp->rx_stats.packets; stats->rx_bytes = rp->rx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); + start = u64_stats_fetch_begin(&rp->tx_stats.syncp); stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start)); } static void rhine_set_rx_mode(struct net_device *dev) Index: linux-6.1.90-rt30/drivers/net/ethernet/xilinx/xilinx_axienet_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ linux-6.1.90-rt30/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1305,16 +1305,16 @@ axienet_get_stats64(struct net_device *d netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync); + start = u64_stats_fetch_begin(&lp->rx_stat_sync); stats->rx_packets = u64_stats_read(&lp->rx_packets); stats->rx_bytes = u64_stats_read(&lp->rx_bytes); - } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start)); + } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); do { - start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync); + start = u64_stats_fetch_begin(&lp->tx_stat_sync); stats->tx_packets = u64_stats_read(&lp->tx_packets); stats->tx_bytes = u64_stats_read(&lp->tx_bytes); - } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start)); + } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); } static const struct net_device_ops axienet_netdev_ops = { Index: linux-6.1.90-rt30/drivers/net/hyperv/netvsc_drv.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/hyperv/netvsc_drv.c +++ linux-6.1.90-rt30/drivers/net/hyperv/netvsc_drv.c @@ -1268,12 +1268,12 @@ static void netvsc_get_vf_stats(struct n unsigned int start; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; @@ -1298,12 +1298,12 @@ static void netvsc_get_pcpu_stats(struct unsigned int start; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); this_tot->vf_rx_packets = stats->rx_packets; this_tot->vf_tx_packets = stats->tx_packets; this_tot->vf_rx_bytes = stats->rx_bytes; this_tot->vf_tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); this_tot->rx_packets = this_tot->vf_rx_packets; this_tot->tx_packets = this_tot->vf_tx_packets; this_tot->rx_bytes = this_tot->vf_rx_bytes; @@ -1322,20 +1322,20 @@ static void netvsc_get_pcpu_stats(struct tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + start = u64_stats_fetch_begin(&tx_stats->syncp); packets = tx_stats->packets; bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); this_tot->tx_bytes += bytes; 
this_tot->tx_packets += packets; rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + start = u64_stats_fetch_begin(&rx_stats->syncp); packets = rx_stats->packets; bytes = rx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); this_tot->rx_bytes += bytes; this_tot->rx_packets += packets; @@ -1374,21 +1374,21 @@ static void netvsc_get_stats64(struct ne tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + start = u64_stats_fetch_begin(&tx_stats->syncp); packets = tx_stats->packets; bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); t->tx_bytes += bytes; t->tx_packets += packets; rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + start = u64_stats_fetch_begin(&rx_stats->syncp); packets = rx_stats->packets; bytes = rx_stats->bytes; multicast = rx_stats->multicast + rx_stats->broadcast; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); t->rx_bytes += bytes; t->rx_packets += packets; @@ -1531,24 +1531,24 @@ static void netvsc_get_ethtool_stats(str tx_stats = &nvdev->chan_table[j].tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + start = u64_stats_fetch_begin(&tx_stats->syncp); packets = tx_stats->packets; bytes = tx_stats->bytes; xdp_xmit = tx_stats->xdp_xmit; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; data[i++] = xdp_xmit; rx_stats = &nvdev->chan_table[j].rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + start = u64_stats_fetch_begin(&rx_stats->syncp); packets = rx_stats->packets; bytes = rx_stats->bytes; xdp_drop = rx_stats->xdp_drop; xdp_redirect = rx_stats->xdp_redirect; xdp_tx = rx_stats->xdp_tx; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; data[i++] = xdp_drop; Index: linux-6.1.90-rt30/drivers/net/ifb.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ifb.c +++ linux-6.1.90-rt30/drivers/net/ifb.c @@ -162,18 +162,18 @@ static void ifb_stats64(struct net_devic for (i = 0; i < dev->num_tx_queues; i++,txp++) { do { - start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync); + start = u64_stats_fetch_begin(&txp->rx_stats.sync); packets = txp->rx_stats.packets; bytes = txp->rx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start)); + } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; do { - start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync); + start = u64_stats_fetch_begin(&txp->tx_stats.sync); packets = txp->tx_stats.packets; bytes = txp->tx_stats.bytes; - } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start)); + } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } @@ -245,12 +245,12 @@ static void ifb_fill_stats_data(u64 **da int j; do { - start = u64_stats_fetch_begin_irq(&q_stats->sync); + start = u64_stats_fetch_begin(&q_stats->sync); for (j = 0; j < IFB_Q_STATS_LEN; j++) { offset = ifb_q_stats_desc[j].offset; (*data)[j] 
= *(u64 *)(stats_base + offset); } - } while (u64_stats_fetch_retry_irq(&q_stats->sync, start)); + } while (u64_stats_fetch_retry(&q_stats->sync, start)); *data += IFB_Q_STATS_LEN; } Index: linux-6.1.90-rt30/drivers/net/ipvlan/ipvlan_main.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/ipvlan/ipvlan_main.c +++ linux-6.1.90-rt30/drivers/net/ipvlan/ipvlan_main.c @@ -301,13 +301,13 @@ static void ipvlan_get_stats64(struct ne for_each_possible_cpu(idx) { pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); do { - strt= u64_stats_fetch_begin_irq(&pcptr->syncp); + strt = u64_stats_fetch_begin(&pcptr->syncp); rx_pkts = u64_stats_read(&pcptr->rx_pkts); rx_bytes = u64_stats_read(&pcptr->rx_bytes); rx_mcast = u64_stats_read(&pcptr->rx_mcast); tx_pkts = u64_stats_read(&pcptr->tx_pkts); tx_bytes = u64_stats_read(&pcptr->tx_bytes); - } while (u64_stats_fetch_retry_irq(&pcptr->syncp, + } while (u64_stats_fetch_retry(&pcptr->syncp, strt)); s->rx_packets += rx_pkts; Index: linux-6.1.90-rt30/drivers/net/loopback.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/loopback.c +++ linux-6.1.90-rt30/drivers/net/loopback.c @@ -106,10 +106,10 @@ void dev_lstats_read(struct net_device * lb_stats = per_cpu_ptr(dev->lstats, i); do { - start = u64_stats_fetch_begin_irq(&lb_stats->syncp); + start = u64_stats_fetch_begin(&lb_stats->syncp); tpackets = u64_stats_read(&lb_stats->packets); tbytes = u64_stats_read(&lb_stats->bytes); - } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start)); + } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); *bytes += tbytes; *packets += tpackets; } Index: linux-6.1.90-rt30/drivers/net/macsec.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/macsec.c +++ linux-6.1.90-rt30/drivers/net/macsec.c @@ -2832,9 +2832,9 @@ static void get_rx_sc_stats(struct net_d stats = per_cpu_ptr(rx_sc->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->InOctetsValidated += tmp.InOctetsValidated; sum->InOctetsDecrypted += tmp.InOctetsDecrypted; @@ -2913,9 +2913,9 @@ static void get_tx_sc_stats(struct net_d stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->OutPktsProtected += tmp.OutPktsProtected; sum->OutPktsEncrypted += tmp.OutPktsEncrypted; @@ -2969,9 +2969,9 @@ static void get_secy_stats(struct net_de stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->OutPktsUntagged += tmp.OutPktsUntagged; sum->InPktsUntagged += tmp.InPktsUntagged; Index: linux-6.1.90-rt30/drivers/net/macvlan.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/macvlan.c +++ linux-6.1.90-rt30/drivers/net/macvlan.c @@ -948,13 +948,13 @@ 
static void macvlan_dev_get_stats64(stru for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); rx_packets = u64_stats_read(&p->rx_packets); rx_bytes = u64_stats_read(&p->rx_bytes); rx_multicast = u64_stats_read(&p->rx_multicast); tx_packets = u64_stats_read(&p->tx_packets); tx_bytes = u64_stats_read(&p->tx_bytes); - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/mhi_net.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/mhi_net.c +++ linux-6.1.90-rt30/drivers/net/mhi_net.c @@ -104,19 +104,19 @@ static void mhi_ndo_get_stats64(struct n unsigned int start; do { - start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp); + start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp); stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); - } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start)); + } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start)); do { - start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp); + start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp); stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); - } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start)); + } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start)); } static const struct net_device_ops mhi_netdev_ops = { Index: linux-6.1.90-rt30/drivers/net/netdevsim/netdev.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/netdevsim/netdev.c +++ linux-6.1.90-rt30/drivers/net/netdevsim/netdev.c @@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, unsigned int start; do { - start = u64_stats_fetch_begin_irq(&ns->syncp); + start = u64_stats_fetch_begin(&ns->syncp); stats->tx_bytes = ns->tx_bytes; stats->tx_packets = ns->tx_packets; - } while (u64_stats_fetch_retry_irq(&ns->syncp, start)); + } while (u64_stats_fetch_retry(&ns->syncp, start)); } static int Index: linux-6.1.90-rt30/drivers/net/team/team.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/team/team.c +++ linux-6.1.90-rt30/drivers/net/team/team.c @@ -1868,13 +1868,13 @@ team_get_stats64(struct net_device *dev, for_each_possible_cpu(i) { p = per_cpu_ptr(team->pcpu_stats, i); do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); rx_packets = u64_stats_read(&p->rx_packets); rx_bytes = u64_stats_read(&p->rx_bytes); rx_multicast = u64_stats_read(&p->rx_multicast); tx_packets = u64_stats_read(&p->tx_packets); tx_bytes = u64_stats_read(&p->tx_bytes); - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; Index: linux-6.1.90-rt30/drivers/net/team/team_mode_loadbalance.c =================================================================== --- 
linux-6.1.90-rt30.orig/drivers/net/team/team_mode_loadbalance.c +++ linux-6.1.90-rt30/drivers/net/team/team_mode_loadbalance.c @@ -466,9 +466,9 @@ static void __lb_one_cpu_stats_add(struc struct lb_stats tmp; do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); tmp.tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); acc_stats->tx_bytes += tmp.tx_bytes; } Index: linux-6.1.90-rt30/drivers/net/veth.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/veth.c +++ linux-6.1.90-rt30/drivers/net/veth.c @@ -182,12 +182,12 @@ static void veth_get_ethtool_stats(struc size_t offset; do { - start = u64_stats_fetch_begin_irq(&rq_stats->syncp); + start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_RQ_STATS_LEN; j++) { offset = veth_rq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } - } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); idx += VETH_RQ_STATS_LEN; } @@ -203,12 +203,12 @@ static void veth_get_ethtool_stats(struc tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; do { - start = u64_stats_fetch_begin_irq(&rq_stats->syncp); + start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_TQ_STATS_LEN; j++) { offset = veth_tq_stats_desc[j].offset; data[tx_idx + j] += *(u64 *)(base + offset); } - } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); + } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); } } @@ -381,13 +381,13 @@ static void veth_stats_rx(struct veth_st unsigned int start; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; xdp_tx_err = stats->vs.xdp_tx_err; packets = stats->vs.xdp_packets; bytes = stats->vs.xdp_bytes; drops = stats->vs.rx_drops; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; result->xdp_tx_err += xdp_tx_err; result->xdp_packets += packets; Index: linux-6.1.90-rt30/drivers/net/virtio_net.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/virtio_net.c +++ linux-6.1.90-rt30/drivers/net/virtio_net.c @@ -2107,18 +2107,18 @@ static void virtnet_stats(struct net_dev struct send_queue *sq = &vi->sq[i]; do { - start = u64_stats_fetch_begin_irq(&sq->stats.syncp); + start = u64_stats_fetch_begin(&sq->stats.syncp); tpackets = sq->stats.packets; tbytes = sq->stats.bytes; terrors = sq->stats.tx_timeouts; - } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); + } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); do { - start = u64_stats_fetch_begin_irq(&rq->stats.syncp); + start = u64_stats_fetch_begin(&rq->stats.syncp); rpackets = rq->stats.packets; rbytes = rq->stats.bytes; rdrops = rq->stats.drops; - } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); + } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; @@ -2726,12 +2726,12 @@ static void virtnet_get_ethtool_stats(st stats_base = (u8 *)&rq->stats; do { - start = u64_stats_fetch_begin_irq(&rq->stats.syncp); + start = u64_stats_fetch_begin(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { offset = virtnet_rq_stats_desc[j].offset; 
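
The ethtool paths in this stretch (ifb and veth above, virtio_net here, mtk earlier) extend the pattern to a whole vector: every counter named by a descriptor table is copied inside a single begin/retry section, so the array handed to userspace is one consistent snapshot rather than a mix of generations. A condensed sketch; demo_stats_desc and demo_copy_stats stand in for the drivers' *_stats_desc tables and copy loops:

#include <linux/ethtool.h>
#include <linux/u64_stats_sync.h>

struct demo_stats_desc {
        char    name[ETH_GSTRING_LEN];
        size_t  offset;         /* offsetof() the counter in the stats struct */
};

static void demo_copy_stats(const void *stats_base,
                            const struct u64_stats_sync *syncp,
                            const struct demo_stats_desc *desc,
                            size_t n, u64 *data)
{
        unsigned int start;
        size_t j;

        do {
                start = u64_stats_fetch_begin(syncp);
                for (j = 0; j < n; j++)
                        data[j] = *(const u64 *)(stats_base + desc[j].offset);
        } while (u64_stats_fetch_retry(syncp, start));
}
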
data[idx + j] = *(u64 *)(stats_base + offset); } - } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); + } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); idx += VIRTNET_RQ_STATS_LEN; } @@ -2740,12 +2740,12 @@ static void virtnet_get_ethtool_stats(st stats_base = (u8 *)&sq->stats; do { - start = u64_stats_fetch_begin_irq(&sq->stats.syncp); + start = u64_stats_fetch_begin(&sq->stats.syncp); for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { offset = virtnet_sq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } - } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); + } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); idx += VIRTNET_SQ_STATS_LEN; } } Index: linux-6.1.90-rt30/drivers/net/vrf.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/vrf.c +++ linux-6.1.90-rt30/drivers/net/vrf.c @@ -159,13 +159,13 @@ static void vrf_get_stats64(struct net_d dstats = per_cpu_ptr(dev->dstats, i); do { - start = u64_stats_fetch_begin_irq(&dstats->syncp); + start = u64_stats_fetch_begin(&dstats->syncp); tbytes = dstats->tx_bytes; tpkts = dstats->tx_pkts; tdrops = dstats->tx_drps; rbytes = dstats->rx_bytes; rpkts = dstats->rx_pkts; - } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); + } while (u64_stats_fetch_retry(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; stats->tx_dropped += tdrops; Index: linux-6.1.90-rt30/drivers/net/vxlan/vxlan_vnifilter.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/vxlan/vxlan_vnifilter.c +++ linux-6.1.90-rt30/drivers/net/vxlan/vxlan_vnifilter.c @@ -129,9 +129,9 @@ static void vxlan_vnifilter_stats_get(co pstats = per_cpu_ptr(vninode->stats, i); do { - start = u64_stats_fetch_begin_irq(&pstats->syncp); + start = u64_stats_fetch_begin(&pstats->syncp); memcpy(&temp, &pstats->stats, sizeof(temp)); - } while (u64_stats_fetch_retry_irq(&pstats->syncp, start)); + } while (u64_stats_fetch_retry(&pstats->syncp, start)); dest->rx_packets += temp.rx_packets; dest->rx_bytes += temp.rx_bytes; Index: linux-6.1.90-rt30/drivers/net/wwan/mhi_wwan_mbim.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/wwan/mhi_wwan_mbim.c +++ linux-6.1.90-rt30/drivers/net/wwan/mhi_wwan_mbim.c @@ -456,19 +456,19 @@ static void mhi_mbim_ndo_get_stats64(str unsigned int start; do { - start = u64_stats_fetch_begin_irq(&link->rx_syncp); + start = u64_stats_fetch_begin(&link->rx_syncp); stats->rx_packets = u64_stats_read(&link->rx_packets); stats->rx_bytes = u64_stats_read(&link->rx_bytes); stats->rx_errors = u64_stats_read(&link->rx_errors); - } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start)); + } while (u64_stats_fetch_retry(&link->rx_syncp, start)); do { - start = u64_stats_fetch_begin_irq(&link->tx_syncp); + start = u64_stats_fetch_begin(&link->tx_syncp); stats->tx_packets = u64_stats_read(&link->tx_packets); stats->tx_bytes = u64_stats_read(&link->tx_bytes); stats->tx_errors = u64_stats_read(&link->tx_errors); stats->tx_dropped = u64_stats_read(&link->tx_dropped); - } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start)); + } while (u64_stats_fetch_retry(&link->tx_syncp, start)); } static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev, Index: linux-6.1.90-rt30/drivers/net/xen-netfront.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/net/xen-netfront.c +++ 
linux-6.1.90-rt30/drivers/net/xen-netfront.c
@@ -1393,16 +1393,16 @@ static void xennet_get_stats64(struct ne
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+			start = u64_stats_fetch_begin(&tx_stats->syncp);
 			tx_packets = tx_stats->packets;
 			tx_bytes = tx_stats->bytes;
-		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 
 		do {
-			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+			start = u64_stats_fetch_begin(&rx_stats->syncp);
 			rx_packets = rx_stats->packets;
 			rx_bytes = rx_stats->bytes;
-		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
 
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;
Index: linux-6.1.90-rt30/drivers/spi/spi.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/spi/spi.c
+++ linux-6.1.90-rt30/drivers/spi/spi.c
@@ -127,10 +127,10 @@ do {						\
 		unsigned int start;			\
 		pcpu_stats = per_cpu_ptr(in, i);	\
 		do {					\
-			start = u64_stats_fetch_begin_irq(	\
+			start = u64_stats_fetch_begin(		\
 					&pcpu_stats->syncp);	\
 			inc = u64_stats_read(&pcpu_stats->field); \
-		} while (u64_stats_fetch_retry_irq(	\
+		} while (u64_stats_fetch_retry(		\
 					&pcpu_stats->syncp, start)); \
 		ret += inc;				\
 	}						\
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250.h
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250.h
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250.h
@@ -176,12 +176,49 @@ static inline void serial_dl_write(struc
 	up->dl_write(up, value);
 }
 
+static inline int serial8250_in_IER(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+	unsigned long flags;
+	bool is_console;
+	int ier;
+
+	is_console = uart_console(port);
+
+	if (is_console)
+		printk_cpu_sync_get_irqsave(flags);
+
+	ier = serial_in(up, UART_IER);
+
+	if (is_console)
+		printk_cpu_sync_put_irqrestore(flags);
+
+	return ier;
+}
+
+static inline void serial8250_set_IER(struct uart_8250_port *up, int ier)
+{
+	struct uart_port *port = &up->port;
+	unsigned long flags;
+	bool is_console;
+
+	is_console = uart_console(port);
+
+	if (is_console)
+		printk_cpu_sync_get_irqsave(flags);
+
+	serial_out(up, UART_IER, ier);
+
+	if (is_console)
+		printk_cpu_sync_put_irqrestore(flags);
+}
+
 static inline bool serial8250_set_THRI(struct uart_8250_port *up)
 {
 	if (up->ier & UART_IER_THRI)
 		return false;
 	up->ier |= UART_IER_THRI;
-	serial_out(up, UART_IER, up->ier);
+	serial8250_set_IER(up, up->ier);
 	return true;
 }
 
@@ -190,7 +227,7 @@ static inline bool serial8250_clear_THRI
 	if (!(up->ier & UART_IER_THRI))
 		return false;
 	up->ier &= ~UART_IER_THRI;
-	serial_out(up, UART_IER, up->ier);
+	serial8250_set_IER(up, up->ier);
 	return true;
 }
 
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_aspeed_vuart.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -278,7 +278,7 @@ static void __aspeed_vuart_set_throttle(
 	up->ier &= ~irqs;
 	if (!throttle)
 		up->ier |= irqs;
-	serial_out(up, UART_IER, up->ier);
+	serial8250_set_IER(up, up->ier);
 }
 
 static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
 {
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_bcm7271.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_bcm7271.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_bcm7271.c
@@ -609,7 +609,7 @@ static int brcmuart_startup(struct uart_
 	 * will handle this.
 	 */
 	up->ier &= ~UART_IER_RDI;
-	serial_port_out(port, UART_IER, up->ier);
+	serial8250_set_IER(up, up->ier);
 
 	priv->tx_running = false;
 	priv->dma.rx_dma = NULL;
@@ -794,10 +794,12 @@ static int brcmuart_handle_irq(struct ua
 	unsigned int iir = serial_port_in(p, UART_IIR);
 	struct brcmuart_priv *priv = p->private_data;
 	struct uart_8250_port *up = up_to_u8250p(p);
+	unsigned long cs_flags;
 	unsigned int status;
 	unsigned long flags;
 	unsigned int ier;
 	unsigned int mcr;
+	bool is_console;
 	int handled = 0;
 
 	/*
@@ -808,6 +810,10 @@ static int brcmuart_handle_irq(struct ua
 	spin_lock_irqsave(&p->lock, flags);
 	status = serial_port_in(p, UART_LSR);
 	if ((status & UART_LSR_DR) == 0) {
+		is_console = uart_console(p);
+
+		if (is_console)
+			printk_cpu_sync_get_irqsave(cs_flags);
 		ier = serial_port_in(p, UART_IER);
 
 		/*
@@ -828,6 +834,9 @@ static int brcmuart_handle_irq(struct ua
 			serial_port_in(p, UART_RX);
 		}
 
+		if (is_console)
+			printk_cpu_sync_put_irqrestore(cs_flags);
+
 		handled = 1;
 	}
 	spin_unlock_irqrestore(&p->lock, flags);
@@ -842,8 +851,10 @@ static enum hrtimer_restart brcmuart_hrt
 	struct brcmuart_priv *priv = container_of(t, struct brcmuart_priv, hrt);
 	struct uart_port *p = priv->up;
 	struct uart_8250_port *up = up_to_u8250p(p);
+	unsigned long cs_flags;
 	unsigned int status;
 	unsigned long flags;
+	bool is_console;
 
 	if (priv->shutdown)
 		return HRTIMER_NORESTART;
@@ -865,12 +876,20 @@ static enum hrtimer_restart brcmuart_hrt
 	/* re-enable receive unless upper layer has disabled it */
 	if ((up->ier & (UART_IER_RLSI | UART_IER_RDI)) == (UART_IER_RLSI | UART_IER_RDI)) {
+		is_console = uart_console(p);
+
+		if (is_console)
+			printk_cpu_sync_get_irqsave(cs_flags);
+
 		status = serial_port_in(p, UART_IER);
 		status |= (UART_IER_RLSI | UART_IER_RDI);
 		serial_port_out(p, UART_IER, status);
 		status = serial_port_in(p, UART_MCR);
 		status |= UART_MCR_RTS;
 		serial_port_out(p, UART_MCR, status);
+
+		if (is_console)
+			printk_cpu_sync_put_irqrestore(cs_flags);
 	}
 	spin_unlock_irqrestore(&p->lock, flags);
 	return HRTIMER_NORESTART;
 }
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_core.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_core.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_core.c
@@ -255,8 +255,11 @@ static void serial8250_timeout(struct ti
 static void serial8250_backup_timeout(struct timer_list *t)
 {
 	struct uart_8250_port *up = from_timer(up, t, timer);
+	struct uart_port *port = &up->port;
 	unsigned int iir, ier = 0, lsr;
+	unsigned long cs_flags;
 	unsigned long flags;
+	bool is_console;
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
@@ -265,8 +268,16 @@ static void serial8250_backup_timeout(st
 	 * based handler.
 	 */
 	if (up->port.irq) {
+		is_console = uart_console(port);
+
+		if (is_console)
+			printk_cpu_sync_get_irqsave(cs_flags);
+
 		ier = serial_in(up, UART_IER);
 		serial_out(up, UART_IER, 0);
+
+		if (is_console)
+			printk_cpu_sync_put_irqrestore(cs_flags);
 	}
 
 	iir = serial_in(up, UART_IIR);
@@ -289,7 +300,7 @@ static void serial8250_backup_timeout(st
 		serial8250_tx_chars(up);
 
 	if (up->port.irq)
-		serial_out(up, UART_IER, ier);
+		serial8250_set_IER(up, ier);
 
 	spin_unlock_irqrestore(&up->port.lock, flags);
 
@@ -575,6 +586,14 @@ serial8250_register_ports(struct uart_dr
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 
+static void univ8250_console_write_atomic(struct console *co, const char *s,
+					  unsigned int count)
+{
+	struct uart_8250_port *up = &serial8250_ports[co->index];
+
+	serial8250_console_write_atomic(up, s, count);
+}
+
 static void univ8250_console_write(struct console *co, const char *s,
 				   unsigned int count)
 {
@@ -668,6 +687,7 @@ static int univ8250_console_match(struct
 
 static struct console univ8250_console = {
 	.name		= "ttyS",
+	.write_atomic	= univ8250_console_write_atomic,
 	.write		= univ8250_console_write,
 	.device		= uart_console_device,
 	.setup		= univ8250_console_setup,
@@ -961,7 +981,7 @@ static void serial_8250_overrun_backoff_
 	spin_lock_irqsave(&port->lock, flags);
 	up->ier |= UART_IER_RLSI | UART_IER_RDI;
 	up->port.read_status_mask |= UART_LSR_DR;
-	serial_out(up, UART_IER, up->ier);
+	serial8250_set_IER(up, up->ier);
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_exar.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_exar.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_exar.c
@@ -189,6 +189,8 @@ static void xr17v35x_set_divisor(struct
 
 static int xr17v35x_startup(struct uart_port *port)
 {
+	struct uart_8250_port *up = up_to_u8250p(port);
+
 	/*
 	 * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
 	 * MCR [7:5] and MSR [7:0]
@@ -199,7 +201,7 @@ static int xr17v35x_startup(struct uart_
 	 * Make sure all interrups are masked until initialization is
 	 * complete and the FIFOs are cleared
 	 */
-	serial_port_out(port, UART_IER, 0);
+	serial8250_set_IER(up, 0);
 
 	return serial8250_do_startup(port);
 }
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_fsl.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_fsl.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_fsl.c
@@ -58,7 +58,8 @@ int fsl8250_handle_irq(struct uart_port
 	if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
 		unsigned long delay;
 
-		up->ier = port->serial_in(port, UART_IER);
+		up->ier = serial8250_in_IER(up);
+
 		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
 			port->ops->stop_rx(port);
 		} else {
Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_ingenic.c
===================================================================
--- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_ingenic.c
+++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_ingenic.c
@@ -146,6 +146,7 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic
 static void ingenic_uart_serial_out(struct uart_port *p, int offset,
 				    int value)
 {
+	struct uart_8250_port *up = up_to_u8250p(p);
 	int ier;
 
 	switch (offset) {
@@ -167,7 +168,7 @@ static void ingenic_uart_serial_out(stru
 		 * If we have enabled modem status IRQs we should enable
 		 * modem mode.
*/ - ier = p->serial_in(p, UART_IER); + ier = serial8250_in_IER(up); if (ier & UART_IER_MSI) value |= UART_MCR_MDCE | UART_MCR_FCM; Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_mtk.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_mtk.c +++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_mtk.c @@ -226,12 +226,40 @@ static void mtk8250_shutdown(struct uart static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask) { - serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask)); + struct uart_port *port = &up->port; + unsigned long flags; + bool is_console; + int ier; + + is_console = uart_console(port); + + if (is_console) + printk_cpu_sync_get_irqsave(flags); + + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, ier & (~mask)); + + if (is_console) + printk_cpu_sync_put_irqrestore(flags); } static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask) { - serial_out(up, UART_IER, serial_in(up, UART_IER) | mask); + struct uart_port *port = &up->port; + unsigned long flags; + bool is_console; + int ier; + + is_console = uart_console(port); + + if (is_console) + printk_cpu_sync_get_irqsave(flags); + + ier = serial_in(up, UART_IER); + serial_out(up, UART_IER, ier | mask); + + if (is_console) + printk_cpu_sync_put_irqrestore(flags); } static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_omap.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_omap.c +++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_omap.c @@ -330,7 +330,7 @@ static void omap8250_restore_regs(struct /* drop TCR + TLR access, we setup XON/XOFF later */ serial8250_out_MCR(up, mcr); - serial_out(up, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_dl_write(up, priv->quot); @@ -520,7 +520,7 @@ static void omap_8250_pm(struct uart_por serial_out(up, UART_EFR, efr | UART_EFR_ECB); serial_out(up, UART_LCR, 0); - serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); + serial8250_set_IER(up, (state != 0) ? UART_IERX_SLEEP : 0); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_EFR, efr); serial_out(up, UART_LCR, 0); @@ -703,7 +703,7 @@ static int omap_8250_startup(struct uart goto err; up->ier = UART_IER_RLSI | UART_IER_RDI; - serial_out(up, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); #ifdef CONFIG_PM up->capabilities |= UART_CAP_RPM; @@ -744,7 +744,7 @@ static void omap_8250_shutdown(struct ua serial_out(up, UART_OMAP_EFR2, 0x0); up->ier = 0; - serial_out(up, UART_IER, 0); + serial8250_set_IER(up, 0); if (up->dma) serial8250_release_dma(up); @@ -792,7 +792,7 @@ static void omap_8250_unthrottle(struct up->dma->rx_dma(up); up->ier |= UART_IER_RLSI | UART_IER_RDI; port->read_status_mask |= UART_LSR_DR; - serial_out(up, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); spin_unlock_irqrestore(&port->lock, flags); pm_runtime_mark_last_busy(port->dev); @@ -883,7 +883,7 @@ static void __dma_rx_complete(void *para __dma_rx_do_complete(p); if (!priv->throttled) { p->ier |= UART_IER_RLSI | UART_IER_RDI; - serial_out(p, UART_IER, p->ier); + serial8250_set_IER(p, p->ier); if (!(priv->habit & UART_HAS_EFR2)) omap_8250_rx_dma(p); } @@ -940,7 +940,7 @@ static int omap_8250_rx_dma(struct uart_ * callback to run. 
*/ p->ier &= ~(UART_IER_RLSI | UART_IER_RDI); - serial_out(p, UART_IER, p->ier); + serial8250_set_IER(p, p->ier); } goto out; } @@ -1153,12 +1153,12 @@ static void am654_8250_handle_rx_dma(str * periodic timeouts, re-enable interrupts. */ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); - serial_out(up, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); omap_8250_rx_dma_flush(up); serial_in(up, UART_IIR); serial_out(up, UART_OMAP_EFR2, 0x0); up->ier |= UART_IER_RLSI | UART_IER_RDI; - serial_out(up, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); } } Index: linux-6.1.90-rt30/drivers/tty/serial/8250/8250_port.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/8250_port.c +++ linux-6.1.90-rt30/drivers/tty/serial/8250/8250_port.c @@ -744,7 +744,7 @@ static void serial8250_set_sleep(struct serial_out(p, UART_EFR, UART_EFR_ECB); serial_out(p, UART_LCR, 0); } - serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0); + serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0); if (p->capabilities & UART_CAP_EFR) { serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(p, UART_EFR, efr); @@ -755,12 +755,29 @@ static void serial8250_set_sleep(struct serial8250_rpm_put(p); } -static void serial8250_clear_IER(struct uart_8250_port *up) +static unsigned int serial8250_clear_IER(struct uart_8250_port *up) { + struct uart_port *port = &up->port; + unsigned int clearval = 0; + unsigned long flags; + bool is_console; + unsigned int prior; + + is_console = uart_console(port); + if (up->capabilities & UART_CAP_UUE) - serial_out(up, UART_IER, UART_IER_UUE); - else - serial_out(up, UART_IER, 0); + clearval = UART_IER_UUE; + + if (is_console) + printk_cpu_sync_get_irqsave(flags); + + prior = serial_in(up, UART_IER); + serial_out(up, UART_IER, clearval); + + if (is_console) + printk_cpu_sync_put_irqrestore(flags); + + return prior; } #ifdef CONFIG_SERIAL_8250_RSA @@ -1026,8 +1043,11 @@ static int broken_efr(struct uart_8250_p */ static void autoconfig_16550a(struct uart_8250_port *up) { + struct uart_port *port = &up->port; unsigned char status1, status2; unsigned int iersave; + unsigned long flags; + bool is_console; up->port.type = PORT_16550A; up->capabilities |= UART_CAP_FIFO; @@ -1139,6 +1159,11 @@ static void autoconfig_16550a(struct uar return; } + is_console = uart_console(port); + + if (is_console) + printk_cpu_sync_get_irqsave(flags); + /* * Try writing and reading the UART_IER_UUE bit (b6). * If it works, this is probably one of the Xscale platform's @@ -1174,6 +1199,9 @@ static void autoconfig_16550a(struct uar } serial_out(up, UART_IER, iersave); + if (is_console) + printk_cpu_sync_put_irqrestore(flags); + /* * We distinguish between 16550A and U6 16550A by counting * how many bytes are in the FIFO. @@ -1196,8 +1224,10 @@ static void autoconfig(struct uart_8250_ unsigned char status1, scratch, scratch2, scratch3; unsigned char save_lcr, save_mcr; struct uart_port *port = &up->port; + unsigned long cs_flags; unsigned long flags; unsigned int old_capabilities; + bool is_console; if (!port->iobase && !port->mapbase && !port->membase) return; @@ -1215,6 +1245,11 @@ static void autoconfig(struct uart_8250_ up->bugs = 0; if (!(port->flags & UPF_BUGGY_UART)) { + is_console = uart_console(port); + + if (is_console) + printk_cpu_sync_get_irqsave(cs_flags); + /* * Do a simple existence test first; if we fail this, * there's no point trying anything else. 
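All of the 8250 hunks above follow a single pattern: any UART_IER access on a port that may be a registered console is bracketed by the printk CPU sync, so that write_atomic() output from another CPU cannot interleave with the register access. A minimal sketch of that pattern, mirroring the serial8250_in_IER() helper added earlier in this series (my_read_ier is a hypothetical name, not part of the patch):

static unsigned int my_read_ier(struct uart_8250_port *up)
{
	bool is_console = uart_console(&up->port);
	unsigned long flags;
	unsigned int ier;

	/* Exclude atomic console writers while IER is being sampled. */
	if (is_console)
		printk_cpu_sync_get_irqsave(flags);

	ier = serial_in(up, UART_IER);

	if (is_console)
		printk_cpu_sync_put_irqrestore(flags);

	return ier;
}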
@@ -1244,6 +1279,10 @@ static void autoconfig(struct uart_8250_ #endif scratch3 = serial_in(up, UART_IER) & 0x0f; serial_out(up, UART_IER, scratch); + + if (is_console) + printk_cpu_sync_put_irqrestore(cs_flags); + if (scratch2 != 0 || scratch3 != 0x0F) { /* * We failed; there's nothing here @@ -1367,7 +1406,9 @@ static void autoconfig_irq(struct uart_8 unsigned char save_mcr, save_ier; unsigned char save_ICP = 0; unsigned int ICP = 0; + unsigned long flags; unsigned long irqs; + bool is_console; int irq; if (port->flags & UPF_FOURPORT) { @@ -1377,6 +1418,13 @@ static void autoconfig_irq(struct uart_8 inb_p(ICP); } + is_console = uart_console(port); + + if (is_console) { + console_lock(); + printk_cpu_sync_get_irqsave(flags); + } + /* forget possible initially masked and pending IRQ */ probe_irq_off(probe_irq_on()); save_mcr = serial8250_in_MCR(up); @@ -1407,6 +1455,11 @@ static void autoconfig_irq(struct uart_8 if (port->flags & UPF_FOURPORT) outb_p(save_ICP, ICP); + if (is_console) { + printk_cpu_sync_put_irqrestore(flags); + console_unlock(); + } + port->irq = (irq > 0) ? irq : 0; } @@ -1418,7 +1471,7 @@ static void serial8250_stop_rx(struct ua up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; - serial_port_out(port, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); serial8250_rpm_put(up); } @@ -1448,7 +1501,7 @@ void serial8250_em485_stop_tx(struct uar serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; - serial_port_out(&p->port, UART_IER, p->ier); + serial8250_set_IER(p, p->ier); } } EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); @@ -1697,7 +1750,7 @@ static void serial8250_disable_ms(struct mctrl_gpio_disable_ms(up->gpios); up->ier &= ~UART_IER_MSI; - serial_port_out(port, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); } static void serial8250_enable_ms(struct uart_port *port) @@ -1713,7 +1766,7 @@ static void serial8250_enable_ms(struct up->ier |= UART_IER_MSI; serial8250_rpm_get(up); - serial_port_out(port, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); serial8250_rpm_put(up); } @@ -2168,8 +2221,7 @@ static void serial8250_put_poll_char(str /* * First save the IER then disable the interrupts */ - ier = serial_port_in(port, UART_IER); - serial8250_clear_IER(up); + ier = serial8250_clear_IER(up); wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); /* @@ -2182,7 +2234,7 @@ static void serial8250_put_poll_char(str * and restore the IER */ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); - serial_port_out(port, UART_IER, ier); + serial8250_set_IER(up, ier); serial8250_rpm_put(up); } @@ -2191,8 +2243,10 @@ static void serial8250_put_poll_char(str int serial8250_do_startup(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); + unsigned long cs_flags; unsigned long flags; unsigned char iir; + bool is_console; int retval; u16 lsr; @@ -2213,7 +2267,7 @@ int serial8250_do_startup(struct uart_po up->acr = 0; serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); serial_port_out(port, UART_EFR, UART_EFR_ECB); - serial_port_out(port, UART_IER, 0); + serial8250_set_IER(up, 0); serial_port_out(port, UART_LCR, 0); serial_icr_write(up, UART_CSR, 0); /* Reset the UART */ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); @@ -2223,7 +2277,7 @@ int serial8250_do_startup(struct uart_po if (port->type == PORT_DA830) { /* Reset the port */ - serial_port_out(port, UART_IER, 0); + serial8250_set_IER(up, 0); serial_port_out(port, UART_DA830_PWREMU_MGMT, 0); mdelay(10); @@ -2322,6 +2376,8 @@ int 
serial8250_do_startup(struct uart_po if (retval) goto out; + is_console = uart_console(port); + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; @@ -2338,6 +2394,9 @@ int serial8250_do_startup(struct uart_po */ spin_lock_irqsave(&port->lock, flags); + if (is_console) + printk_cpu_sync_get_irqsave(cs_flags); + wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); udelay(1); /* allow THRE to set */ @@ -2348,6 +2407,9 @@ int serial8250_do_startup(struct uart_po iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + if (is_console) + printk_cpu_sync_put_irqrestore(cs_flags); + spin_unlock_irqrestore(&port->lock, flags); if (port->irqflags & IRQF_SHARED) @@ -2402,10 +2464,14 @@ int serial8250_do_startup(struct uart_po * Do a quick test to see if we receive an interrupt when we enable * the TX irq. */ + if (is_console) + printk_cpu_sync_get_irqsave(cs_flags); serial_port_out(port, UART_IER, UART_IER_THRI); lsr = serial_port_in(port, UART_LSR); iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + if (is_console) + printk_cpu_sync_put_irqrestore(cs_flags); if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) { if (!(up->bugs & UART_BUG_TXEN)) { @@ -2437,7 +2503,7 @@ dont_test_tx_en: if (up->dma) { const char *msg = NULL; - if (uart_console(port)) + if (is_console) msg = "forbid DMA for kernel console"; else if (serial8250_request_dma(up)) msg = "failed to request DMA"; @@ -2488,7 +2554,7 @@ void serial8250_do_shutdown(struct uart_ */ spin_lock_irqsave(&port->lock, flags); up->ier = 0; - serial_port_out(port, UART_IER, 0); + serial8250_set_IER(up, 0); spin_unlock_irqrestore(&port->lock, flags); synchronize_irq(port->irq); @@ -2850,7 +2916,7 @@ serial8250_do_set_termios(struct uart_po if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; - serial_port_out(port, UART_IER, up->ier); + serial8250_set_IER(up, up->ier); if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; @@ -3315,7 +3381,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default #ifdef CONFIG_SERIAL_8250_CONSOLE -static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) +static void serial8250_console_putchar_locked(struct uart_port *port, unsigned char ch) { struct uart_8250_port *up = up_to_u8250p(port); @@ -3323,6 +3389,18 @@ static void serial8250_console_putchar(s serial_port_out(port, UART_TX, ch); } +static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned long flags; + + wait_for_xmitr(up, UART_LSR_THRE); + + printk_cpu_sync_get_irqsave(flags); + serial8250_console_putchar_locked(port, ch); + printk_cpu_sync_put_irqrestore(flags); +} + /* * Restore serial console when h/w power-off detected */ @@ -3349,6 +3427,32 @@ static void serial8250_console_restore(s serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } +void serial8250_console_write_atomic(struct uart_8250_port *up, + const char *s, unsigned int count) +{ + struct uart_port *port = &up->port; + unsigned long flags; + unsigned int ier; + + printk_cpu_sync_get_irqsave(flags); + + touch_nmi_watchdog(); + + ier = serial8250_clear_IER(up); + + if (atomic_fetch_inc(&up->console_printing)) { + uart_console_write(port, "\n", 1, + serial8250_console_putchar_locked); + } + uart_console_write(port, s, count, serial8250_console_putchar_locked); + atomic_dec(&up->console_printing); + + wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); + serial8250_set_IER(up, ier); 
+ + printk_cpu_sync_put_irqrestore(flags); +} + /* * Print a string to the serial port using the device FIFO * @@ -3394,20 +3498,15 @@ void serial8250_console_write(struct uar struct uart_port *port = &up->port; unsigned long flags; unsigned int ier, use_fifo; - int locked = 1; touch_nmi_watchdog(); - if (oops_in_progress) - locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&port->lock, flags); /* * First save the IER then disable the interrupts */ - ier = serial_port_in(port, UART_IER); - serial8250_clear_IER(up); + ier = serial8250_clear_IER(up); /* check scratch reg to see if port powered off during system sleep */ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { @@ -3441,10 +3540,12 @@ void serial8250_console_write(struct uar */ !(up->port.flags & UPF_CONS_FLOW); + atomic_inc(&up->console_printing); if (likely(use_fifo)) serial8250_console_fifo_write(up, s, count); else uart_console_write(port, s, count, serial8250_console_putchar); + atomic_dec(&up->console_printing); /* * Finally, wait for transmitter to become empty @@ -3457,8 +3558,7 @@ void serial8250_console_write(struct uar if (em485->tx_stopped) up->rs485_stop_tx(up); } - - serial_port_out(port, UART_IER, ier); + serial8250_set_IER(up, ier); /* * The receive handling will happen properly because the @@ -3470,8 +3570,7 @@ void serial8250_console_write(struct uar if (up->msr_saved_flags) serial8250_modem_status(up); - if (locked) - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock_irqrestore(&port->lock, flags); } static unsigned int probe_baud(struct uart_port *port) @@ -3491,6 +3590,7 @@ static unsigned int probe_baud(struct ua int serial8250_console_setup(struct uart_port *port, char *options, bool probe) { + struct uart_8250_port *up = up_to_u8250p(port); int baud = 9600; int bits = 8; int parity = 'n'; @@ -3500,6 +3600,8 @@ int serial8250_console_setup(struct uart if (!port->iobase && !port->membase) return -ENODEV; + atomic_set(&up->console_printing, 0); + if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else if (probe) Index: linux-6.1.90-rt30/drivers/tty/serial/8250/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/8250/Kconfig +++ linux-6.1.90-rt30/drivers/tty/serial/8250/Kconfig @@ -9,6 +9,7 @@ config SERIAL_8250 depends on !S390 select SERIAL_CORE select SERIAL_MCTRL_GPIO if GPIOLIB + select HAVE_ATOMIC_CONSOLE help This selects whether you want to include the driver for the standard serial ports. The standard answer is Y. People who might say N Index: linux-6.1.90-rt30/drivers/tty/serial/amba-pl011.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/amba-pl011.c +++ linux-6.1.90-rt30/drivers/tty/serial/amba-pl011.c @@ -2316,18 +2316,24 @@ pl011_console_write(struct console *co, { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int old_cr = 0, new_cr; - unsigned long flags; + unsigned long flags = 0; int locked = 1; clk_enable(uap->clk); - local_irq_save(flags); + /* + * local_irq_save(flags); + * + * This local_irq_save() is nonsense. If we come in via sysrq + * handling then interrupts are already disabled. Aside of + * that the port.sysrq check is racy on SMP regardless. 
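The pl011 and omap-serial rewrites below share one shape: the bare local_irq_save() disappears and interrupt state is managed by the lock primitive itself, with a trylock in the oops path. A condensed sketch of the resulting skeleton (my_console_write is a placeholder, not code from the patch):

static void my_console_write(struct uart_port *port, const char *s,
			     unsigned int count)
{
	unsigned long flags = 0;
	int locked = 1;

	if (port->sysrq)
		locked = 0;	/* sysrq path: IRQs already disabled */
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... save IER, emit s[0..count), restore IER ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}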
+ */ if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock(&uap->port.lock); + locked = spin_trylock_irqsave(&uap->port.lock, flags); else - spin_lock(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* * First save the CR then disable the interrupts @@ -2353,8 +2359,7 @@ pl011_console_write(struct console *co, pl011_write(old_cr, uap, REG_CR); if (locked) - spin_unlock(&uap->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&uap->port.lock, flags); clk_disable(uap->clk); } Index: linux-6.1.90-rt30/drivers/tty/serial/omap-serial.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/serial/omap-serial.c +++ linux-6.1.90-rt30/drivers/tty/serial/omap-serial.c @@ -1241,13 +1241,10 @@ serial_omap_console_write(struct console unsigned int ier; int locked = 1; - local_irq_save(flags); - if (up->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); else - spin_lock(&up->port.lock); + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -1274,8 +1271,7 @@ serial_omap_console_write(struct console check_modem_status(up); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init Index: linux-6.1.90-rt30/drivers/tty/sysrq.c =================================================================== --- linux-6.1.90-rt30.orig/drivers/tty/sysrq.c +++ linux-6.1.90-rt30/drivers/tty/sysrq.c @@ -582,6 +582,7 @@ void __handle_sysrq(int key, bool check_ rcu_sysrq_start(); rcu_read_lock(); + printk_prefer_direct_enter(); /* * Raise the apparent loglevel to maximum so that the sysrq header * is shown to provide the user with positive feedback. 
We do not @@ -623,6 +624,7 @@ void __handle_sysrq(int key, bool check_ pr_cont("\n"); console_loglevel = orig_log_level; } + printk_prefer_direct_exit(); rcu_read_unlock(); rcu_sysrq_end(); Index: linux-6.1.90-rt30/drivers/vdpa/vdpa_user/iova_domain.h =================================================================== --- linux-6.1.90-rt30.orig/drivers/vdpa/vdpa_user/iova_domain.h +++ linux-6.1.90-rt30/drivers/vdpa/vdpa_user/iova_domain.h @@ -14,7 +14,6 @@ #include <linux/iova.h> #include <linux/dma-mapping.h> #include <linux/vhost_iotlb.h> -#include <linux/rwlock.h> #define IOVA_START_PFN 1 Index: linux-6.1.90-rt30/include/linux/console.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/console.h +++ linux-6.1.90-rt30/include/linux/console.h @@ -16,6 +16,7 @@ #include <linux/atomic.h> #include <linux/types.h> +#include <linux/mutex.h> struct vc_data; struct console_font_op; @@ -137,9 +138,19 @@ static inline int con_debug_leave(void) #define CON_BRL (32) /* Used for a braille device */ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */ +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE +struct console_atomic_data { + u64 seq; + char *text; + char *ext_text; + char *dropped_text; +}; +#endif + struct console { char name[16]; void (*write)(struct console *, const char *, unsigned); + void (*write_atomic)(struct console *, const char *, unsigned); int (*read)(struct console *, char *, unsigned); struct tty_driver *(*device)(struct console *, int *); void (*unblank)(void); @@ -152,7 +163,26 @@ struct console { uint ispeed; uint ospeed; u64 seq; - unsigned long dropped; + atomic_long_t dropped; +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + struct console_atomic_data *atomic_data; +#endif + struct task_struct *thread; + bool blocked; + + /* + * The per-console lock is used by printing kthreads to synchronize + * this console with callers of console_lock(). This is necessary in + * order to allow printing kthreads to run in parallel to each other, + * while each safely accessing the @blocked field and synchronizing + * against direct printing via console_lock/console_unlock. + * + * Note: For synchronizing against direct printing via + * console_trylock/console_unlock, see the static global + * variable @console_kthreads_active.
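A usage sketch for the new @lock and @blocked fields described above (my_emit_one_record is hypothetical; the real users are the per-console printing kthreads added later in this series): the kthread takes the per-console mutex and backs off while a console_lock() holder has the console blocked.

static bool my_emit_one_record(struct console *con, const char *text,
			       size_t len)
{
	bool printed = false;

	mutex_lock(&con->lock);
	if (!con->blocked) {
		con->write(con, text, len);
		printed = true;
	}
	mutex_unlock(&con->lock);

	return printed;
}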
+ */ + struct mutex lock; + void *data; struct console *next; }; @@ -167,6 +197,7 @@ extern int console_set_on_cmdline; extern struct console *early_console; enum con_flush_mode { + CONSOLE_ATOMIC_FLUSH_PENDING, CONSOLE_FLUSH_PENDING, CONSOLE_REPLAY_ALL, }; Index: linux-6.1.90-rt30/include/linux/entry-common.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/entry-common.h +++ linux-6.1.90-rt30/include/linux/entry-common.h @@ -57,9 +57,15 @@ # define ARCH_EXIT_TO_USER_MODE_WORK (0) #endif +#ifdef CONFIG_PREEMPT_LAZY +# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) +#else +# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED) +#endif + #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ + _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** Index: linux-6.1.90-rt30/include/linux/interrupt.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/interrupt.h +++ linux-6.1.90-rt30/include/linux/interrupt.h @@ -609,6 +609,35 @@ extern void __raise_softirq_irqoff(unsig extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +#ifdef CONFIG_PREEMPT_RT +DECLARE_PER_CPU(struct task_struct *, timersd); +DECLARE_PER_CPU(unsigned long, pending_timer_softirq); + +extern void raise_timer_softirq(void); +extern void raise_hrtimer_softirq(void); + +static inline unsigned int local_pending_timers(void) +{ + return __this_cpu_read(pending_timer_softirq); +} + +#else +static inline void raise_timer_softirq(void) +{ + raise_softirq(TIMER_SOFTIRQ); +} + +static inline void raise_hrtimer_softirq(void) +{ + raise_softirq_irqoff(HRTIMER_SOFTIRQ); +} + +static inline unsigned int local_pending_timers(void) +{ + return local_softirq_pending(); +} +#endif + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); static inline struct task_struct *this_cpu_ksoftirqd(void) Index: linux-6.1.90-rt30/include/linux/io-mapping.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/io-mapping.h +++ linux-6.1.90-rt30/include/linux/io-mapping.h @@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mappi BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - preempt_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); + else + migrate_disable(); pagefault_disable(); return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); } @@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *va { kunmap_local_indexed((void __force *)vaddr); pagefault_enable(); - preempt_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); + else + migrate_enable(); } static inline void __iomem * @@ -162,7 +168,10 @@ static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { - preempt_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); + else + migrate_disable(); pagefault_disable(); return io_mapping_map_wc(mapping, offset, PAGE_SIZE); } @@ -172,7 +181,10 @@ io_mapping_unmap_atomic(void __iomem *va { io_mapping_unmap(vaddr); pagefault_enable(); - preempt_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); + else + migrate_enable(); } static inline void __iomem * Index: linux-6.1.90-rt30/include/linux/lockdep.h 
=================================================================== --- linux-6.1.90-rt30.orig/include/linux/lockdep.h +++ linux-6.1.90-rt30/include/linux/lockdep.h @@ -339,6 +339,16 @@ extern void lock_unpin_lock(struct lockd #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) +/* + * Must use lock_map_aquire_try() with override maps to avoid + * lockdep thinking they participate in the block chain. + */ +#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ + struct lockdep_map _name = { \ + .name = #_name "-wait-type-override", \ + .wait_type_inner = _wait_type, \ + .lock_type = LD_LOCK_WAIT_OVERRIDE, } + #else /* !CONFIG_LOCKDEP */ static inline void lockdep_init_task(struct task_struct *task) @@ -427,6 +437,9 @@ extern int lockdep_is_held(const void *) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) +#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ + struct lockdep_map __maybe_unused _name = {} + #endif /* !LOCKDEP */ enum xhlock_context_t { @@ -435,7 +448,6 @@ enum xhlock_context_t { XHLOCK_CTX_NR, }; -#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) /* * To initialize a lockdep_map statically use this macro. * Note that _name must not be NULL. @@ -552,6 +564,7 @@ do { \ #define rwsem_release(l, i) lock_release(l, i) #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_) #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) #define lock_map_release(l) lock_release(l, _THIS_IP_) Index: linux-6.1.90-rt30/include/linux/lockdep_types.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/lockdep_types.h +++ linux-6.1.90-rt30/include/linux/lockdep_types.h @@ -33,6 +33,7 @@ enum lockdep_wait_type { enum lockdep_lock_type { LD_LOCK_NORMAL = 0, /* normal, catch all */ LD_LOCK_PERCPU, /* percpu */ + LD_LOCK_WAIT_OVERRIDE, /* annotation */ LD_LOCK_MAX, }; Index: linux-6.1.90-rt30/include/linux/netdevice.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/netdevice.h +++ linux-6.1.90-rt30/include/linux/netdevice.h @@ -3169,7 +3169,11 @@ struct softnet_data { int defer_count; int defer_ipi_scheduled; struct sk_buff *defer_list; +#ifndef CONFIG_PREEMPT_RT call_single_data_t defer_csd; +#else + struct work_struct defer_work; +#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) Index: linux-6.1.90-rt30/include/linux/preempt.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/preempt.h +++ linux-6.1.90-rt30/include/linux/preempt.h @@ -208,6 +208,20 @@ extern void preempt_count_sub(int val); #define preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) +#ifdef CONFIG_PREEMPT_LAZY +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) +#define inc_preempt_lazy_count() add_preempt_lazy_count(1) +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) +#else 
+#define add_preempt_lazy_count(val) do { } while (0) +#define sub_preempt_lazy_count(val) do { } while (0) +#define inc_preempt_lazy_count() do { } while (0) +#define dec_preempt_lazy_count() do { } while (0) +#define preempt_lazy_count() (0) +#endif + #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ @@ -216,6 +230,12 @@ do { \ barrier(); \ } while (0) +#define preempt_lazy_disable() \ +do { \ + inc_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ @@ -247,6 +267,18 @@ do { \ __preempt_schedule(); \ } while (0) +/* + * open code preempt_check_resched() because it is not exported to modules and + * used by local_unlock() or bpf_enable_instrumentation(). + */ +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ + if (should_resched(0)) \ + __preempt_schedule(); \ +} while (0) + #else /* !CONFIG_PREEMPTION */ #define preempt_enable() \ do { \ @@ -254,6 +286,12 @@ do { \ preempt_count_dec(); \ } while (0) +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define preempt_enable_notrace() \ do { \ barrier(); \ @@ -294,6 +332,9 @@ do { \ #define preempt_enable_notrace() barrier() #define preemptible() 0 +#define preempt_lazy_disable() barrier() +#define preempt_lazy_enable() barrier() + #endif /* CONFIG_PREEMPT_COUNT */ #ifdef MODULE @@ -312,7 +353,7 @@ do { \ } while (0) #define preempt_fold_need_resched() \ do { \ - if (tif_need_resched()) \ + if (tif_need_resched_now()) \ set_preempt_need_resched(); \ } while (0) @@ -428,8 +469,15 @@ extern void migrate_enable(void); #else -static inline void migrate_disable(void) { } -static inline void migrate_enable(void) { } +static inline void migrate_disable(void) +{ + preempt_lazy_disable(); +} + +static inline void migrate_enable(void) +{ + preempt_lazy_enable(); +} #endif /* CONFIG_SMP */ Index: linux-6.1.90-rt30/include/linux/printk.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/printk.h +++ linux-6.1.90-rt30/include/linux/printk.h @@ -168,6 +168,9 @@ extern void __printk_safe_exit(void); */ #define printk_deferred_enter __printk_safe_enter #define printk_deferred_exit __printk_safe_exit +extern void printk_prefer_direct_enter(void); +extern void printk_prefer_direct_exit(void); +extern void try_block_console_kthreads(int timeout_ms); /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -219,6 +222,18 @@ static inline void printk_deferred_exit( { } +static inline void printk_prefer_direct_enter(void) +{ +} + +static inline void printk_prefer_direct_exit(void) +{ +} + +static inline void try_block_console_kthreads(int timeout_ms) +{ +} + static inline int printk_ratelimit(void) { return 0; Index: linux-6.1.90-rt30/include/linux/sched.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/sched.h +++ linux-6.1.90-rt30/include/linux/sched.h @@ -2061,6 +2061,43 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +#ifdef CONFIG_PREEMPT_LAZY +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) +{ + return 
unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); +} + +static inline int need_resched_lazy(void) +{ + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +} + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#else +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } +static inline int need_resched_lazy(void) { return 0; } + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#endif + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return Index: linux-6.1.90-rt30/include/linux/sched/signal.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/sched/signal.h +++ linux-6.1.90-rt30/include/linux/sched/signal.h @@ -135,7 +135,7 @@ struct signal_struct { #ifdef CONFIG_POSIX_TIMERS /* POSIX.1b Interval Timers */ - int posix_timer_id; + unsigned int next_posix_timer_id; struct list_head posix_timers; /* ITIMER_REAL timer for the process */ Index: linux-6.1.90-rt30/include/linux/sched/task.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/sched/task.h +++ linux-6.1.90-rt30/include/linux/sched/task.h @@ -141,8 +141,12 @@ static inline void put_task_struct(struc */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible()) call_rcu(&t->rcu, __put_task_struct_rcu_cb); - else + else { + static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP); + lock_map_acquire_try(&put_task_map); __put_task_struct(t); + lock_map_release(&put_task_map); + } } DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T)) Index: linux-6.1.90-rt30/include/linux/serial_8250.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/serial_8250.h +++ linux-6.1.90-rt30/include/linux/serial_8250.h @@ -7,6 +7,7 @@ #ifndef _LINUX_SERIAL_8250_H #define _LINUX_SERIAL_8250_H +#include <linux/atomic.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/dmaengine.h> @@ -124,6 +125,8 @@ struct uart_8250_port { #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; + atomic_t console_printing; + struct uart_8250_dma *dma; const struct uart_8250_ops *ops; @@ -179,6 +182,8 @@ void serial8250_init_port(struct uart_82 void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); +void serial8250_console_write_atomic(struct uart_8250_port *up, const char *s, + unsigned int count); int serial8250_console_setup(struct uart_port *port, char *options, bool probe); int serial8250_console_exit(struct uart_port *port); Index: linux-6.1.90-rt30/include/linux/thread_info.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/thread_info.h +++ linux-6.1.90-rt30/include/linux/thread_info.h @@ -177,7 +177,17 @@ static __always_inline unsigned long rea clear_ti_thread_flag(task_thread_info(t), TIF_##fl) #endif /* !CONFIG_GENERIC_ENTRY */ -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#ifdef CONFIG_PREEMPT_LAZY +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ + test_thread_flag(TIF_NEED_RESCHED_LAZY)) +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) +#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY) + +#else +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_now()
test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_lazy() 0 +#endif #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, Index: linux-6.1.90-rt30/include/linux/trace_events.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/trace_events.h +++ linux-6.1.90-rt30/include/linux/trace_events.h @@ -70,6 +70,7 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; + unsigned char preempt_lazy_count; }; #define TRACE_EVENT_TYPE_MAX \ @@ -159,9 +160,10 @@ static inline void tracing_generic_entry unsigned int trace_ctx) { entry->preempt_count = trace_ctx & 0xff; + entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff; entry->pid = current->pid; entry->type = type; - entry->flags = trace_ctx >> 16; + entry->flags = trace_ctx >> 24; } unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); @@ -172,7 +174,13 @@ enum trace_flag_type { TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, +#ifdef CONFIG_PREEMPT_LAZY + TRACE_FLAG_PREEMPT_RESCHED = 0x00, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x20, +#else + TRACE_FLAG_NEED_RESCHED_LAZY = 0x00, TRACE_FLAG_PREEMPT_RESCHED = 0x20, +#endif TRACE_FLAG_NMI = 0x40, TRACE_FLAG_BH_OFF = 0x80, }; Index: linux-6.1.90-rt30/include/linux/u64_stats_sync.h =================================================================== --- linux-6.1.90-rt30.orig/include/linux/u64_stats_sync.h +++ linux-6.1.90-rt30/include/linux/u64_stats_sync.h @@ -214,16 +214,4 @@ static inline bool u64_stats_fetch_retry return __u64_stats_fetch_retry(syncp, start); } -/* Obsolete interfaces */ -static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) -{ - return u64_stats_fetch_begin(syncp); -} - -static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, - unsigned int start) -{ - return u64_stats_fetch_retry(syncp, start); -} - #endif /* _LINUX_U64_STATS_SYNC_H */ Index: linux-6.1.90-rt30/init/Kconfig =================================================================== --- linux-6.1.90-rt30.orig/init/Kconfig +++ linux-6.1.90-rt30/init/Kconfig @@ -1591,6 +1591,10 @@ config PRINTK very difficult to diagnose system problems, saying N here is strongly discouraged. 
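For reference, the u64_stats_sync.h hunk above deletes the obsolete _irq wrappers; every driver conversion earlier in the series (virtio_net, vrf, vxlan, mhi_wwan_mbim, xen-netfront, spi, bpf) collapses to the plain lockless reader loop sketched here. struct my_pcpu_stats and my_read_packets are illustrative names only:

struct my_pcpu_stats {
	u64			packets;
	struct u64_stats_sync	syncp;
};

static u64 my_read_packets(const struct my_pcpu_stats *s)
{
	unsigned int start;
	u64 packets;

	/* Retry until the snapshot was not torn by a concurrent writer. */
	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}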
+config HAVE_ATOMIC_CONSOLE + bool + default n + config BUG bool "BUG() support" if EXPERT default y Index: linux-6.1.90-rt30/kernel/Kconfig.preempt =================================================================== --- linux-6.1.90-rt30.orig/kernel/Kconfig.preempt +++ linux-6.1.90-rt30/kernel/Kconfig.preempt @@ -1,5 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only +config HAVE_PREEMPT_LAZY + bool + +config PREEMPT_LAZY + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT + config PREEMPT_NONE_BUILD bool Index: linux-6.1.90-rt30/kernel/bpf/syscall.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/bpf/syscall.c +++ linux-6.1.90-rt30/kernel/bpf/syscall.c @@ -2143,11 +2143,11 @@ static void bpf_prog_get_stats(const str st = per_cpu_ptr(prog->stats, cpu); do { - start = u64_stats_fetch_begin_irq(&st->syncp); + start = u64_stats_fetch_begin(&st->syncp); tnsecs = u64_stats_read(&st->nsecs); tcnt = u64_stats_read(&st->cnt); tmisses = u64_stats_read(&st->misses); - } while (u64_stats_fetch_retry_irq(&st->syncp, start)); + } while (u64_stats_fetch_retry(&st->syncp, start)); nsecs += tnsecs; cnt += tcnt; misses += tmisses; @@ -2760,28 +2760,31 @@ static void bpf_link_put_deferred(struct bpf_link_free(link); } -/* bpf_link_put can be called from atomic context, but ensures that resources - * are freed from process context +/* bpf_link_put might be called from atomic context. It needs to be called + * from sleepable context in order to acquire sleeping locks during the process. */ void bpf_link_put(struct bpf_link *link) { if (!atomic64_dec_and_test(&link->refcnt)) return; - if (in_atomic()) { - INIT_WORK(&link->work, bpf_link_put_deferred); - schedule_work(&link->work); - } else { - bpf_link_free(link); - } + INIT_WORK(&link->work, bpf_link_put_deferred); + schedule_work(&link->work); } EXPORT_SYMBOL(bpf_link_put); +static void bpf_link_put_direct(struct bpf_link *link) +{ + if (!atomic64_dec_and_test(&link->refcnt)) + return; + bpf_link_free(link); +} + static int bpf_link_release(struct inode *inode, struct file *filp) { struct bpf_link *link = filp->private_data; - bpf_link_put(link); + bpf_link_put_direct(link); return 0; } @@ -4711,7 +4714,7 @@ out_put_progs: if (ret) bpf_prog_put(new_prog); out_put_link: - bpf_link_put(link); + bpf_link_put_direct(link); return ret; } @@ -4734,7 +4737,7 @@ static int link_detach(union bpf_attr *a else ret = -EOPNOTSUPP; - bpf_link_put(link); + bpf_link_put_direct(link); return ret; } @@ -4804,7 +4807,7 @@ static int bpf_link_get_fd_by_id(const u fd = bpf_link_new_fd(link); if (fd < 0) - bpf_link_put(link); + bpf_link_put_direct(link); return fd; } @@ -4881,7 +4884,7 @@ static int bpf_iter_create(union bpf_att return PTR_ERR(link); err = bpf_iter_new_fd(link); - bpf_link_put(link); + bpf_link_put_direct(link); return err; } Index: linux-6.1.90-rt30/kernel/entry/common.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/entry/common.c +++ linux-6.1.90-rt30/kernel/entry/common.c @@ -161,7 +161,7 @@ static unsigned long exit_to_user_mode_l local_irq_enable_exit_to_user(ti_work); - if (ti_work & _TIF_NEED_RESCHED) + if (ti_work & _TIF_NEED_RESCHED_MASK) schedule(); if (ti_work & _TIF_UPROBE) @@ -392,7 +392,7 @@ void raw_irqentry_exit_cond_resched(void rcu_irq_exit_check_preempt(); if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) WARN_ON_ONCE(!on_thread_stack()); - if (need_resched()) + if (should_resched(0)) preempt_schedule_irq(); } } Index: linux-6.1.90-rt30/kernel/hung_task.c 
=================================================================== --- linux-6.1.90-rt30.orig/kernel/hung_task.c +++ linux-6.1.90-rt30/kernel/hung_task.c @@ -127,6 +127,8 @@ static void check_hung_task(struct task_ * complain: */ if (sysctl_hung_task_warnings) { + printk_prefer_direct_enter(); + if (sysctl_hung_task_warnings > 0) sysctl_hung_task_warnings--; pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", @@ -142,6 +144,8 @@ static void check_hung_task(struct task_ if (sysctl_hung_task_all_cpu_backtrace) hung_task_show_all_bt = true; + + printk_prefer_direct_exit(); } touch_nmi_watchdog(); @@ -212,12 +216,17 @@ static void check_hung_uninterruptible_t } unlock: rcu_read_unlock(); - if (hung_task_show_lock) + if (hung_task_show_lock) { + printk_prefer_direct_enter(); debug_show_all_locks(); + printk_prefer_direct_exit(); + } if (hung_task_show_all_bt) { hung_task_show_all_bt = false; + printk_prefer_direct_enter(); trigger_all_cpu_backtrace(); + printk_prefer_direct_exit(); } if (hung_task_call_panic) Index: linux-6.1.90-rt30/kernel/ksysfs.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/ksysfs.c +++ linux-6.1.90-rt30/kernel/ksysfs.c @@ -142,6 +142,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_CRASH_CORE */ +#if defined(CONFIG_PREEMPT_RT) +static ssize_t realtime_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 1); +} +KERNEL_ATTR_RO(realtime); +#endif + /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -233,6 +242,9 @@ static struct attribute * kernel_attrs[] &rcu_expedited_attr.attr, &rcu_normal_attr.attr, #endif +#ifdef CONFIG_PREEMPT_RT + &realtime_attr.attr, +#endif NULL }; Index: linux-6.1.90-rt30/kernel/locking/lockdep.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/locking/lockdep.c +++ linux-6.1.90-rt30/kernel/locking/lockdep.c @@ -2245,6 +2245,9 @@ static inline bool usage_match(struct lo static inline bool usage_skip(struct lock_list *entry, void *mask) { + if (entry->class->lock_type == LD_LOCK_NORMAL) + return false; + /* * Skip local_lock() for irq inversion detection. * @@ -2271,14 +2274,16 @@ static inline bool usage_skip(struct loc * As a result, we will skip local_lock(), when we search for irq * inversion bugs. */ - if (entry->class->lock_type == LD_LOCK_PERCPU) { - if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) - return false; + if (entry->class->lock_type == LD_LOCK_PERCPU && + DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) + return false; - return true; - } + /* + * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually + * a lock and only used to override the wait_type. + */ - return false; + return true; } /* @@ -4745,7 +4750,8 @@ static int check_wait_context(struct tas for (; depth < curr->lockdep_depth; depth++) { struct held_lock *prev = curr->held_locks + depth; - u8 prev_inner = hlock_class(prev)->wait_type_inner; + struct lock_class *class = hlock_class(prev); + u8 prev_inner = class->wait_type_inner; if (prev_inner) { /* @@ -4755,6 +4761,14 @@ static int check_wait_context(struct tas * Also due to trylocks. */ curr_inner = min(curr_inner, prev_inner); + + /* + * Allow override for annotations -- this is typically + * only valid/needed for code that only exists when + * CONFIG_PREEMPT_RT=n. 
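Read together with the sched/task.h hunk earlier, the override map is used like the following sketch (my_put_task is a hypothetical wrapper around the real put_task_struct() change):

static void my_put_task(struct task_struct *t)
{
	static DEFINE_WAIT_OVERRIDE_MAP(my_map, LD_WAIT_SLEEP);

	/*
	 * Acquire-try on the override map so lockdep validates that
	 * sleeping is permitted here, without recording my_map as a
	 * real participant in the dependency chain.
	 */
	lock_map_acquire_try(&my_map);
	__put_task_struct(t);
	lock_map_release(&my_map);
}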
+ */ + if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE)) + curr_inner = prev_inner; } } Index: linux-6.1.90-rt30/kernel/locking/rwbase_rt.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/locking/rwbase_rt.c +++ linux-6.1.90-rt30/kernel/locking/rwbase_rt.c @@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(st int ret; raw_spin_lock_irq(&rtm->wait_lock); - /* - * Allow readers, as long as the writer has not completely - * acquired the semaphore for write. - */ - if (atomic_read(&rwb->readers) != WRITER_BIAS) { - atomic_inc(&rwb->readers); - raw_spin_unlock_irq(&rtm->wait_lock); - return 0; - } /* * Call into the slow lock path with the rtmutex->wait_lock Index: linux-6.1.90-rt30/kernel/panic.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/panic.c +++ linux-6.1.90-rt30/kernel/panic.c @@ -322,7 +322,6 @@ void panic(const char *fmt, ...) panic_smp_self_stop(); console_verbose(); - bust_spinlocks(1); va_start(args, fmt); len = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); @@ -339,6 +338,11 @@ void panic(const char *fmt, ...) dump_stack(); #endif + /* If atomic consoles are available, flush the kernel log. */ + console_flush_on_panic(CONSOLE_ATOMIC_FLUSH_PENDING); + + bust_spinlocks(1); + /* * If kgdb is enabled, give it a chance to run before we stop all * the other CPUs or else we won't be able to debug processes left @@ -661,6 +665,8 @@ void __warn(const char *file, int line, { disable_trace_on_warning(); + printk_prefer_direct_enter(); + if (file) pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", raw_smp_processor_id(), current->pid, file, line, @@ -689,6 +695,8 @@ void __warn(const char *file, int line, /* Just a warning, don't kill lockdep. */ add_taint(taint, LOCKDEP_STILL_OK); + + printk_prefer_direct_exit(); } #ifndef __WARN_FLAGS Index: linux-6.1.90-rt30/kernel/printk/internal.h =================================================================== --- linux-6.1.90-rt30.orig/kernel/printk/internal.h +++ linux-6.1.90-rt30/kernel/printk/internal.h @@ -20,6 +20,8 @@ enum printk_info_flags { LOG_CONT = 8, /* text is a fragment of a continuation line */ }; +extern bool block_console_kthreads; + __printf(4, 0) int vprintk_store(int facility, int level, const struct dev_printk_info *dev_info, Index: linux-6.1.90-rt30/kernel/printk/printk.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/printk/printk.c +++ linux-6.1.90-rt30/kernel/printk/printk.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -221,6 +222,36 @@ int devkmsg_sysctl_set_loglvl(struct ctl #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */ /* + * Used to synchronize printing kthreads against direct printing via + * console_trylock/console_unlock. + * + * Values: + * -1 = console kthreads atomically blocked (via global trylock) + * 0 = no kthread printing, console not locked (via trylock) + * >0 = kthread(s) actively printing + * + * Note: For synchronizing against direct printing via + * console_lock/console_unlock, see the @lock variable in + * struct console. 
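A sketch of how the counter documented above is consumed, using the helpers defined immediately below (the record emission itself is elided):

static void my_kthread_print_once(struct console *con)
{
	/* Fails while direct printing holds the -1 block value. */
	if (!console_kthread_printing_tryenter())
		return;

	/* ... emit one pending record via con->write() ... */

	console_kthread_printing_exit();
}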
+ */ +static atomic_t console_kthreads_active = ATOMIC_INIT(0); + +#define console_kthreads_atomic_tryblock() \ + (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0) +#define console_kthreads_atomic_unblock() \ + atomic_cmpxchg(&console_kthreads_active, -1, 0) +#define console_kthreads_atomically_blocked() \ + (atomic_read(&console_kthreads_active) == -1) + +#define console_kthread_printing_tryenter() \ + atomic_inc_unless_negative(&console_kthreads_active) +#define console_kthread_printing_exit() \ + atomic_dec(&console_kthreads_active) + +/* Block console kthreads to avoid processing new messages. */ +bool block_console_kthreads; + +/* * Helper macros to handle lockdep when locking/unlocking console_sem. We use * macros instead of functions so that _RET_IP_ contains useful information. */ @@ -268,14 +299,49 @@ static bool panic_in_progress(void) } /* - * This is used for debugging the mess that is the VT code by - * keeping track if we have the console semaphore held. It's - * definitely not the perfect debug tool (we don't know if _WE_ - * hold it and are racing, but it helps tracking those weird code - * paths in the console code where we end up in places I want - * locked without the console semaphore held). + * Tracks whether kthread printers are all blocked. A value of true implies + * that the console is locked via console_lock() or the console is suspended. + * Writing to this variable requires holding @console_sem. + */ +static bool console_kthreads_blocked; + +/* + * Block all kthread printers from a schedulable context. + * + * Requires holding @console_sem. */ -static int console_locked, console_suspended; +static void console_kthreads_block(void) +{ + struct console *con; + + for_each_console(con) { + mutex_lock(&con->lock); + con->blocked = true; + mutex_unlock(&con->lock); + } + + console_kthreads_blocked = true; +} + +/* + * Unblock all kthread printers from a schedulable context. + * + * Requires holding @console_sem. + */ +static void console_kthreads_unblock(void) +{ + struct console *con; + + for_each_console(con) { + mutex_lock(&con->lock); + con->blocked = false; + mutex_unlock(&con->lock); + } + + console_kthreads_blocked = false; +} + +static int console_suspended; /* * Array of consoles built from command line options (console=) @@ -358,7 +424,75 @@ static int console_msg_format = MSG_FORM /* syslog_lock protects syslog_* variables and write access to clear_seq. */ static DEFINE_MUTEX(syslog_lock); +/* + * A flag to signify if printk_activate_kthreads() has already started the + * kthread printers. If true, any later registered consoles must start their + * own kthread directly. The flag is write protected by the console_lock. + */ +static bool printk_kthreads_available; + #ifdef CONFIG_PRINTK +static atomic_t printk_prefer_direct = ATOMIC_INIT(0); + +/** + * printk_prefer_direct_enter - cause printk() calls to attempt direct + * printing to all enabled consoles + * + * Since it is not possible to call into the console printing code from any + * context, there is no guarantee that direct printing will occur. + * + * This globally effects all printk() callers. + * + * Context: Any context. + */ +void printk_prefer_direct_enter(void) +{ + atomic_inc(&printk_prefer_direct); +} + +/** + * printk_prefer_direct_exit - restore printk() behavior + * + * Context: Any context. 
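The enter/exit pair is always used as a bracket around a multi-line emergency report, as in the hung_task, __warn and sysrq hunks earlier; condensed into a hypothetical helper:

static void my_report(const char *what)
{
	printk_prefer_direct_enter();

	pr_err("INFO: %s\n", what);
	dump_stack();

	printk_prefer_direct_exit();
}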
+ */ +void printk_prefer_direct_exit(void) +{ + WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0); +} + +/* + * Calling printk() always wakes kthread printers so that they can + * flush the new message to their respective consoles. Also, if direct + * printing is allowed, printk() tries to flush the messages directly. + * + * Direct printing is allowed in situations when the kthreads + * are not available or the system is in a problematic state. + * + * See the implementation about possible races. + */ +static inline bool allow_direct_printing(void) +{ + /* + * Checking kthread availability is a possible race because the + * kthread printers can become permanently disabled during runtime. + * However, doing that requires holding the console_lock, so any + * pending messages will be direct printed by console_unlock(). + */ + if (!printk_kthreads_available) + return true; + + /* + * Prefer direct printing when the system is in a problematic state. + * The context that sets this state will always see the updated value. + * The other contexts do not care. Anyway, direct printing is just a + * best effort. The direct output is only possible when console_lock + * is not already taken and no kthread printers are actively printing. + */ + return (system_state > SYSTEM_RUNNING || + oops_in_progress || + atomic_read(&printk_prefer_direct)); +} + DECLARE_WAIT_QUEUE_HEAD(log_wait); /* All 3 protected by @syslog_lock. */ /* the next printk record to read by syslog(READ) or /proc/kmsg */ @@ -1876,6 +2010,7 @@ static int console_lock_spinning_disable return 1; } +#if !IS_ENABLED(CONFIG_PREEMPT_RT) /** * console_trylock_spinning - try to get console_lock by busy waiting * @@ -1955,6 +2090,7 @@ static int console_trylock_spinning(void return 1; } +#endif /* CONFIG_PREEMPT_RT */ /* * Call the specified console driver, asking it to write out the specified @@ -1962,19 +2098,28 @@ static int console_trylock_spinning(void * dropped, a dropped message will be written out first. */ static void call_console_driver(struct console *con, const char *text, size_t len, - char *dropped_text) + char *dropped_text, bool atomic_printing) { + unsigned long dropped = 0; size_t dropped_len; - if (con->dropped && dropped_text) { + if (dropped_text) + dropped = atomic_long_xchg_relaxed(&con->dropped, 0); + + if (dropped) { dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX, "** %lu printk messages dropped **\n", - con->dropped); - con->dropped = 0; - con->write(con, dropped_text, dropped_len); + dropped); + if (atomic_printing) + con->write_atomic(con, dropped_text, dropped_len); + else + con->write(con, dropped_text, dropped_len); } - con->write(con, text, len); + if (atomic_printing) + con->write_atomic(con, text, len); + else + con->write(con, text, len); } /* @@ -2284,10 +2429,22 @@ asmlinkage int vprintk_emit(int facility printed_len = vprintk_store(facility, level, dev_info, fmt, args); /* If called from the scheduler, we can not call up(). */ - if (!in_sched) { + if (!in_sched && allow_direct_printing()) { +#if IS_ENABLED(CONFIG_PREEMPT_RT) + /* + * Use the non-spinning trylock since PREEMPT_RT does not + * support console lock handovers. + * + * Direct printing will most likely involve taking spinlocks. + * For PREEMPT_RT, this is only allowed if in a preemptible + * context. + */ + if (preemptible() && console_trylock()) + console_unlock(); +#else /* * The caller may be holding system-critical or - * timing-sensitive locks. Disable preemption during + * timing-sensitive locks. 
Disable preemption during direct * printing of all remaining records to all consoles so that * this context can return as soon as possible. Hopefully * another printk() caller will take over the printing. @@ -2302,6 +2459,7 @@ asmlinkage int vprintk_emit(int facility if (console_trylock_spinning()) console_unlock(); preempt_enable(); +#endif } if (in_sched) @@ -2332,9 +2490,81 @@ asmlinkage __visible int _printk(const c } EXPORT_SYMBOL(_printk); +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE +static void __free_atomic_data(struct console_atomic_data *d) +{ + kfree(d->text); + kfree(d->ext_text); + kfree(d->dropped_text); +} + +static void free_atomic_data(struct console_atomic_data *d) +{ + int count = 1; + int i; + + if (!d) + return; + +#ifdef CONFIG_HAVE_NMI + count = 2; +#endif + + for (i = 0; i < count; i++) + __free_atomic_data(&d[i]); + kfree(d); +} + +static int __alloc_atomic_data(struct console_atomic_data *d, short flags) +{ + d->text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); + if (!d->text) + return -1; + + if (flags & CON_EXTENDED) { + d->ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); + if (!d->ext_text) + return -1; + } else { + d->dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); + if (!d->dropped_text) + return -1; + } + + return 0; +} + +static struct console_atomic_data *alloc_atomic_data(short flags) +{ + struct console_atomic_data *d; + int count = 1; + int i; + +#ifdef CONFIG_HAVE_NMI + count = 2; +#endif + + d = kzalloc(sizeof(*d) * count, GFP_KERNEL); + if (!d) + goto err_out; + + for (i = 0; i < count; i++) { + if (__alloc_atomic_data(&d[i], flags) != 0) + goto err_out; + } + + return d; +err_out: + free_atomic_data(d); + return NULL; +} +#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */ + static bool pr_flush(int timeout_ms, bool reset_on_progress); static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); +static void printk_start_kthread(struct console *con); + #else /* CONFIG_PRINTK */ #define CONSOLE_LOG_MAX 0 @@ -2345,6 +2575,8 @@ static bool __pr_flush(struct console *c #define prb_first_valid_seq(rb) 0 #define prb_next_seq(rb) 0 +#define free_atomic_data(d) + static u64 syslog_seq; static size_t record_print_text(const struct printk_record *r, @@ -2363,12 +2595,14 @@ static ssize_t msg_print_ext_body(char * static void console_lock_spinning_enable(void) { } static int console_lock_spinning_disable_and_check(void) { return 0; } static void call_console_driver(struct console *con, const char *text, size_t len, - char *dropped_text) + char *dropped_text, bool atomic_printing) { } static bool suppress_message_printing(int level) { return false; } static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } +static void printk_start_kthread(struct console *con) { } +static bool allow_direct_printing(void) { return true; } #endif /* CONFIG_PRINTK */ @@ -2587,6 +2821,14 @@ static int console_cpu_notify(unsigned i /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); + else { + /* + * If a new CPU comes online, the conditions for + * printer_should_wake() may have changed for some + * kthread printer with !CON_ANYTIME. 
+ */ + wake_up_klogd(); + } } return 0; } @@ -2629,7 +2871,7 @@ void console_lock(void) down_console_sem(); if (console_suspended) return; - console_locked = 1; + console_kthreads_block(); console_may_schedule = 1; } EXPORT_SYMBOL(console_lock); @@ -2653,15 +2895,30 @@ int console_trylock(void) up_console_sem(); return 0; } - console_locked = 1; + if (!console_kthreads_atomic_tryblock()) { + up_console_sem(); + return 0; + } console_may_schedule = 0; return 1; } EXPORT_SYMBOL(console_trylock); +/* + * This is used to help to make sure that certain paths within the VT code are + * running with the console lock held. It is definitely not the perfect debug + * tool (it is not known if the VT code is the task holding the console lock), + * but it helps tracking those weird code paths in the console code such as + * when the console is suspended: where the console is not locked but no + * console printing may occur. + * + * Note: This returns true when the console is suspended but is not locked. + * This is intentional because the VT code must consider that situation + * the same as if the console was locked. + */ int is_console_locked(void) { - return console_locked; + return (console_kthreads_blocked || atomic_read(&console_kthreads_active)); } EXPORT_SYMBOL(is_console_locked); @@ -2671,12 +2928,9 @@ EXPORT_SYMBOL(is_console_locked); * * Requires the console_lock. */ -static inline bool console_is_usable(struct console *con) +static inline bool __console_is_usable(short flags) { - if (!(con->flags & CON_ENABLED)) - return false; - - if (!con->write) + if (!(flags & CON_ENABLED)) return false; /* @@ -2685,18 +2939,116 @@ static inline bool console_is_usable(str * cope (CON_ANYTIME) don't call them until this CPU is officially up. */ if (!cpu_online(raw_smp_processor_id()) && - !(con->flags & CON_ANYTIME)) + !(flags & CON_ANYTIME)) return false; return true; } +/* + * Check if the given console is currently capable and allowed to print + * records. + * + * Requires holding the console_lock. + */ +static inline bool console_is_usable(struct console *con, bool atomic_printing) +{ + if (atomic_printing) { +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + if (!con->write_atomic) + return false; + if (!con->atomic_data) + return false; +#else + return false; +#endif + } else if (!con->write) { + return false; + } + + return __console_is_usable(con->flags); +} + static void __console_unlock(void) { - console_locked = 0; + /* + * Depending on whether console_lock() or console_trylock() was used, + * appropriately allow the kthread printers to continue. + */ + if (console_kthreads_blocked) + console_kthreads_unblock(); + else + console_kthreads_atomic_unblock(); + + /* + * New records may have arrived while the console was locked. + * Wake the kthread printers to print them. 
+ */ + wake_up_klogd(); + up_console_sem(); } +static u64 read_console_seq(struct console *con) +{ +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + unsigned long flags; + u64 seq2; + u64 seq; + + if (!con->atomic_data) + return con->seq; + + printk_cpu_sync_get_irqsave(flags); + + seq = con->seq; + seq2 = con->atomic_data[0].seq; + if (seq2 > seq) + seq = seq2; +#ifdef CONFIG_HAVE_NMI + seq2 = con->atomic_data[1].seq; + if (seq2 > seq) + seq = seq2; +#endif + + printk_cpu_sync_put_irqrestore(flags); + + return seq; +#else /* CONFIG_HAVE_ATOMIC_CONSOLE */ + return con->seq; +#endif +} + +static void write_console_seq(struct console *con, u64 val, bool atomic_printing) +{ +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + unsigned long flags; + u64 *seq; + + if (!con->atomic_data) { + con->seq = val; + return; + } + + printk_cpu_sync_get_irqsave(flags); + + if (atomic_printing) { + seq = &con->atomic_data[0].seq; +#ifdef CONFIG_HAVE_NMI + if (in_nmi()) + seq = &con->atomic_data[1].seq; +#endif + } else { + seq = &con->seq; + } + *seq = val; + + printk_cpu_sync_put_irqrestore(flags); +#else /* CONFIG_HAVE_ATOMIC_CONSOLE */ + con->seq = val; +#endif +} + /* * Print one record for the given console. The record printed is whatever * record is the next available record for the given console. @@ -2709,36 +3061,47 @@ static void __console_unlock(void) * If dropped messages should be printed, @dropped_text is a buffer of size * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL. * + * @atomic_printing specifies if atomic printing should be used. + * * @handover will be set to true if a printk waiter has taken over the * console_lock, in which case the caller is no longer holding the - * console_lock. Otherwise it is set to false. + * console_lock. Otherwise it is set to false. A NULL pointer may be provided + * to disable allowing the console_lock to be taken over by a printk waiter. * * Returns false if the given console has no next record to print, otherwise * true. * - * Requires the console_lock. + * Requires the console_lock if @handover is non-NULL. + * Requires con->lock otherwise. */ -static bool console_emit_next_record(struct console *con, char *text, char *ext_text, - char *dropped_text, bool *handover) +static bool __console_emit_next_record(struct console *con, char *text, char *ext_text, + char *dropped_text, bool atomic_printing, + bool *handover) { - static int panic_console_dropped; + static atomic_t panic_console_dropped = ATOMIC_INIT(0); struct printk_info info; struct printk_record r; unsigned long flags; char *write_text; size_t len; + u64 seq; prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX); - *handover = false; + if (handover) + *handover = false; + + seq = read_console_seq(con); - if (!prb_read_valid(prb, con->seq, &r)) + if (!prb_read_valid(prb, seq, &r)) return false; - if (con->seq != r.info->seq) { - con->dropped += r.info->seq - con->seq; - con->seq = r.info->seq; - if (panic_in_progress() && panic_console_dropped++ > 10) { + if (seq != r.info->seq) { + atomic_long_add((unsigned long)(r.info->seq - seq), &con->dropped); + write_console_seq(con, r.info->seq, atomic_printing); + seq = r.info->seq; + if (panic_in_progress() && + atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) { suppress_panic_printk = 1; pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n"); } @@ -2746,7 +3109,7 @@ static bool console_emit_next_record(str /* Skip record that has level above the console loglevel. 
*/ if (suppress_message_printing(r.info->level)) { - con->seq++; + write_console_seq(con, seq + 1, atomic_printing); goto skip; } @@ -2760,32 +3123,66 @@ static bool console_emit_next_record(str len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); } - /* - * While actively printing out messages, if another printk() - * were to occur on another CPU, it may wait for this one to - * finish. This task can not be preempted if there is a - * waiter waiting to take over. - * - * Interrupts are disabled because the hand over to a waiter - * must not be interrupted until the hand over is completed - * (@console_waiter is cleared). - */ - printk_safe_enter_irqsave(flags); - console_lock_spinning_enable(); + if (handover) { + /* + * While actively printing out messages, if another printk() + * were to occur on another CPU, it may wait for this one to + * finish. This task can not be preempted if there is a + * waiter waiting to take over. + * + * Interrupts are disabled because the hand over to a waiter + * must not be interrupted until the hand over is completed + * (@console_waiter is cleared). + */ + printk_safe_enter_irqsave(flags); + console_lock_spinning_enable(); - stop_critical_timings(); /* don't trace print latency */ - call_console_driver(con, write_text, len, dropped_text); - start_critical_timings(); + /* don't trace irqsoff print latency */ + stop_critical_timings(); + } - con->seq++; + call_console_driver(con, write_text, len, dropped_text, atomic_printing); - *handover = console_lock_spinning_disable_and_check(); - printk_safe_exit_irqrestore(flags); + write_console_seq(con, seq + 1, atomic_printing); + + if (handover) { + start_critical_timings(); + *handover = console_lock_spinning_disable_and_check(); + printk_safe_exit_irqrestore(flags); + } skip: return true; } /* + * Print a record for a given console, but allow another printk() caller to + * take over the console_lock and continue printing. + * + * Requires the console_lock, but depending on @handover after the call, the + * caller may no longer have the console_lock. + * + * See __console_emit_next_record() for argument and return details. + */ +static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text, + char *dropped_text, bool *handover) +{ + /* + * Handovers are only supported if threaded printers are atomically + * blocked. The context taking over the console_lock may be atomic. + * + * PREEMPT_RT also does not support handovers because the spinning + * waiter can cause large latencies. + */ + if (!console_kthreads_atomically_blocked() || + IS_ENABLED(CONFIG_PREEMPT_RT)) { + *handover = false; + handover = NULL; + } + + return __console_emit_next_record(con, text, ext_text, dropped_text, false, handover); +} + +/* * Print out all remaining records to all consoles. * * @do_cond_resched is set by the caller. It can be true only in schedulable @@ -2803,8 +3200,8 @@ skip: * were flushed to all usable consoles. A returned false informs the caller * that everything was not flushed (either there were no usable consoles or * another context has taken over printing or it is a panic situation and this - * is not the panic CPU). Regardless the reason, the caller should assume it - * is not useful to immediately try again. + * is not the panic CPU or direct printing is not preferred). Regardless of + * the reason, the caller should assume it is not useful to immediately try again. * * Requires the console_lock.
*/ @@ -2821,24 +3218,26 @@ static bool console_flush_all(bool do_co *handover = false; do { + /* Let the kthread printers do the work if they can. */ + if (!allow_direct_printing()) + return false; + any_progress = false; for_each_console(con) { bool progress; - if (!console_is_usable(con)) + if (!console_is_usable(con, false)) continue; any_usable = true; if (con->flags & CON_EXTENDED) { /* Extended consoles do not print "dropped messages". */ - progress = console_emit_next_record(con, &text[0], - &ext_text[0], NULL, - handover); + progress = console_emit_next_record_transferable(con, &text[0], + &ext_text[0], NULL, handover); } else { - progress = console_emit_next_record(con, &text[0], - NULL, &dropped_text[0], - handover); + progress = console_emit_next_record_transferable(con, &text[0], + NULL, &dropped_text[0], handover); } if (*handover) return false; @@ -2863,6 +3262,68 @@ static bool console_flush_all(bool do_co return any_usable; } +#if defined(CONFIG_HAVE_ATOMIC_CONSOLE) && defined(CONFIG_PRINTK) +static bool console_emit_next_record(struct console *con, char *text, char *ext_text, + char *dropped_text, bool atomic_printing); + +static void atomic_console_flush_all(void) +{ + unsigned long flags; + struct console *con; + bool any_progress; + int index = 0; + + if (console_suspended) + return; + +#ifdef CONFIG_HAVE_NMI + if (in_nmi()) + index = 1; +#endif + + printk_cpu_sync_get_irqsave(flags); + + do { + any_progress = false; + + for_each_console(con) { + bool progress; + + if (!console_is_usable(con, true)) + continue; + + if (con->flags & CON_EXTENDED) { + /* Extended consoles do not print "dropped messages". */ + progress = console_emit_next_record(con, + &con->atomic_data->text[index], + &con->atomic_data->ext_text[index], + NULL, + true); + } else { + progress = console_emit_next_record(con, + &con->atomic_data->text[index], + NULL, + &con->atomic_data->dropped_text[index], + true); + } + + if (!progress) + continue; + any_progress = true; + + touch_softlockup_watchdog_sync(); + clocksource_touch_watchdog(); + rcu_cpu_stall_reset(); + touch_nmi_watchdog(); + } + } while (any_progress); + + printk_cpu_sync_put_irqrestore(flags); +} +#else /* CONFIG_HAVE_ATOMIC_CONSOLE && CONFIG_PRINTK */ +#define atomic_console_flush_all() +#endif + /** * console_unlock - unlock the console system * @@ -2953,10 +3414,13 @@ void console_unblank(void) if (oops_in_progress) { if (down_trylock_console_sem() != 0) return; + if (!console_kthreads_atomic_tryblock()) { + up_console_sem(); + return; + } } else console_lock(); - console_locked = 1; console_may_schedule = 0; for_each_console(c) if ((c->flags & CON_ENABLED) && c->unblank) @@ -2975,6 +3439,11 @@ void console_unblank(void) */ void console_flush_on_panic(enum con_flush_mode mode) { + if (mode == CONSOLE_ATOMIC_FLUSH_PENDING) { + atomic_console_flush_all(); + return; + } + /* * If someone else is holding the console lock, trylock will fail * and may_schedule may be set. 
Ignore and proceed to unlock so @@ -2991,7 +3460,7 @@ void console_flush_on_panic(enum con_flu seq = prb_first_valid_seq(prb); for_each_console(c) - c->seq = seq; + write_console_seq(c, seq, false); } console_unlock(); } @@ -3246,16 +3715,27 @@ void register_console(struct console *ne console_drivers->next = newcon; } - newcon->dropped = 0; + atomic_long_set(&newcon->dropped, 0); + newcon->thread = NULL; + newcon->blocked = true; + mutex_init(&newcon->lock); +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + newcon->atomic_data = NULL; +#endif + if (newcon->flags & CON_PRINTBUFFER) { /* Get a consistent copy of @syslog_seq. */ mutex_lock(&syslog_lock); - newcon->seq = syslog_seq; + write_console_seq(newcon, syslog_seq, false); mutex_unlock(&syslog_lock); } else { /* Begin with next message. */ - newcon->seq = prb_next_seq(prb); + write_console_seq(newcon, prb_next_seq(prb), false); } + + if (printk_kthreads_available) + printk_start_kthread(newcon); + console_unlock(); console_sysfs_notify(); @@ -3279,6 +3759,7 @@ EXPORT_SYMBOL(register_console); int unregister_console(struct console *console) { + struct task_struct *thd; struct console *con; int res; @@ -3316,9 +3797,26 @@ int unregister_console(struct console *c console_drivers->flags |= CON_CONSDEV; console->flags &= ~CON_ENABLED; + + /* + * console->thread can only be cleared under the console lock. But + * stopping the thread must be done without the console lock. The + * task that clears @thread is the task that stops the kthread. + */ + thd = console->thread; + console->thread = NULL; + console_unlock(); + + if (thd) + kthread_stop(thd); + console_sysfs_notify(); +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + free_atomic_data(console->atomic_data); +#endif + if (console->exit) res = console->exit(console); @@ -3412,6 +3910,20 @@ static int __init printk_late_init(void) } late_initcall(printk_late_init); +static int __init printk_activate_kthreads(void) +{ + struct console *con; + + console_lock(); + printk_kthreads_available = true; + for_each_console(con) + printk_start_kthread(con); + console_unlock(); + + return 0; +} +early_initcall(printk_activate_kthreads); + #if defined CONFIG_PRINTK /* If @con is specified, only wait for that console. Otherwise wait for all. */ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) @@ -3435,7 +3947,7 @@ static bool __pr_flush(struct console *c for_each_console(c) { if (con && con != c) continue; - if (!console_is_usable(c)) + if (!console_is_usable(c, false)) continue; printk_seq = c->seq; if (printk_seq < seq) @@ -3494,11 +4006,214 @@ static bool pr_flush(int timeout_ms, boo return __pr_flush(NULL, timeout_ms, reset_on_progress); } +static void __printk_fallback_preferred_direct(void) +{ + printk_prefer_direct_enter(); + pr_err("falling back to preferred direct printing\n"); + printk_kthreads_available = false; +} + +/* + * Enter preferred direct printing, but never exit. Mark console threads as + * unavailable. The system is then forever in preferred direct printing and + * any printing threads will exit. + * + * Must *not* be called under console_lock. Use + * __printk_fallback_preferred_direct() if already holding console_lock. + */ +static void printk_fallback_preferred_direct(void) +{ + console_lock(); + __printk_fallback_preferred_direct(); + console_unlock(); +} + +/* + * Print a record for a given console, not allowing another printk() caller + * to take over. This is appropriate for contexts that do not have the + * console_lock. 
+ * + * See __console_emit_next_record() for argument and return details. + */ +static bool console_emit_next_record(struct console *con, char *text, char *ext_text, + char *dropped_text, bool atomic_printing) +{ + return __console_emit_next_record(con, text, ext_text, dropped_text, + atomic_printing, NULL); +} + +static bool printer_should_wake(struct console *con, u64 seq) +{ + short flags; + + if (kthread_should_stop() || !printk_kthreads_available) + return true; + + if (con->blocked || + console_kthreads_atomically_blocked() || + block_console_kthreads || + system_state > SYSTEM_RUNNING || + oops_in_progress) { + return false; + } + + /* + * This is an unsafe read from con->flags, but a false positive is + * not a problem. Worst case it would allow the printer to wake up + * although it is disabled. But the printer will notice that when + * attempting to print and instead go back to sleep. + */ + flags = data_race(READ_ONCE(con->flags)); + + if (!__console_is_usable(flags)) + return false; + + return prb_read_valid(prb, seq, NULL); +} + +static int printk_kthread_func(void *data) +{ + struct console *con = data; + char *dropped_text = NULL; + char *ext_text = NULL; + u64 seq = 0; + char *text; + int error; + +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE + if (con->write_atomic) + con->atomic_data = alloc_atomic_data(con->flags); +#endif + + text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); + if (!text) { + con_printk(KERN_ERR, con, "failed to allocate text buffer\n"); + printk_fallback_preferred_direct(); + goto out; + } + + if (con->flags & CON_EXTENDED) { + ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); + if (!ext_text) { + con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n"); + printk_fallback_preferred_direct(); + goto out; + } + } else { + dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); + if (!dropped_text) { + con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n"); + printk_fallback_preferred_direct(); + goto out; + } + } + + con_printk(KERN_INFO, con, "printing thread started\n"); + for (;;) { + /* + * Guarantee this task is visible on the waitqueue before + * checking the wake condition. + * + * The full memory barrier within set_current_state() of + * prepare_to_wait_event() pairs with the full memory barrier + * within wq_has_sleeper(). + * + * This pairs with __wake_up_klogd:A. + */ + error = wait_event_interruptible(log_wait, + printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */ + + if (kthread_should_stop() || !printk_kthreads_available) + break; + + if (error) + continue; + + error = mutex_lock_interruptible(&con->lock); + if (error) + continue; + + if (con->blocked || + !console_kthread_printing_tryenter()) { + /* Another context has locked the console_lock. */ + mutex_unlock(&con->lock); + continue; + } + + /* + * Although this context has not locked the console_lock, it + * is known that the console_lock is not locked and it is not + * possible for any other context to lock the console_lock. + * Therefore it is safe to read con->flags. + */ + + if (!__console_is_usable(con->flags)) { + console_kthread_printing_exit(); + mutex_unlock(&con->lock); + continue; + } + + /* + * Even though the printk kthread is always preemptible, it is + * still not allowed to call cond_resched() from within + * console drivers. The task may become non-preemptible in the + * console driver call chain. For example, vt_console_print() + * takes a spinlock and then can call into fbcon_redraw(), + * which can conditionally invoke cond_resched(). 
+ */ + console_may_schedule = 0; + console_emit_next_record(con, text, ext_text, dropped_text, false); + + seq = con->seq; + + console_kthread_printing_exit(); + + mutex_unlock(&con->lock); + } + + con_printk(KERN_INFO, con, "printing thread stopped\n"); +out: + kfree(dropped_text); + kfree(ext_text); + kfree(text); + + console_lock(); + /* + * If this kthread is being stopped by another task, con->thread will + * already be NULL. That is fine. The important thing is that it is + * NULL after the kthread exits. + */ + con->thread = NULL; + console_unlock(); + + return 0; +} + +/* Must be called under console_lock. */ +static void printk_start_kthread(struct console *con) +{ + /* + * Do not start a kthread if there is no write() callback. The + * kthreads assume the write() callback exists. + */ + if (!con->write) + return; + + con->thread = kthread_run(printk_kthread_func, con, + "pr/%s%d", con->name, con->index); + if (IS_ERR(con->thread)) { + con->thread = NULL; + con_printk(KERN_ERR, con, "unable to start printing thread\n"); + __printk_fallback_preferred_direct(); + return; + } +} + /* * Delayed printk version, for scheduler-internal messages: */ -#define PRINTK_PENDING_WAKEUP 0x01 -#define PRINTK_PENDING_OUTPUT 0x02 +#define PRINTK_PENDING_WAKEUP 0x01 +#define PRINTK_PENDING_DIRECT_OUTPUT 0x02 static DEFINE_PER_CPU(int, printk_pending); @@ -3506,10 +4221,14 @@ static void wake_up_klogd_work_func(stru { int pending = this_cpu_xchg(printk_pending, 0); - if (pending & PRINTK_PENDING_OUTPUT) { + if (pending & PRINTK_PENDING_DIRECT_OUTPUT) { + printk_prefer_direct_enter(); + /* If trylock fails, someone else is doing the printing */ if (console_trylock()) console_unlock(); + + printk_prefer_direct_exit(); } if (pending & PRINTK_PENDING_WAKEUP) @@ -3534,10 +4253,11 @@ static void __wake_up_klogd(int val) * prepare_to_wait_event(), which is called after ___wait_event() adds * the waiter but before it has checked the wait condition. * - * This pairs with devkmsg_read:A and syslog_print:A. + * This pairs with devkmsg_read:A, syslog_print:A, and + * printk_kthread_func:A. */ if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ - (val & PRINTK_PENDING_OUTPUT)) { + (val & PRINTK_PENDING_DIRECT_OUTPUT)) { this_cpu_or(printk_pending, val); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); } @@ -3577,7 +4297,17 @@ void defer_console_output(void) * New messages may have been added directly to the ringbuffer * using vprintk_store(), so wake any waiters as well. */ - __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); + int val = PRINTK_PENDING_WAKEUP; + + /* + * Make sure that some context will print the messages when direct + * printing is allowed. This happens in situations where the kthreads + * may not be reliable or may even be unusable. + */ + if (allow_direct_printing()) + val |= PRINTK_PENDING_DIRECT_OUTPUT; + + __wake_up_klogd(val); } void printk_trigger_flush(void) Index: linux-6.1.90-rt30/kernel/printk/printk_safe.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/printk/printk_safe.c +++ linux-6.1.90-rt30/kernel/printk/printk_safe.c @@ -8,7 +8,9 @@ #include #include #include +#include #include +#include #include "internal.h" @@ -45,3 +47,33 @@ asmlinkage int vprintk(const char *fmt, return vprintk_default(fmt, args); } EXPORT_SYMBOL(vprintk); + +/** + * try_block_console_kthreads() - Try to block console kthreads and + * make the global console_lock() available + * + * @timeout_ms: The maximum time (in ms) to wait.
+ * + * Prevent console kthreads from starting to process new messages. Wait + * until the global console_lock() becomes available. + * + * Context: Can be called in any context. + */ +void try_block_console_kthreads(int timeout_ms) +{ + block_console_kthreads = true; + + /* Do not wait when the console lock could not be safely taken. */ + if (this_cpu_read(printk_context) || in_nmi()) + return; + + while (timeout_ms > 0) { + if (console_trylock()) { + console_unlock(); + return; + } + + udelay(1000); + timeout_ms -= 1; + } +} Index: linux-6.1.90-rt30/kernel/rcu/rcutorture.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/rcu/rcutorture.c +++ linux-6.1.90-rt30/kernel/rcu/rcutorture.c @@ -2363,6 +2363,12 @@ static int rcutorture_booster_init(unsig WARN_ON_ONCE(!t); sp.sched_priority = 2; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); +#ifdef CONFIG_PREEMPT_RT + t = per_cpu(timersd, cpu); + WARN_ON_ONCE(!t); + sp.sched_priority = 2; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); +#endif } /* Don't allow time recalculation while creating a new task. */ Index: linux-6.1.90-rt30/kernel/rcu/tree_stall.h =================================================================== --- linux-6.1.90-rt30.orig/kernel/rcu/tree_stall.h +++ linux-6.1.90-rt30/kernel/rcu/tree_stall.h @@ -649,6 +649,7 @@ static void print_cpu_stall(unsigned lon * See Documentation/RCU/stallwarn.rst for info on how to debug * RCU CPU stall warnings. */ + printk_prefer_direct_enter(); trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected")); pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); @@ -683,6 +684,7 @@ static void print_cpu_stall(unsigned lon */ set_tsk_need_resched(current); set_preempt_need_resched(); + printk_prefer_direct_exit(); } static void check_cpu_stall(struct rcu_data *rdp) Index: linux-6.1.90-rt30/kernel/reboot.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/reboot.c +++ linux-6.1.90-rt30/kernel/reboot.c @@ -83,6 +83,7 @@ void kernel_restart_prepare(char *cmd) { blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; + try_block_console_kthreads(10000); usermodehelper_disable(); device_shutdown(); } @@ -283,6 +284,7 @@ static void kernel_shutdown_prepare(enum blocking_notifier_call_chain(&reboot_notifier_list, (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL); system_state = state; + try_block_console_kthreads(10000); usermodehelper_disable(); device_shutdown(); } @@ -837,9 +839,11 @@ static int __orderly_reboot(void) ret = run_cmd(reboot_cmd); if (ret) { + printk_prefer_direct_enter(); pr_warn("Failed to start orderly reboot: forcing the issue\n"); emergency_sync(); kernel_restart(NULL); + printk_prefer_direct_exit(); } return ret; @@ -852,6 +856,7 @@ static int __orderly_poweroff(bool force ret = run_cmd(poweroff_cmd); if (ret && force) { + printk_prefer_direct_enter(); pr_warn("Failed to start orderly shutdown: forcing the issue\n"); /* @@ -861,6 +866,7 @@ static int __orderly_poweroff(bool force */ emergency_sync(); kernel_power_off(); + printk_prefer_direct_exit(); } return ret; @@ -918,6 +924,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot); */ static void hw_failure_emergency_poweroff_func(struct work_struct *work) { + printk_prefer_direct_enter(); + /* * We have reached here after the emergency shutdown waiting period has * expired.
This means orderly_poweroff has not been able to shut off @@ -934,6 +942,8 @@ static void hw_failure_emergency_powerof */ pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); emergency_restart(); + + printk_prefer_direct_exit(); } static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, @@ -972,11 +982,13 @@ void hw_protection_shutdown(const char * { static atomic_t allow_proceed = ATOMIC_INIT(1); + printk_prefer_direct_enter(); + pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); /* Shutdown should be initiated only once. */ if (!atomic_dec_and_test(&allow_proceed)) - return; + goto out; /* * Queue a backup emergency shutdown in the event of @@ -984,6 +996,8 @@ void hw_protection_shutdown(const char * */ hw_failure_emergency_poweroff(ms_until_forced); orderly_poweroff(true); +out: + printk_prefer_direct_exit(); } EXPORT_SYMBOL_GPL(hw_protection_shutdown); Index: linux-6.1.90-rt30/kernel/sched/core.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/sched/core.c +++ linux-6.1.90-rt30/kernel/sched/core.c @@ -1040,6 +1040,46 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } +#ifdef CONFIG_PREEMPT_LAZY + +static int tsk_is_polling(struct task_struct *p) +{ +#ifdef TIF_POLLING_NRFLAG + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); +#else + return 0; +#endif +} + +void resched_curr_lazy(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + int cpu; + + if (!sched_feat(PREEMPT_LAZY)) { + resched_curr(rq); + return; + } + + if (test_tsk_need_resched(curr)) + return; + + if (test_tsk_need_resched_lazy(curr)) + return; + + set_tsk_need_resched_lazy(curr); + + cpu = cpu_of(rq); + if (cpu == smp_processor_id()) + return; + + /* NEED_RESCHED_LAZY must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(curr)) + smp_send_reschedule(cpu); +} +#endif + void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -2224,6 +2264,7 @@ void migrate_disable(void) preempt_disable(); this_rq()->nr_pinned++; p->migration_disabled = 1; + preempt_lazy_disable(); preempt_enable(); } EXPORT_SYMBOL_GPL(migrate_disable); @@ -2255,6 +2296,7 @@ void migrate_enable(void) barrier(); p->migration_disabled = 0; this_rq()->nr_pinned--; + preempt_lazy_enable(); preempt_enable(); } EXPORT_SYMBOL_GPL(migrate_enable); @@ -3281,6 +3323,76 @@ out: } #endif /* CONFIG_NUMA_BALANCING */ +#ifdef CONFIG_PREEMPT_RT + +/* + * Consider: + * + * set_special_state(X); + * + * do_things() + * // Somewhere in there is an rtlock that can be contended: + * current_save_and_set_rtlock_wait_state(); + * [...] + * schedule_rtlock(); (A) + * [...] + * current_restore_rtlock_saved_state(); + * + * schedule(); (B) + * + * If p->saved_state is anything other than TASK_RUNNING, then p blocked on an + * rtlock (A) *before* voluntarily calling into schedule() (B) after setting its + * state to X. For things like ptrace (X=TASK_TRACED), the task could have more + * work to do upon acquiring the lock in do_things() before whoever called + * wait_task_inactive() should return. IOW, we have to wait for: + * + * p.saved_state = TASK_RUNNING + * p.__state = X + * + * which implies the task isn't blocked on an RT lock and got to schedule() (B). + * + * Also see comments in ttwu_state_match().
+ */ + +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state) +{ + unsigned long flags; + bool mismatch; + + raw_spin_lock_irqsave(&p->pi_lock, flags); + if (READ_ONCE(p->__state) & match_state) + mismatch = false; + else if (READ_ONCE(p->saved_state) & match_state) + mismatch = false; + else + mismatch = true; + + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return mismatch; +} +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state, + bool *wait) +{ + if (READ_ONCE(p->__state) & match_state) + return true; + if (READ_ONCE(p->saved_state) & match_state) { + *wait = true; + return true; + } + return false; +} +#else +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state) +{ + return !(READ_ONCE(p->__state) & match_state); +} +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state, + bool *wait) +{ + return (READ_ONCE(p->__state) & match_state); +} +#endif + /* * wait_task_inactive - wait for a thread to unschedule. * @@ -3299,7 +3411,7 @@ out: */ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) { - int running, queued; + bool running, wait; struct rq_flags rf; unsigned long ncsw; struct rq *rq; @@ -3325,7 +3437,7 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! */ while (task_on_cpu(rq, p)) { - if (!(READ_ONCE(p->__state) & match_state)) + if (state_mismatch(p, match_state)) return 0; cpu_relax(); } @@ -3338,9 +3450,10 @@ unsigned long wait_task_inactive(struct rq = task_rq_lock(p, &rf); trace_sched_wait_task(p); running = task_on_cpu(rq, p); - queued = task_on_rq_queued(p); + wait = task_on_rq_queued(p); ncsw = 0; - if (READ_ONCE(p->__state) & match_state) + + if (state_match(p, match_state, &wait)) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); @@ -3370,7 +3483,7 @@ unsigned long wait_task_inactive(struct * running right now), it's preempted, and we should * yield - it could be a while. */ - if (unlikely(queued)) { + if (unlikely(wait)) { ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); @@ -4651,6 +4764,9 @@ int sched_fork(unsigned long clone_flags p->on_cpu = 0; #endif init_task_preempt_count(p); +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(p)->preempt_lazy_count = 0; +#endif #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); @@ -6521,6 +6637,7 @@ static void __sched notrace __schedule(u next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); + clear_tsk_need_resched_lazy(prev); clear_preempt_need_resched(); #ifdef CONFIG_SCHED_DEBUG rq->last_seen_need_resched_ns = 0; @@ -6735,6 +6852,30 @@ static void __sched notrace preempt_sche } while (need_resched()); } +#ifdef CONFIG_PREEMPT_LAZY +/* + * If TIF_NEED_RESCHED is set, we allow being scheduled away, since it is + * set by an RT task. Otherwise we try to avoid being scheduled out as long as + * the preempt_lazy_count counter is >0.
+ */ +static __always_inline int preemptible_lazy(void) +{ + if (test_thread_flag(TIF_NEED_RESCHED)) + return 1; + if (current_thread_info()->preempt_lazy_count) + return 0; + return 1; +} + +#else + +static inline int preemptible_lazy(void) +{ + return 1; +} + +#endif + #ifdef CONFIG_PREEMPTION /* * This is the entry point to schedule() from in-kernel preemption @@ -6748,6 +6889,8 @@ asmlinkage __visible void __sched notrac */ if (likely(!preemptible())) return; + if (!preemptible_lazy()) + return; preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); @@ -6795,6 +6938,9 @@ asmlinkage __visible void __sched notrac if (likely(!preemptible())) return; + if (!preemptible_lazy()) + return; + do { /* * Because the function tracer can trace preempt_count_sub() @@ -9060,7 +9206,9 @@ void __init init_idle(struct task_struct /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); - +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(idle)->preempt_lazy_count = 0; +#endif /* * The idle tasks have their own, simple scheduling class: */ Index: linux-6.1.90-rt30/kernel/sched/fair.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/sched/fair.c +++ linux-6.1.90-rt30/kernel/sched/fair.c @@ -4914,7 +4914,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. @@ -4938,7 +4938,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq return; if (delta > ideal_runtime) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static void @@ -5084,7 +5084,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc * validating it and just reschedule. */ if (queued) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); return; } /* @@ -5233,7 +5233,7 @@ static void __account_cfs_rq_runtime(str * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static __always_inline @@ -5984,7 +5984,7 @@ static void hrtick_start_fair(struct rq if (delta < 0) { if (task_current(rq, p)) - resched_curr(rq); + resched_curr_lazy(rq); return; } hrtick_start(rq, delta); @@ -7738,7 +7738,7 @@ static void check_preempt_wakeup(struct return; preempt: - resched_curr(rq); + resched_curr_lazy(rq); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved @@ -11898,7 +11898,7 @@ static void task_fork_fair(struct task_s * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); - resched_curr(rq); + resched_curr_lazy(rq); } se->vruntime -= cfs_rq->min_vruntime; @@ -11925,7 +11925,7 @@ prio_changed_fair(struct rq *rq, struct */ if (task_current(rq, p)) { if (p->prio > oldprio) - resched_curr(rq); + resched_curr_lazy(rq); } else check_preempt_curr(rq, p, 0); } Index: linux-6.1.90-rt30/kernel/sched/features.h =================================================================== --- linux-6.1.90-rt30.orig/kernel/sched/features.h +++ linux-6.1.90-rt30/kernel/sched/features.h @@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true) #ifdef CONFIG_PREEMPT_RT SCHED_FEAT(TTWU_QUEUE, false) +# ifdef CONFIG_PREEMPT_LAZY +SCHED_FEAT(PREEMPT_LAZY, true) +# endif #else /* Index: linux-6.1.90-rt30/kernel/sched/sched.h =================================================================== --- linux-6.1.90-rt30.orig/kernel/sched/sched.h +++ linux-6.1.90-rt30/kernel/sched/sched.h @@ -2350,6 +2350,15 @@ extern void reweight_task(struct task_st extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); +#ifdef CONFIG_PREEMPT_LAZY +extern void resched_curr_lazy(struct rq *rq); +#else +static inline void resched_curr_lazy(struct rq *rq) +{ + resched_curr(rq); +} +#endif + extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); Index: linux-6.1.90-rt30/kernel/signal.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/signal.c +++ linux-6.1.90-rt30/kernel/signal.c @@ -2302,13 +2302,13 @@ static int ptrace_stop(int exit_code, in /* * Don't want to allow preemption here, because * sys_ptrace() needs this task to be inactive. - * - * XXX: implement read_unlock_no_resched(). */ - preempt_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); read_unlock(&tasklist_lock); cgroup_enter_frozen(); - preempt_enable_no_resched(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable_no_resched(); schedule(); cgroup_leave_frozen(true); Index: linux-6.1.90-rt30/kernel/softirq.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/softirq.c +++ linux-6.1.90-rt30/kernel/softirq.c @@ -80,21 +80,6 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } -/* - * If ksoftirqd is scheduled, we do not want to process pending softirqs - * right now. Let ksoftirqd handle this at its own rate, to get fairness, - * unless we're doing some of the synchronous softirqs. 
- */ -#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) -static bool ksoftirqd_running(unsigned long pending) -{ - struct task_struct *tsk = __this_cpu_read(ksoftirqd); - - if (pending & SOFTIRQ_NOW_MASK) - return false; - return tsk && task_is_running(tsk) && !__kthread_should_park(tsk); -} - #ifdef CONFIG_TRACE_IRQFLAGS DEFINE_PER_CPU(int, hardirqs_enabled); DEFINE_PER_CPU(int, hardirq_context); @@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long goto out; pending = local_softirq_pending(); - if (!pending || ksoftirqd_running(pending)) + if (!pending) goto out; /* @@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd static inline void invoke_softirq(void) { - if (ksoftirqd_running(local_softirq_pending())) - return; - if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) { #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK /* @@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(voi pending = local_softirq_pending(); - if (pending && !ksoftirqd_running(pending)) + if (pending) do_softirq_own_stack(); local_irq_restore(flags); @@ -641,6 +623,24 @@ static inline void tick_irq_exit(void) #endif } +#ifdef CONFIG_PREEMPT_RT +DEFINE_PER_CPU(struct task_struct *, timersd); +DEFINE_PER_CPU(unsigned long, pending_timer_softirq); + +static void wake_timersd(void) +{ + struct task_struct *tsk = __this_cpu_read(timersd); + + if (tsk) + wake_up_process(tsk); +} + +#else + +static inline void wake_timersd(void) { } + +#endif + static inline void __irq_exit_rcu(void) { #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED @@ -653,6 +653,10 @@ static inline void __irq_exit_rcu(void) if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); + if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() && + !(in_nmi() | in_hardirq())) + wake_timersd(); + tick_irq_exit(); } @@ -980,12 +984,70 @@ static struct smp_hotplug_thread softirq .thread_comm = "ksoftirqd/%u", }; +#ifdef CONFIG_PREEMPT_RT +static void timersd_setup(unsigned int cpu) +{ + sched_set_fifo_low(current); +} + +static int timersd_should_run(unsigned int cpu) +{ + return local_pending_timers(); +} + +static void run_timersd(unsigned int cpu) +{ + unsigned int timer_si; + + ksoftirqd_run_begin(); + + timer_si = local_pending_timers(); + __this_cpu_write(pending_timer_softirq, 0); + or_softirq_pending(timer_si); + + __do_softirq(); + + ksoftirqd_run_end(); +} + +static void raise_ktimers_thread(unsigned int nr) +{ + trace_softirq_raise(nr); + __this_cpu_or(pending_timer_softirq, 1 << nr); +} + +void raise_hrtimer_softirq(void) +{ + raise_ktimers_thread(HRTIMER_SOFTIRQ); +} + +void raise_timer_softirq(void) +{ + unsigned long flags; + + local_irq_save(flags); + raise_ktimers_thread(TIMER_SOFTIRQ); + wake_timersd(); + local_irq_restore(flags); +} + +static struct smp_hotplug_thread timer_threads = { + .store = &timersd, + .setup = timersd_setup, + .thread_should_run = timersd_should_run, + .thread_fn = run_timersd, + .thread_comm = "ktimers/%u", +}; +#endif + static __init int spawn_ksoftirqd(void) { cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, takeover_tasklets); BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); - +#ifdef CONFIG_PREEMPT_RT + BUG_ON(smpboot_register_percpu_thread(&timer_threads)); +#endif return 0; } early_initcall(spawn_ksoftirqd); Index: linux-6.1.90-rt30/kernel/time/hrtimer.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/time/hrtimer.c +++ linux-6.1.90-rt30/kernel/time/hrtimer.c @@ -1806,7 +1806,7 @@ retry: if 
(!ktime_before(now, cpu_base->softirq_expires_next)) { cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->softirq_activated = 1; - raise_softirq_irqoff(HRTIMER_SOFTIRQ); + raise_hrtimer_softirq(); } __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); @@ -1919,7 +1919,7 @@ void hrtimer_run_queues(void) if (!ktime_before(now, cpu_base->softirq_expires_next)) { cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->softirq_activated = 1; - raise_softirq_irqoff(HRTIMER_SOFTIRQ); + raise_hrtimer_softirq(); } __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); Index: linux-6.1.90-rt30/kernel/time/posix-timers.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/time/posix-timers.c +++ linux-6.1.90-rt30/kernel/time/posix-timers.c @@ -140,25 +140,30 @@ static struct k_itimer *posix_timer_by_i static int posix_timer_add(struct k_itimer *timer) { struct signal_struct *sig = current->signal; - int first_free_id = sig->posix_timer_id; struct hlist_head *head; - int ret = -ENOENT; + unsigned int cnt, id; - do { + /* + * FIXME: Replace this by a per signal struct xarray once there is + * a plan to handle the resulting CRIU regression gracefully. + */ + for (cnt = 0; cnt <= INT_MAX; cnt++) { spin_lock(&hash_lock); - head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; - if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { + id = sig->next_posix_timer_id; + + /* Write the next ID back. Clamp it to the positive space */ + sig->next_posix_timer_id = (id + 1) & INT_MAX; + + head = &posix_timers_hashtable[hash(sig, id)]; + if (!__posix_timers_find(head, sig, id)) { hlist_add_head_rcu(&timer->t_hash, head); - ret = sig->posix_timer_id; + spin_unlock(&hash_lock); + return id; } - if (++sig->posix_timer_id < 0) - sig->posix_timer_id = 0; - if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT)) - /* Loop over all possible ids completed */ - ret = -EAGAIN; spin_unlock(&hash_lock); - } while (ret == -ENOENT); - return ret; + } + /* POSIX return code when no timer ID could be allocated */ + return -EAGAIN; } static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) Index: linux-6.1.90-rt30/kernel/time/tick-sched.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/time/tick-sched.c +++ linux-6.1.90-rt30/kernel/time/tick-sched.c @@ -800,7 +800,7 @@ static void tick_nohz_restart(struct tic static inline bool local_timer_softirq_pending(void) { - return local_softirq_pending() & BIT(TIMER_SOFTIRQ); + return local_pending_timers() & BIT(TIMER_SOFTIRQ); } static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) Index: linux-6.1.90-rt30/kernel/time/timer.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/time/timer.c +++ linux-6.1.90-rt30/kernel/time/timer.c @@ -1852,7 +1852,7 @@ static void run_local_timers(void) if (time_before(jiffies, base->next_expiry)) return; } - raise_softirq(TIMER_SOFTIRQ); + raise_timer_softirq(); } /* Index: linux-6.1.90-rt30/kernel/trace/trace.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/trace/trace.c +++ linux-6.1.90-rt30/kernel/trace/trace.c @@ -2644,11 +2644,19 @@ unsigned int tracing_gen_ctx_irq_test(un if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) trace_flags |= TRACE_FLAG_BH_OFF; - if (tif_need_resched()) + if (tif_need_resched_now()) trace_flags |= TRACE_FLAG_NEED_RESCHED; +#ifdef 
CONFIG_PREEMPT_LAZY + /* Run out of bits. Share the LAZY and PREEMPT_RESCHED */ + if (need_resched_lazy()) + trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; +#else if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; - return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | +#endif + + return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) | + (preempt_lazy_count() & 0xff) << 16 | (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } @@ -4240,15 +4248,17 @@ unsigned long trace_total_entries(struct static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n" - "# / _-----=> irqs-off/BH-disabled\n" - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" - "# |||| / _-=> migrate-disable \n" - "# ||||| / delay \n" - "# cmd pid |||||| time | caller \n" - "# \\ / |||||| \\ | / \n"); + seq_puts(m, "# _--------=> CPU# \n" + "# / _-------=> irqs-off/BH-disabled\n" + "# | / _------=> need-resched \n" + "# || / _-----=> need-resched-lazy\n" + "# ||| / _----=> hardirq/softirq \n" + "# |||| / _---=> preempt-depth \n" + "# ||||| / _--=> preempt-lazy-depth\n" + "# |||||| / _-=> migrate-disable \n" + "# ||||||| / delay \n" + "# cmd pid |||||||| time | caller \n" + "# \\ / |||||||| \\ | / \n"); } static void print_event_info(struct array_buffer *buf, struct seq_file *m) @@ -4282,14 +4292,16 @@ static void print_func_help_header_irq(s print_event_info(buf, m); - seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); - seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); - seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); - seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); - seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); - seq_printf(m, "# %.*s|||| / delay\n", prec, space); - seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); - seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); + seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space); + seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space); + seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space); + seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space); + seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space); + seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space); + seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space); + seq_printf(m, "# %.*s|||||| / delay\n", prec, space); + seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID "); + seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | "); } void Index: linux-6.1.90-rt30/kernel/trace/trace_events.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/trace/trace_events.c +++ linux-6.1.90-rt30/kernel/trace/trace_events.c @@ -208,6 +208,7 @@ static int trace_define_common_fields(vo /* Holds both preempt_count and migrate_disable */ __common_field(unsigned char, preempt_count); __common_field(int, pid); + __common_field(unsigned char, preempt_lazy_count); return ret; } Index: linux-6.1.90-rt30/kernel/trace/trace_output.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/trace/trace_output.c +++ linux-6.1.90-rt30/kernel/trace/trace_output.c @@ -442,6 +442,7 @@ int trace_print_lat_fmt(struct trace_seq { char hardsoft_irq; char need_resched; + char 
need_resched_lazy; char irqs_off; int hardirq; int softirq; @@ -462,20 +463,27 @@ int trace_print_lat_fmt(struct trace_seq switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED)) { +#ifndef CONFIG_PREEMPT_LAZY case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED: need_resched = 'N'; break; +#endif case TRACE_FLAG_NEED_RESCHED: need_resched = 'n'; break; +#ifndef CONFIG_PREEMPT_LAZY case TRACE_FLAG_PREEMPT_RESCHED: need_resched = 'p'; break; +#endif default: need_resched = '.'; break; } + need_resched_lazy = + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; + hardsoft_irq = (nmi && hardirq) ? 'Z' : nmi ? 'z' : @@ -484,14 +492,20 @@ int trace_print_lat_fmt(struct trace_seq softirq ? 's' : '.' ; - trace_seq_printf(s, "%c%c%c", - irqs_off, need_resched, hardsoft_irq); + trace_seq_printf(s, "%c%c%c%c", + irqs_off, need_resched, need_resched_lazy, + hardsoft_irq); if (entry->preempt_count & 0xf) trace_seq_printf(s, "%x", entry->preempt_count & 0xf); else trace_seq_putc(s, '.'); + if (entry->preempt_lazy_count) + trace_seq_printf(s, "%x", entry->preempt_lazy_count); + else + trace_seq_putc(s, '.'); + if (entry->preempt_count & 0xf0) trace_seq_printf(s, "%x", entry->preempt_count >> 4); else Index: linux-6.1.90-rt30/kernel/watchdog.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/watchdog.c +++ linux-6.1.90-rt30/kernel/watchdog.c @@ -431,6 +431,8 @@ static enum hrtimer_restart watchdog_tim /* Start period for the next softlockup warning. */ update_report_ts(); + printk_prefer_direct_enter(); + pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", smp_processor_id(), duration, current->comm, task_pid_nr(current)); @@ -449,6 +451,8 @@ static enum hrtimer_restart watchdog_tim add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); if (softlockup_panic) panic("softlockup: hung tasks"); + + printk_prefer_direct_exit(); } return HRTIMER_RESTART; Index: linux-6.1.90-rt30/kernel/watchdog_hld.c =================================================================== --- linux-6.1.90-rt30.orig/kernel/watchdog_hld.c +++ linux-6.1.90-rt30/kernel/watchdog_hld.c @@ -135,6 +135,8 @@ static void watchdog_overflow_callback(s if (__this_cpu_read(hard_watchdog_warn) == true) return; + printk_prefer_direct_enter(); + pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", this_cpu); print_modules(); @@ -155,6 +157,8 @@ static void watchdog_overflow_callback(s if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); + printk_prefer_direct_exit(); + __this_cpu_write(hard_watchdog_warn, true); return; } Index: linux-6.1.90-rt30/lib/debugobjects.c =================================================================== --- linux-6.1.90-rt30.orig/lib/debugobjects.c +++ linux-6.1.90-rt30/lib/debugobjects.c @@ -600,10 +600,21 @@ static void debug_objects_fill_pool(void { /* * On RT enabled kernels the pool refill must happen in preemptible - * context: + * context -- for !RT kernels we rely on the fact that spinlock_t and + * raw_spinlock_t are basically the same type and this lock-type + * inversion works just fine. */ - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { + /* + * Annotate away the spinlock_t inside raw_spinlock_t warning + * by temporarily raising the wait-type to WAIT_SLEEP, matching + * the preemptible() condition above. 
+		 */
+		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+		lock_map_acquire_try(&fill_pool_map);
 		fill_pool();
+		lock_map_release(&fill_pool_map);
+	}
 }
 
 static void
Index: linux-6.1.90-rt30/localversion-rt
===================================================================
--- /dev/null
+++ linux-6.1.90-rt30/localversion-rt
@@ -0,0 +1 @@
+-rt30
Index: linux-6.1.90-rt30/mm/page_alloc.c
===================================================================
--- linux-6.1.90-rt30.orig/mm/page_alloc.c
+++ linux-6.1.90-rt30/mm/page_alloc.c
@@ -6590,19 +6590,17 @@ static void __build_all_zonelists(void *
 	unsigned long flags;
 
 	/*
-	 * Explicitly disable this CPU's interrupts before taking seqlock
-	 * to prevent any IRQ handler from calling into the page allocator
-	 * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+	 * The zonelist_update_seq must be acquired with irqsave because the
+	 * reader can be invoked from IRQ with GFP_ATOMIC.
 	 */
-	local_irq_save(flags);
+	write_seqlock_irqsave(&zonelist_update_seq, flags);
 	/*
-	 * Explicitly disable this CPU's synchronous printk() before taking
-	 * seqlock to prevent any printk() from trying to hold port->lock, for
+	 * Also disable synchronous printk() to prevent any printk() from
+	 * trying to hold port->lock, for
 	 * tty_insert_flip_string_and_push_buffer() on other CPU might be
 	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
 	 */
 	printk_deferred_enter();
-	write_seqlock(&zonelist_update_seq);
 
 #ifdef CONFIG_NUMA
 	memset(node_load, 0, sizeof(node_load));
@@ -6639,9 +6637,8 @@ static void __build_all_zonelists(void *
 #endif
 	}
 
-	write_sequnlock(&zonelist_update_seq);
 	printk_deferred_exit();
-	local_irq_restore(flags);
+	write_sequnlock_irqrestore(&zonelist_update_seq, flags);
 }
 
 static noinline void __init
Index: linux-6.1.90-rt30/net/8021q/vlan_dev.c
===================================================================
--- linux-6.1.90-rt30.orig/net/8021q/vlan_dev.c
+++ linux-6.1.90-rt30/net/8021q/vlan_dev.c
@@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct
 
 		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&p->syncp);
+			start = u64_stats_fetch_begin(&p->syncp);
 			rxpackets	= u64_stats_read(&p->rx_packets);
 			rxbytes		= u64_stats_read(&p->rx_bytes);
 			rxmulticast	= u64_stats_read(&p->rx_multicast);
 			txpackets	= u64_stats_read(&p->tx_packets);
 			txbytes		= u64_stats_read(&p->tx_bytes);
-		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+		} while (u64_stats_fetch_retry(&p->syncp, start));
 
 		stats->rx_packets	+= rxpackets;
 		stats->rx_bytes		+= rxbytes;
Index: linux-6.1.90-rt30/net/bridge/br_multicast.c
===================================================================
--- linux-6.1.90-rt30.orig/net/bridge/br_multicast.c
+++ linux-6.1.90-rt30/net/bridge/br_multicast.c
@@ -4909,9 +4909,9 @@ void br_multicast_get_stats(const struct
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
 
 		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
 		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
Index: linux-6.1.90-rt30/net/bridge/br_vlan.c
===================================================================
--- linux-6.1.90-rt30.orig/net/bridge/br_vlan.c
+++ linux-6.1.90-rt30/net/bridge/br_vlan.c
@@ -1389,12
+1389,12 @@ void br_vlan_get_stats(const struct net_ cpu_stats = per_cpu_ptr(v->stats, i); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); rxpackets = u64_stats_read(&cpu_stats->rx_packets); rxbytes = u64_stats_read(&cpu_stats->rx_bytes); txbytes = u64_stats_read(&cpu_stats->tx_bytes); txpackets = u64_stats_read(&cpu_stats->tx_packets); - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->rx_packets, rxpackets); u64_stats_add(&stats->rx_bytes, rxbytes); Index: linux-6.1.90-rt30/net/core/dev.c =================================================================== --- linux-6.1.90-rt30.orig/net/core/dev.c +++ linux-6.1.90-rt30/net/core/dev.c @@ -4621,15 +4621,6 @@ static void rps_trigger_softirq(void *da #endif /* CONFIG_RPS */ -/* Called from hardirq (IPI) context */ -static void trigger_rx_softirq(void *data) -{ - struct softnet_data *sd = data; - - __raise_softirq_irqoff(NET_RX_SOFTIRQ); - smp_store_release(&sd->defer_ipi_scheduled, 0); -} - /* * Check if this softnet_data structure is another cpu one * If yes, queue it to our IPI list and return 1 @@ -6690,6 +6681,30 @@ static void skb_defer_free_flush(struct } } +#ifndef CONFIG_PREEMPT_RT +/* Called from hardirq (IPI) context */ +static void trigger_rx_softirq(void *data) +{ + struct softnet_data *sd = data; + + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + smp_store_release(&sd->defer_ipi_scheduled, 0); +} + +#else + +static void trigger_rx_softirq(struct work_struct *defer_work) +{ + struct softnet_data *sd; + + sd = container_of(defer_work, struct softnet_data, defer_work); + smp_store_release(&sd->defer_ipi_scheduled, 0); + local_bh_disable(); + skb_defer_free_flush(sd); + local_bh_enable(); +} +#endif + static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); @@ -10512,12 +10527,12 @@ void dev_fetch_sw_netstats(struct rtnl_l stats = per_cpu_ptr(netstats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); rx_packets = u64_stats_read(&stats->rx_packets); rx_bytes = u64_stats_read(&stats->rx_bytes); tx_packets = u64_stats_read(&stats->tx_packets); tx_bytes = u64_stats_read(&stats->tx_bytes); - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); s->rx_packets += rx_packets; s->rx_bytes += rx_bytes; @@ -11451,7 +11466,11 @@ static int __init net_dev_init(void) INIT_CSD(&sd->csd, rps_trigger_softirq, sd); sd->cpu = i; #endif +#ifndef CONFIG_PREEMPT_RT INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); +#else + INIT_WORK(&sd->defer_work, trigger_rx_softirq); +#endif spin_lock_init(&sd->defer_lock); init_gro_hash(&sd->backlog); Index: linux-6.1.90-rt30/net/core/drop_monitor.c =================================================================== --- linux-6.1.90-rt30.orig/net/core/drop_monitor.c +++ linux-6.1.90-rt30/net/core/drop_monitor.c @@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net u64 dropped; do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); dropped = u64_stats_read(&cpu_stats->dropped); - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->dropped, dropped); } @@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct u64 
dropped; do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); dropped = u64_stats_read(&cpu_stats->dropped); - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->dropped, dropped); } Index: linux-6.1.90-rt30/net/core/gen_stats.c =================================================================== --- linux-6.1.90-rt30.orig/net/core/gen_stats.c +++ linux-6.1.90-rt30/net/core/gen_stats.c @@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(str u64 bytes, packets; do { - start = u64_stats_fetch_begin_irq(&bcpu->syncp); + start = u64_stats_fetch_begin(&bcpu->syncp); bytes = u64_stats_read(&bcpu->bytes); packets = u64_stats_read(&bcpu->packets); - } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); + } while (u64_stats_fetch_retry(&bcpu->syncp, start)); t_bytes += bytes; t_packets += packets; @@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_st } do { if (running) - start = u64_stats_fetch_begin_irq(&b->syncp); + start = u64_stats_fetch_begin(&b->syncp); bytes = u64_stats_read(&b->bytes); packets = u64_stats_read(&b->packets); - } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); + } while (running && u64_stats_fetch_retry(&b->syncp, start)); _bstats_update(bstats, bytes, packets); } @@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *r u64 bytes, packets; do { - start = u64_stats_fetch_begin_irq(&bcpu->syncp); + start = u64_stats_fetch_begin(&bcpu->syncp); bytes = u64_stats_read(&bcpu->bytes); packets = u64_stats_read(&bcpu->packets); - } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); + } while (u64_stats_fetch_retry(&bcpu->syncp, start)); t_bytes += bytes; t_packets += packets; @@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *r } do { if (running) - start = u64_stats_fetch_begin_irq(&b->syncp); + start = u64_stats_fetch_begin(&b->syncp); *ret_bytes = u64_stats_read(&b->bytes); *ret_packets = u64_stats_read(&b->packets); - } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); + } while (running && u64_stats_fetch_retry(&b->syncp, start)); } static int Index: linux-6.1.90-rt30/net/core/skbuff.c =================================================================== --- linux-6.1.90-rt30.orig/net/core/skbuff.c +++ linux-6.1.90-rt30/net/core/skbuff.c @@ -6702,6 +6702,11 @@ nodefer: __kfree_skb(skb); /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU * if we are unlucky enough (this seems very unlikely). 
*/ - if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) + if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { +#ifndef CONFIG_PREEMPT_RT smp_call_function_single_async(cpu, &sd->defer_csd); +#else + schedule_work_on(cpu, &sd->defer_work); +#endif + } } Index: linux-6.1.90-rt30/net/devlink/leftover.c =================================================================== --- linux-6.1.90-rt30.orig/net/devlink/leftover.c +++ linux-6.1.90-rt30/net/devlink/leftover.c @@ -8307,10 +8307,10 @@ static void devlink_trap_stats_read(stru cpu_stats = per_cpu_ptr(trap_stats, i); do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + start = u64_stats_fetch_begin(&cpu_stats->syncp); rx_packets = u64_stats_read(&cpu_stats->rx_packets); rx_bytes = u64_stats_read(&cpu_stats->rx_bytes); - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->rx_packets, rx_packets); u64_stats_add(&stats->rx_bytes, rx_bytes); Index: linux-6.1.90-rt30/net/dsa/slave.c =================================================================== --- linux-6.1.90-rt30.orig/net/dsa/slave.c +++ linux-6.1.90-rt30/net/dsa/slave.c @@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats( s = per_cpu_ptr(dev->tstats, i); do { - start = u64_stats_fetch_begin_irq(&s->syncp); + start = u64_stats_fetch_begin(&s->syncp); tx_packets = u64_stats_read(&s->tx_packets); tx_bytes = u64_stats_read(&s->tx_bytes); rx_packets = u64_stats_read(&s->rx_packets); rx_bytes = u64_stats_read(&s->rx_bytes); - } while (u64_stats_fetch_retry_irq(&s->syncp, start)); + } while (u64_stats_fetch_retry(&s->syncp, start)); data[0] += tx_packets; data[1] += tx_bytes; data[2] += rx_packets; Index: linux-6.1.90-rt30/net/ipv4/af_inet.c =================================================================== --- linux-6.1.90-rt30.orig/net/ipv4/af_inet.c +++ linux-6.1.90-rt30/net/ipv4/af_inet.c @@ -1736,9 +1736,9 @@ u64 snmp_get_cpu_field64(void __percpu * bhptr = per_cpu_ptr(mib, cpu); syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); v = *(((u64 *)bhptr) + offt); - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); return v; } Index: linux-6.1.90-rt30/net/ipv6/seg6_local.c =================================================================== --- linux-6.1.90-rt30.orig/net/ipv6/seg6_local.c +++ linux-6.1.90-rt30/net/ipv6/seg6_local.c @@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_bu pcounters = per_cpu_ptr(slwt->pcpu_counters, i); do { - start = u64_stats_fetch_begin_irq(&pcounters->syncp); + start = u64_stats_fetch_begin(&pcounters->syncp); packets = u64_stats_read(&pcounters->packets); bytes = u64_stats_read(&pcounters->bytes); errors = u64_stats_read(&pcounters->errors); - } while (u64_stats_fetch_retry_irq(&pcounters->syncp, start)); + } while (u64_stats_fetch_retry(&pcounters->syncp, start)); counters.packets += packets; counters.bytes += bytes; Index: linux-6.1.90-rt30/net/mac80211/sta_info.c =================================================================== --- linux-6.1.90-rt30.orig/net/mac80211/sta_info.c +++ linux-6.1.90-rt30/net/mac80211/sta_info.c @@ -2402,9 +2402,9 @@ static inline u64 sta_get_tidstats_msdu( u64 value; do { - start = u64_stats_fetch_begin_irq(&rxstats->syncp); + start = u64_stats_fetch_begin(&rxstats->syncp); value = rxstats->msdu[tid]; - } while 
(u64_stats_fetch_retry_irq(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); return value; } @@ -2470,9 +2470,9 @@ static inline u64 sta_get_stats_bytes(st u64 value; do { - start = u64_stats_fetch_begin_irq(&rxstats->syncp); + start = u64_stats_fetch_begin(&rxstats->syncp); value = rxstats->bytes; - } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); return value; } Index: linux-6.1.90-rt30/net/mpls/af_mpls.c =================================================================== --- linux-6.1.90-rt30.orig/net/mpls/af_mpls.c +++ linux-6.1.90-rt30/net/mpls/af_mpls.c @@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_d p = per_cpu_ptr(mdev->stats, i); do { - start = u64_stats_fetch_begin_irq(&p->syncp); + start = u64_stats_fetch_begin(&p->syncp); local = p->stats; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += local.rx_packets; stats->rx_bytes += local.rx_bytes; Index: linux-6.1.90-rt30/net/netfilter/ipvs/ip_vs_ctl.c =================================================================== --- linux-6.1.90-rt30.orig/net/netfilter/ipvs/ip_vs_ctl.c +++ linux-6.1.90-rt30/net/netfilter/ipvs/ip_vs_ctl.c @@ -2299,13 +2299,13 @@ static int ip_vs_stats_percpu_show(struc u64 conns, inpkts, outpkts, inbytes, outbytes; do { - start = u64_stats_fetch_begin_irq(&u->syncp); + start = u64_stats_fetch_begin(&u->syncp); conns = u64_stats_read(&u->cnt.conns); inpkts = u64_stats_read(&u->cnt.inpkts); outpkts = u64_stats_read(&u->cnt.outpkts); inbytes = u64_stats_read(&u->cnt.inbytes); outbytes = u64_stats_read(&u->cnt.outbytes); - } while (u64_stats_fetch_retry_irq(&u->syncp, start)); + } while (u64_stats_fetch_retry(&u->syncp, start)); seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n", i, (u64)conns, (u64)inpkts, Index: linux-6.1.90-rt30/net/netfilter/nf_tables_api.c =================================================================== --- linux-6.1.90-rt30.orig/net/netfilter/nf_tables_api.c +++ linux-6.1.90-rt30/net/netfilter/nf_tables_api.c @@ -1713,10 +1713,10 @@ static int nft_dump_stats(struct sk_buff for_each_possible_cpu(cpu) { cpu_stats = per_cpu_ptr(stats, cpu); do { - seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + seq = u64_stats_fetch_begin(&cpu_stats->syncp); pkts = cpu_stats->pkts; bytes = cpu_stats->bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); + } while (u64_stats_fetch_retry(&cpu_stats->syncp, seq)); total.pkts += pkts; total.bytes += bytes; } Index: linux-6.1.90-rt30/net/openvswitch/datapath.c =================================================================== --- linux-6.1.90-rt30.orig/net/openvswitch/datapath.c +++ linux-6.1.90-rt30/net/openvswitch/datapath.c @@ -716,9 +716,9 @@ static void get_dp_stats(const struct da percpu_stats = per_cpu_ptr(dp->stats_percpu, i); do { - start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); + start = u64_stats_fetch_begin(&percpu_stats->syncp); local_stats = *percpu_stats; - } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); + } while (u64_stats_fetch_retry(&percpu_stats->syncp, start)); stats->n_hit += local_stats.n_hit; stats->n_missed += local_stats.n_missed; Index: linux-6.1.90-rt30/net/openvswitch/flow_table.c =================================================================== --- linux-6.1.90-rt30.orig/net/openvswitch/flow_table.c +++ linux-6.1.90-rt30/net/openvswitch/flow_table.c @@ -205,9 +205,9 @@ static void 
tbl_mask_array_reset_counter stats = per_cpu_ptr(ma->masks_usage_stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); counter = stats->usage_cntrs[i]; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); ma->masks_usage_zero_cntr[i] += counter; } @@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flo stats = per_cpu_ptr(ma->masks_usage_stats, cpu); do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin(&stats->syncp); counter = stats->usage_cntrs[i]; - } while (u64_stats_fetch_retry_irq(&stats->syncp, - start)); + } while (u64_stats_fetch_retry(&stats->syncp, start)); masks_and_count[i].counter += counter; }
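
A note on the context word rebuilt by the first tracing hunk in this section: the return expression packs several fields into a single unsigned int, and the lazy-preemption support moves the TRACE_FLAG_* bits up by eight positions to make room for the lazy preempt count. The helpers below are an illustrative decoder only, not part of the patch; the layout is read off the shifts and masks in that hunk, and the demo_ names are hypothetical.

/*
 * Assumed layout of the context word, per the hunk above:
 *   bits  0..3   preempt_count, clamped to 0xf
 *   bits  4..7   migrate-disable depth, clamped to 0xf
 *   bits 16..23  preempt_lazy_count (new)
 *   bits 24..31  TRACE_FLAG_* bits (previously at bits 16..23)
 */
static inline unsigned int demo_ctx_preempt_depth(unsigned int ctx)
{
	return ctx & 0xf;		/* min_t(unsigned int, pc & 0xff, 0xf) */
}

static inline unsigned int demo_ctx_migrate_disable(unsigned int ctx)
{
	return (ctx >> 4) & 0xf;	/* migration_disable_value(), clamped */
}

static inline unsigned int demo_ctx_lazy_depth(unsigned int ctx)
{
	return (ctx >> 16) & 0xff;	/* (preempt_lazy_count() & 0xff) << 16 */
}

static inline unsigned int demo_ctx_trace_flags(unsigned int ctx)
{
	return ctx >> 24;		/* trace_flags << 24 */
}

This is also why the latency-format output grows two columns: the 'L' character reports TRACE_FLAG_NEED_RESCHED_LAZY, and the extra hex digit printed after preempt-depth reports preempt_lazy_count.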
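
The bulk of the remaining hunks convert u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() readers to the plain variants. In this kernel series the u64_stats implementation has been streamlined so the _irq variants add nothing over the plain ones, and disabling interrupts around the read side is unwanted on PREEMPT_RT. Every converted call site is an instance of the same lockless reader idiom, sketched below against a hypothetical per-CPU counter structure (demo_pcpu_stats and demo_read_packets are illustrative names, not taken from any file touched above):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64_stats_t		packets;	/* hypothetical counter */
	struct u64_stats_sync	syncp;		/* guards the counter on 32-bit */
};

static u64 demo_read_packets(struct demo_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			/* Reread the snapshot if a writer raced with us. */
			start = u64_stats_fetch_begin(&s->syncp);
			packets = u64_stats_read(&s->packets);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		total += packets;
	}
	return total;
}

On 64-bit kernels u64_stats_fetch_begin() returns 0 and the retry always fails, so the loop body runs exactly once; on 32-bit the seqcount retry protects the 64-bit read against a concurrent writer without touching the IRQ state, which is what makes dropping the _irq suffix safe.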
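
Finally, the net/core/dev.c and net/core/skbuff.c hunks rework the deferred-skb-free kick for PREEMPT_RT: instead of an IPI that raises NET_RX_SOFTIRQ from hard-IRQ context, RT queues a work item that flushes the deferred skbs with bottom halves disabled. The guard around the kick is a single-flight pattern on defer_ipi_scheduled. A stripped-down sketch of that pattern follows; the demo_ names are hypothetical and this is not the patch's code:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_defer {
	int			scheduled;	/* 0: idle, 1: kick in flight */
	struct work_struct	work;		/* INIT_WORK(&work, demo_defer_work) at init */
};

static void demo_defer_work(struct work_struct *work)
{
	struct demo_defer *d = container_of(work, struct demo_defer, work);

	/* Re-arm before draining: a racing producer simply kicks again. */
	smp_store_release(&d->scheduled, 0);
	/* ... drain the deferred items here ... */
}

static void demo_defer_kick(struct demo_defer *d, int cpu)
{
	/* Only the producer that flips 0 -> 1 issues the kick. */
	if (!cmpxchg(&d->scheduled, 0, 1))
		schedule_work_on(cpu, &d->work);
}

The cmpxchg() ensures at most one kick is outstanding per target CPU, mirroring how the skbuff.c hunk chooses between smp_call_function_single_async() and schedule_work_on() depending on CONFIG_PREEMPT_RT.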