Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed Oct 31 12:04:11 2012 +0100

Implement the arm pieces for lazy preempt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/Kconfig                   |    1 +
 arch/arm/include/asm/thread_info.h |    6 +++++-
 arch/arm/kernel/asm-offsets.c      |    1 +
 arch/arm/kernel/entry-armv.S       |   19 ++++++++++++++++---
 arch/arm/kernel/signal.c           |    3 ++-
 5 files changed, 25 insertions(+), 5 deletions(-)
---
Index: linux-6.3.0-rt11/arch/arm/Kconfig
===================================================================
--- linux-6.3.0-rt11.orig/arch/arm/Kconfig
+++ linux-6.3.0-rt11/arch/arm/Kconfig
@ linux-6.3.0-rt11/arch/arm/Kconfig:120 @ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
Index: linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h
===================================================================
--- linux-6.3.0-rt11.orig/arch/arm/include/asm/thread_info.h
+++ linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h
@ linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h:65 @ struct cpu_context_save {
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
 	__u32			cpu;		/* cpu */
 	__u32			cpu_domain;	/* cpu domain */
 	struct cpu_context_save	cpu_context;	/* cpu context */
@ linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h:133 @ extern int vfp_restore_user_hwstate(stru
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_UPROBE		3	/* breakpointed or singlestepping */
 #define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */
+#define TIF_NEED_RESCHED_LAZY	5
 
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@ linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h:153 @ extern int vfp_restore_user_hwstate(stru
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 
 /* Checks for any syscall work in entry-common.S */
@ linux-6.3.0-rt11/arch/arm/include/asm/thread_info.h:163 @ extern int vfp_restore_user_hwstate(stru
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
+				 _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
 				 _TIF_NOTIFY_SIGNAL)
Index: linux-6.3.0-rt11/arch/arm/kernel/asm-offsets.c
===================================================================
--- linux-6.3.0-rt11.orig/arch/arm/kernel/asm-offsets.c
+++ linux-6.3.0-rt11/arch/arm/kernel/asm-offsets.c
@ linux-6.3.0-rt11/arch/arm/kernel/asm-offsets.c:46 @ int main(void)
 	BLANK();
 	DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
 	DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+	DEFINE(TI_PREEMPT_LAZY,		offsetof(struct thread_info, preempt_lazy_count));
 	DEFINE(TI_CPU,			offsetof(struct thread_info, cpu));
 	DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
 	DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
Index: linux-6.3.0-rt11/arch/arm/kernel/entry-armv.S
===================================================================
--- linux-6.3.0-rt11.orig/arch/arm/kernel/entry-armv.S
+++ linux-6.3.0-rt11/arch/arm/kernel/entry-armv.S
@ linux-6.3.0-rt11/arch/arm/kernel/entry-armv.S:225 @ __irq_svc:
 #ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
+	bne	1f				@ return from exception
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set
+	blne	svc_preempt			@ preempt!
+
+	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r8, #0				@ if preempt lazy count != 0
 	movne	r0, #0				@ force flags to 0
-	tst	r0, #_TIF_NEED_RESCHED
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	blne	svc_preempt
+1:
 #endif
 
 	svc_exit r5, irq = 1			@ return from exception
@ linux-6.3.0-rt11/arch/arm/kernel/entry-armv.S:251 @ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
+	bne	1b
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	reteq	r8				@ go again
-	b	1b
+	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r0, #0				@ if preempt lazy count != 0
+	beq	1b
+	ret	r8				@ go again
+
 #endif
 
 __und_fault:
Index: linux-6.3.0-rt11/arch/arm/kernel/signal.c
===================================================================
--- linux-6.3.0-rt11.orig/arch/arm/kernel/signal.c
+++ linux-6.3.0-rt11/arch/arm/kernel/signal.c
@ linux-6.3.0-rt11/arch/arm/kernel/signal.c:610 @ do_work_pending(struct pt_regs *regs, un
 	 */
 	trace_hardirqs_off();
 	do {
-		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+		if (likely(thread_flags & (_TIF_NEED_RESCHED |
+					   _TIF_NEED_RESCHED_LAZY))) {
 			schedule();
 		} else {
 			if (unlikely(!user_mode(regs)))
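
For reference, the C sketch below models the IRQ-exit decision that the new
__irq_svc assembly encodes: preemption is only considered while preempt_count
is zero, a NEED_RESCHED request always preempts, and a NEED_RESCHED_LAZY
request preempts only while preempt_lazy_count is also zero. This is an
illustrative stand-alone program, not kernel code: should_preempt_irq(), the
test values in main() and the exact flag bits are invented for the sketch and
merely mirror the TIF values used in the patch.

/*
 * Stand-alone model of the IRQ-exit check performed by the patched
 * __irq_svc path.  Not kernel code; names and bit values are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define _TIF_NEED_RESCHED       (1UL << 1)  /* mirrors TIF_NEED_RESCHED */
#define _TIF_NEED_RESCHED_LAZY  (1UL << 5)  /* mirrors TIF_NEED_RESCHED_LAZY */

static bool should_preempt_irq(int preempt_count, int preempt_lazy_count,
                               unsigned long flags)
{
        if (preempt_count != 0)
                return false;           /* "bne 1f": preemption disabled */
        if (flags & _TIF_NEED_RESCHED)
                return true;            /* hard resched request always preempts */
        if (preempt_lazy_count != 0)
                return false;           /* "movne r0, #0": lazy preempt disabled */
        return (flags & _TIF_NEED_RESCHED_LAZY) != 0;
}

int main(void)
{
        /* Lazy request preempts only while preempt_lazy_count is 0. */
        printf("%d\n", should_preempt_irq(0, 0, _TIF_NEED_RESCHED_LAZY)); /* 1 */
        printf("%d\n", should_preempt_irq(0, 1, _TIF_NEED_RESCHED_LAZY)); /* 0 */
        /* A hard request is honoured regardless of the lazy count. */
        printf("%d\n", should_preempt_irq(0, 1, _TIF_NEED_RESCHED));      /* 1 */
        return 0;
}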