From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Date: Mon, 12 Aug 2024 12:39:04 +0200 Subject: [PATCH 3/4] locking/rt: Add sparse annotation for RCU. Every lock that becomes a sleeping lock on PREEMPT_RT starts an RCU read section. There is no sparse annotation for this and sparse complains about unbalanced locking. Add __acquires/__releases for the RCU lock. This covers all but the trylock functions. I tried the __cond_acquires() annotation but it didn't work. Link: https://lore.kernel.org/r/20240812104200.2239232-4-bigeasy@linutronix.de Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- kernel/locking/spinlock_rt.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) @ kernel/locking/spinlock_rt.c:54 @ static __always_inline void __rt_spin_lo migrate_disable(); } -void __sched rt_spin_lock(spinlock_t *lock) +void __sched rt_spin_lock(spinlock_t *lock) __acquires(RCU) { spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); __rt_spin_lock(lock); @ kernel/locking/spinlock_rt.c:78 @ void __sched rt_spin_lock_nest_lock(spin EXPORT_SYMBOL(rt_spin_lock_nest_lock); #endif -void __sched rt_spin_unlock(spinlock_t *lock) +void __sched rt_spin_unlock(spinlock_t *lock) __releases(RCU) { spin_release(&lock->dep_map, _RET_IP_); migrate_enable(); @ kernel/locking/spinlock_rt.c:228 @ int __sched rt_write_trylock(rwlock_t *r } EXPORT_SYMBOL(rt_write_trylock); -void __sched rt_read_lock(rwlock_t *rwlock) +void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU) { rtlock_might_resched(); rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); @ kernel/locking/spinlock_rt.c:238 @ void __sched rt_read_lock(rwlock_t *rwlo } EXPORT_SYMBOL(rt_read_lock); -void __sched rt_write_lock(rwlock_t *rwlock) +void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU) { rtlock_might_resched(); rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); @ kernel/locking/spinlock_rt.c:249 @ void __sched rt_write_lock(rwlock_t *rwl EXPORT_SYMBOL(rt_write_lock); #ifdef 
CONFIG_DEBUG_LOCK_ALLOC -void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) +void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU) { rtlock_might_resched(); rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_); @ kernel/locking/spinlock_rt.c:260 @ void __sched rt_write_lock_nested(rwlock EXPORT_SYMBOL(rt_write_lock_nested); #endif -void __sched rt_read_unlock(rwlock_t *rwlock) +void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU) { rwlock_release(&rwlock->dep_map, _RET_IP_); migrate_enable(); @ kernel/locking/spinlock_rt.c:269 @ void __sched rt_read_unlock(rwlock_t *rw } EXPORT_SYMBOL(rt_read_unlock); -void __sched rt_write_unlock(rwlock_t *rwlock) +void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU) { rwlock_release(&rwlock->dep_map, _RET_IP_); rcu_read_unlock();