mirror of
https://github.com/torvalds/linux.git
synced 2024-11-21 19:46:16 +00:00
locking/mutex: Expose __mutex_owner()
Implementing proxy execution requires that scheduler code be able to identify the current owner of a mutex. Expose __mutex_owner() for this purpose (alone!). Includes a null mutex check, so that users of the function can be simplified. [Removed the EXPORT_SYMBOL] [jstultz: Reworked per Peter's suggestions] Signed-off-by: Juri Lelli <juri.lelli@redhat.com> Signed-off-by: Valentin Schneider <valentin.schneider@arm.com> Signed-off-by: Connor O'Brien <connoro@google.com> Signed-off-by: John Stultz <jstultz@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Metin Kaya <metin.kaya@arm.com> Reviewed-by: Valentin Schneider <vschneid@redhat.com> Tested-by: K Prateek Nayak <kprateek.nayak@amd.com> Tested-by: Metin Kaya <metin.kaya@arm.com> Link: https://lore.kernel.org/r/20241009235352.1614323-4-jstultz@google.com
This commit is contained in:
parent
5ec58525a1
commit
3a9320ecb0
@ -56,31 +56,6 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
|
||||
}
|
||||
EXPORT_SYMBOL(__mutex_init);
|
||||
|
||||
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

/* Mask covering all flag bits above; ~MUTEX_FLAGS recovers the owner pointer. */
#define MUTEX_FLAGS		0x07
|
||||
|
||||
/*
|
||||
* Internal helper function; C doesn't allow us to hide it :/
|
||||
*
|
||||
* DO NOT USE (outside of mutex code).
|
||||
*/
|
||||
static inline struct task_struct *__mutex_owner(struct mutex *lock)
|
||||
{
|
||||
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
|
||||
}
|
||||
|
||||
static inline struct task_struct *__owner_task(unsigned long owner)
|
||||
{
|
||||
return (struct task_struct *)(owner & ~MUTEX_FLAGS);
|
||||
|
@ -20,6 +20,33 @@ struct mutex_waiter {
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

/* Mask covering all flag bits above; ~MUTEX_FLAGS recovers the owner pointer. */
#define MUTEX_FLAGS		0x07
|
||||
|
||||
/*
|
||||
* Internal helper function; C doesn't allow us to hide it :/
|
||||
*
|
||||
* DO NOT USE (outside of mutex & scheduler code).
|
||||
*/
|
||||
static inline struct task_struct *__mutex_owner(struct mutex *lock)
|
||||
{
|
||||
if (!lock)
|
||||
return NULL;
|
||||
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_MUTEXES
|
||||
extern void debug_mutex_lock_common(struct mutex *lock,
|
||||
struct mutex_waiter *waiter);
|
||||
|
Loading…
Reference in New Issue
Block a user