152c8e07fSIngo Weinhold/*
24e08fb85SIngo Weinhold * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
352c8e07fSIngo Weinhold * Distributed under the terms of the MIT license.
452c8e07fSIngo Weinhold *
552c8e07fSIngo Weinhold * Authors:
652c8e07fSIngo Weinhold *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
852c8e07fSIngo Weinhold */
952c8e07fSIngo Weinhold
1052c8e07fSIngo Weinhold#include <lock.h>
1152c8e07fSIngo Weinhold
125c20c5a5SIngo Weinhold#include <stdlib.h>
135c20c5a5SIngo Weinhold#include <string.h>
1452c8e07fSIngo Weinhold
155c20c5a5SIngo Weinhold#include <AutoLocker.h>
165c20c5a5SIngo Weinhold
175c20c5a5SIngo Weinhold// libroot
185c20c5a5SIngo Weinhold#include <user_thread.h>
195c20c5a5SIngo Weinhold
205c20c5a5SIngo Weinhold// system
215c20c5a5SIngo Weinhold#include <syscalls.h>
225c20c5a5SIngo Weinhold#include <user_thread_defs.h>
235c20c5a5SIngo Weinhold
245c20c5a5SIngo Weinhold#include "thread.h"
255c20c5a5SIngo Weinhold
265c20c5a5SIngo Weinhold
// A mutex waiter, allocated on the waiting thread's stack and linked into the
// mutex's singly linked queue while the thread is blocked.
struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};
325c20c5a5SIngo Weinhold
// An rw_lock waiter, allocated on the waiting thread's stack and linked into
// the lock's singly linked queue while the thread is blocked.
struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;		// true if waiting for write access
};
395c20c5a5SIngo Weinhold
415c20c5a5SIngo Weinhold#define MUTEX_FLAG_RELEASED		0x2
425c20c5a5SIngo Weinhold
445c20c5a5SIngo Weinhold
455c20c5a5SIngo Weinhold
46893367cfSMichael Lotzstatic void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
47893367cfSMichael Lotzstatic void _rw_lock_write_unlock_threads_locked(rw_lock* lock);
48893367cfSMichael Lotz
49604770b3SJulian Harnathstatic status_t _mutex_lock_threads_locked(mutex* lock);
50604770b3SJulian Harnathstatic void _mutex_unlock_threads_locked(mutex* lock);
51604770b3SJulian Harnath
52604770b3SJulian Harnath
/*!	Helper class playing the role of the kernel's thread spinlock. We don't
	use a spinlock, as that could be expensive in userland (due to the
	spinlock holder potentially being unscheduled); we use a benaphore
	instead.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		// Benaphore: fCount == 1 means unlocked. The semaphore starts at 0
		// and is only touched when there is contention.
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	// Acquires the lock. Returns true on success, false only if acquiring
	// the semaphore failed (e.g. it was deleted).
	bool Lock()
	{
		// Fast path: count was positive, we got the lock without a syscall.
		if (atomic_add(&fCount, -1) > 0)
			return true;

		// Contended -- wait on the semaphore, retrying on interruption.
		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	// Releases the lock, waking one blocked thread if the count indicates
	// contention.
	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;			// 1 unlocked, <= 0 locked/contended
	sem_id	fSemaphore;		// waiters block here
};
965c20c5a5SIngo Weinhold
975c20c5a5SIngo Weinholdstatic ThreadSpinlock sThreadSpinlock;
985c20c5a5SIngo Weinhold
995c20c5a5SIngo Weinhold
1005c20c5a5SIngo Weinhold// #pragma mark -
10152c8e07fSIngo Weinhold
10252c8e07fSIngo Weinhold
10352c8e07fSIngo Weinholdint32
10452c8e07fSIngo Weinholdrecursive_lock_get_recursion(recursive_lock *lock)
10552c8e07fSIngo Weinhold{
10651ecdb00SIngo Weinhold	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
10752c8e07fSIngo Weinhold		return lock->recursion;
10852c8e07fSIngo Weinhold
10952c8e07fSIngo Weinhold	return -1;
11052c8e07fSIngo Weinhold}
11152c8e07fSIngo Weinhold
11252c8e07fSIngo Weinhold
11352c8e07fSIngo Weinholdvoid
11451ecdb00SIngo Weinholdrecursive_lock_init(recursive_lock *lock, const char *name)
11552c8e07fSIngo Weinhold{
11651ecdb00SIngo Weinhold	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
11751ecdb00SIngo Weinhold	RECURSIVE_LOCK_HOLDER(lock) = -1;
11852c8e07fSIngo Weinhold	lock->recursion = 0;
11952c8e07fSIngo Weinhold}
12052c8e07fSIngo Weinhold
12152c8e07fSIngo Weinhold
12252c8e07fSIngo Weinholdvoid
12351ecdb00SIngo Weinholdrecursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
12452c8e07fSIngo Weinhold{
12551ecdb00SIngo Weinhold	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
12651ecdb00SIngo Weinhold	RECURSIVE_LOCK_HOLDER(lock) = -1;
12751ecdb00SIngo Weinhold	lock->recursion = 0;
12852c8e07fSIngo Weinhold}
12952c8e07fSIngo Weinhold
13052c8e07fSIngo Weinhold
13152c8e07fSIngo Weinholdvoid
13252c8e07fSIngo Weinholdrecursive_lock_destroy(recursive_lock *lock)
13352c8e07fSIngo Weinhold{
13452c8e07fSIngo Weinhold	if (lock == NULL)
13552c8e07fSIngo Weinhold		return;
13652c8e07fSIngo Weinhold
13751ecdb00SIngo Weinhold	mutex_destroy(&lock->lock);
13852c8e07fSIngo Weinhold}
13952c8e07fSIngo Weinhold
14052c8e07fSIngo Weinhold
/*!	Acquires \a lock. If the calling thread already holds the lock, only the
	recursion count is incremented; otherwise the underlying mutex is locked.
	Always returns \c B_OK.
*/
status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		// We don't hold the lock yet -- acquire the underlying mutex.
		mutex_lock(&lock->lock);
#if !KDEBUG
		// Without KDEBUG the mutex doesn't track its holder, so we do it.
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}
15651ecdb00SIngo Weinhold
15751ecdb00SIngo Weinhold
/*!	Tries to acquire \a lock without blocking. If the calling thread already
	holds the lock, only the recursion count is incremented. Returns \c B_OK
	on success, or the error mutex_trylock() returned when the mutex was
	already held by another thread.
*/
status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		// Without KDEBUG the mutex doesn't track its holder, so we do it.
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}
17652c8e07fSIngo Weinhold
17752c8e07fSIngo Weinhold
/*!	Releases one level of recursion of \a lock. The underlying mutex is only
	unlocked when the recursion count drops to zero. Panics when called by a
	thread that doesn't hold the lock.
*/
void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		// Clear the manually tracked holder before giving up the mutex.
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
19152c8e07fSIngo Weinhold
19252c8e07fSIngo Weinhold
19352c8e07fSIngo Weinhold//	#pragma mark -
19452c8e07fSIngo Weinhold
19552c8e07fSIngo Weinhold
/*!	Enqueues the calling thread in \a lock's waiter queue and blocks it.
	Must be called with the thread spinlock held; it is dropped while the
	thread is blocked and re-acquired before returning.
	\param writer \c true to wait for write access, \c false for read access.
	\return the final (non-B_INTERRUPTED) result of _kern_block_thread() --
		\c B_OK when legitimately woken by an unlocker.
*/
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list (the waiter lives on our stack -- safe, since
	// we don't return before being dequeued)
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	// the queue head's "last" pointer always points to the tail
	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}
2235c20c5a5SIngo Weinhold
2245c20c5a5SIngo Weinhold
/*!	Wakes the next eligible waiter(s) of \a lock. Must be called with the
	thread spinlock held.
	If a writer heads the queue (and no readers are active or pending), that
	writer is woken, made the holder, and \c RW_LOCK_WRITER_COUNT_BASE is
	returned. Otherwise all readers at the head of the queue (up to the next
	writer) are woken and their number is returned. Returns 0 when no thread
	could be woken.
*/
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread; clearing waiter->thread tells a timed-out waiter
		// that it was woken (see _rw_lock_read_lock_with_timeout())
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	// Only count the woken readers as active while a writer still has a
	// claim on the lock (count includes a writer portion).
	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}
27252c8e07fSIngo Weinhold
27352c8e07fSIngo Weinhold
27452c8e07fSIngo Weinholdvoid
2755c20c5a5SIngo Weinholdrw_lock_init(rw_lock* lock, const char* name)
27652c8e07fSIngo Weinhold{
2775c20c5a5SIngo Weinhold	lock->name = name;
2785c20c5a5SIngo Weinhold	lock->waiters = NULL;
2795c20c5a5SIngo Weinhold	lock->holder = -1;
2802ea2527fSIngo Weinhold	lock->count = 0;
2815c20c5a5SIngo Weinhold	lock->owner_count = 0;
2822ea2527fSIngo Weinhold	lock->active_readers = 0;
2832ea2527fSIngo Weinhold	lock->pending_readers = 0;
2845c20c5a5SIngo Weinhold	lock->flags = 0;
2855c20c5a5SIngo Weinhold}
28652c8e07fSIngo Weinhold
28752c8e07fSIngo Weinhold
2885c20c5a5SIngo Weinholdvoid
2895c20c5a5SIngo Weinholdrw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
2905c20c5a5SIngo Weinhold{
2915c20c5a5SIngo Weinhold	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
2925c20c5a5SIngo Weinhold	lock->waiters = NULL;
2935c20c5a5SIngo Weinhold	lock->holder = -1;
2942ea2527fSIngo Weinhold	lock->count = 0;
2955c20c5a5SIngo Weinhold	lock->owner_count = 0;
2962ea2527fSIngo Weinhold	lock->active_readers = 0;
2972ea2527fSIngo Weinhold	lock->pending_readers = 0;
2985c20c5a5SIngo Weinhold	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
29952c8e07fSIngo Weinhold}
30052c8e07fSIngo Weinhold
30152c8e07fSIngo Weinhold
/*!	Destroys \a lock: unblocks all still-waiting threads with \c B_ERROR and
	frees the name, if it was cloned at init time.
*/
void
rw_lock_destroy(rw_lock* lock)
{
	// Remember the name now; it is freed only after the spinlock is dropped.
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		// Try to recover (if panic() returned): acquire the write lock
		// ourselves before tearing the lock down.
		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
33852c8e07fSIngo Weinhold
33952c8e07fSIngo Weinhold
3402ea2527fSIngo Weinhold#if !KDEBUG_RW_LOCK_DEBUG
3412ea2527fSIngo Weinhold
/*!	Slow path of the read lock, entered after the caller's atomic_add() on
	\c lock->count found a writer portion in the count (presumably invoked
	from an inline rw_lock_read_lock() -- the count has already been
	incremented when we get here; TODO confirm against the header).
*/
status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		// Only count as active while a writer still holds a claim.
		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}
36952c8e07fSIngo Weinhold
37052c8e07fSIngo Weinhold
/*!	Like _rw_lock_read_lock(), but waits at most until the given timeout.
	\param timeoutFlags flags passed through to _kern_block_thread()
		(absolute/relative timeout semantics).
	\param timeout the timeout value, interpreted according to
		\a timeoutFlags.
	\return \c B_OK when the read lock was acquired (including the case where
		the unblocker raced with our timeout); otherwise the error from
		_kern_block_thread() (e.g. \c B_TIMED_OUT).
*/
status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list (lives on our stack; we dequeue it before
	// returning in every path)
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	// rw_lock_unblock() clears waiter.thread when it dequeues us, so a NULL
	// thread means we were woken even if the block call reported an error.
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}
4604e08fb85SIngo Weinhold
4612ea2527fSIngo Weinholdvoid
462893367cfSMichael Lotz_rw_lock_read_unlock(rw_lock* lock)
46352c8e07fSIngo Weinhold{
464893367cfSMichael Lotz	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
465893367cfSMichael Lotz	_rw_lock_read_unlock_threads_locked(lock);
466893367cfSMichael Lotz}
4675c20c5a5SIngo Weinhold
468893367cfSMichael Lotz
/*!	Releases a read lock. Must be called with the thread spinlock held.
	Panics when the lock isn't read-locked at all.
*/
static void
_rw_lock_read_unlock_threads_locked(rw_lock* lock)
{
	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		// We are the writer and only took a nested read lock.
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		// recover (if panic() returned) by resetting the reader count
		lock->active_readers = 0;
		return;
	}

	// we were the last active reader -- wake up waiting writers/readers
	rw_lock_unblock(lock);
}
4905c20c5a5SIngo Weinhold
4912ea2527fSIngo Weinhold#endif	// !KDEBUG_RW_LOCK_DEBUG
4922ea2527fSIngo Weinhold
4935c20c5a5SIngo Weinhold
/*!	Acquires the write lock on \a lock, blocking until all current readers
	and earlier writers are done. Nested write locks by the holder only bump
	\c owner_count.
*/
status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count. (Readers that already incremented count but haven't entered the
	// critical section yet are accounted for via pending_readers.)
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}
53052c8e07fSIngo Weinhold
53152c8e07fSIngo Weinhold
5322ea2527fSIngo Weinholdvoid
533893367cfSMichael Lotz_rw_lock_write_unlock(rw_lock* lock)
53452c8e07fSIngo Weinhold{
535893367cfSMichael Lotz	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
536893367cfSMichael Lotz	_rw_lock_write_unlock_threads_locked(lock);
537893367cfSMichael Lotz}
5385c20c5a5SIngo Weinhold
539893367cfSMichael Lotz
/*!	Releases a write lock. Must be called with the thread spinlock held.
	Panics when the caller isn't the holder. Only when the last nested write
	lock is released are waiters woken.
*/
static void
_rw_lock_write_unlock_threads_locked(rw_lock* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	// What remains in owner_count is the number of nested read locks we took
	// while holding the write lock.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet, won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}
58052c8e07fSIngo Weinhold
58152c8e07fSIngo Weinhold
5825c20c5a5SIngo Weinhold// #pragma mark -
58352c8e07fSIngo Weinhold
58452c8e07fSIngo Weinhold
58552c8e07fSIngo Weinholdvoid
5865c20c5a5SIngo Weinholdmutex_init(mutex* lock, const char *name)
58752c8e07fSIngo Weinhold{
5885c20c5a5SIngo Weinhold	lock->name = name;
5895c20c5a5SIngo Weinhold	lock->waiters = NULL;
5905c20c5a5SIngo Weinhold#if KDEBUG
5915c20c5a5SIngo Weinhold	lock->holder = -1;
5925c20c5a5SIngo Weinhold#else
5935c20c5a5SIngo Weinhold	lock->count = 0;
5945c20c5a5SIngo Weinhold#endif
5955c20c5a5SIngo Weinhold	lock->flags = 0;
59652c8e07fSIngo Weinhold}
59752c8e07fSIngo Weinhold
59852c8e07fSIngo Weinhold
59952c8e07fSIngo Weinholdvoid
6005c20c5a5SIngo Weinholdmutex_init_etc(mutex* lock, const char *name, uint32 flags)
60152c8e07fSIngo Weinhold{
6025c20c5a5SIngo Weinhold	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;