1/*
2 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
5 * Distributed under the terms of the MIT License.
6 *
7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8 * Distributed under the terms of the NewOS License.
9 */
10#ifndef _THREAD_H
11#define _THREAD_H
12
13
14#include <OS.h>
15
16#include <arch/atomic.h>
17#include <arch/thread.h>
18// For the thread blocking inline functions only.
19#include <kscheduler.h>
20#include <ksignal.h>
21#include <thread_types.h>
22
23
24struct arch_fork_arg;
25struct kernel_args;
26struct select_info;
27struct thread_creation_attributes;
28
29
30// thread notifications
31#define THREAD_MONITOR		'_tm_'
32#define THREAD_ADDED		0x01
33#define THREAD_REMOVED		0x02
34#define THREAD_NAME_CHANGED	0x04
35
36
37namespace BKernel {
38
39
/*!	Kernel-internal extension of the public \c thread_creation_attributes.
	The additional fields below are only meaningful when a thread is created
	from within the kernel (cf. the "when calling from kernel only" note).
*/
struct ThreadCreationAttributes : thread_creation_attributes {
	// when calling from kernel only
	team_id			team;
	Thread*			thread;
	sigset_t		signal_mask;
	size_t			additional_stack_size;	// additional space in the stack
											// area after the TLS region, not
											// used as thread stack
	thread_func		kernelEntry;
	void*			kernelArgument;
	arch_fork_arg*	forkArgs;				// If non-NULL, the userland thread
											// will be started with this
											// register context.

public:
								ThreadCreationAttributes() {}
									// no-init constructor
								ThreadCreationAttributes(
									thread_func function, const char* name,
									int32 priority, void* arg,
									team_id team = -1, Thread* thread = NULL);

			status_t			InitFromUserAttributes(
									const thread_creation_attributes*
										userAttributes,
									char* nameBuffer);
};
67
68
69}	// namespace BKernel
70
71using BKernel::ThreadCreationAttributes;
72
73
74extern spinlock gThreadCreationLock;
75
76
77#ifdef __cplusplus
78extern "C" {
79#endif
80
void thread_at_kernel_entry(bigtime_t now);
	// called when the thread enters the kernel on behalf of the thread
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
	// NOTE(review): presumably the kernel-exit variant that skips signal
	// handling -- confirm against the implementation
void thread_reset_for_exec(void);

status_t thread_init(struct kernel_args *args);
status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
void thread_yield(void);
void thread_exit(void);

// Invokes \a function with \a data; judging by the signature it iterates
// threads, but verify the exact iteration semantics in the implementation.
void thread_map(void (*function)(Thread* thread, void* data), void* data);

int32 thread_max_threads(void);
int32 thread_used_threads(void);

const char* thread_state_to_text(Thread* thread, int32 state);

int32 thread_get_io_priority(thread_id id);
void thread_set_io_priority(int32 priority);
101
102#define thread_get_current_thread arch_thread_get_current_thread
103
104static thread_id thread_get_current_thread_id(void);
105static inline thread_id
106thread_get_current_thread_id(void)
107{
108	Thread *thread = thread_get_current_thread();
109	return thread ? thread->id : 0;
110}
111
/*!	Returns whether \a thread runs at \c B_IDLE_PRIORITY, i.e. is an idle
	thread.
*/
static inline bool
thread_is_idle_thread(Thread *thread)
{
	return thread->priority == B_IDLE_PRIORITY;
}
117
// thread ID allocation
thread_id allocate_thread_id();
thread_id peek_next_thread_id();

// thread creation and stack setup
status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2);
status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize);
thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
	bool kernel);

thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
	void *args, team_id team);
status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode);

// select() support for thread objects
status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);
135
136#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()
137
// Thread blocking API; see the thread_prepare_to_block() documentation below
// for the full blocking/unblocking protocol.
status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
void thread_unblock(Thread* thread, status_t status);
141
142// used in syscalls.c
143status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
144status_t _user_rename_thread(thread_id thread, const char *name);
145status_t _user_suspend_thread(thread_id thread);
146status_t _user_resume_thread(thread_id thread);
147status_t _user_rename_thread(thread_id thread, const char *name);
148thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
149status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
150status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
151	bigtime_t* _remainingTime);
152status_t _user_kill_thread(thread_id thread);
153status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
154void _user_thread_yield(void);
155void _user_exit_thread(status_t return_value);
156bool _user_has_data(thread_id thread);
157status_t _user_send_data(thread_id thread, int32 code, const void *buffer, size_t buffer_size);
158status_t _user_receive_data(thread_id *_sender, void *buffer, size_t buffer_size);
159thread_id _user_find_thread(const char *name);
160status_t _user_get_thread_info(thread_id id, thread_info *info);
161status_t _user_get_next_thread_info(team_id team, int32 *cookie, thread_info *info);
162
163status_t _user_block_thread(uint32 flags, bigtime_t timeout);
164status_t _user_unblock_thread(thread_id thread, status_t status);
165status_t _user_unblock_threads(thread_id* threads, uint32 count,
166	status_t status);
167
168// ToDo: these don't belong here
169struct rlimit;
170int _user_getrlimit(int resource, struct rlimit * rlp);
171int _user_setrlimit(int resource, const struct rlimit * rlp);
172
173#ifdef __cplusplus
174}
175#endif
176
177
178/*!	Checks whether the current thread would immediately be interrupted when
179	blocking it with the given wait/interrupt flags.
180
181	The caller must hold the scheduler lock.
182
183	\param thread The current thread.
184	\param flags Wait/interrupt flags to be considered. Relevant are:
185		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
186			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
187		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
188			signal.
189	\return \c true, if the thread would be interrupted, \c false otherwise.
190*/
191static inline bool
192thread_is_interrupted(Thread* thread, uint32 flags)
193{
194	sigset_t pendingSignals = thread->AllPendingSignals();
195	return ((flags & B_CAN_INTERRUPT) != 0
196			&& (pendingSignals & ~thread->sig_block_mask) != 0)
197		|| ((flags & B_KILL_CAN_INTERRUPT) != 0
198			&& (pendingSignals & KILL_SIGNALS) != 0);
199}
200
201
202/*!	Checks whether the given thread is currently blocked (i.e. still waiting
203	for something).
204
205	If a stable answer is required, the caller must hold the scheduler lock.
	Alternatively, if waiting is not interruptible and cannot time out, it also
	suffices to hold the client lock that was held when calling
	thread_prepare_to_block() and the unblocking functions.
209
210	\param thread The thread in question.
211	\return \c true, if the thread is blocked, \c false otherwise.
212*/
static inline bool
thread_is_blocked(Thread* thread)
{
	// wait.status is exactly 1 while the thread is (still) waiting; it is
	// atomically replaced by the wake-up status in thread_unblock_locked().
	return atomic_get(&thread->wait.status) == 1;
}
218
219
220/*!	Prepares the current thread for waiting.
221
222	This is the first of two steps necessary to block the current thread
223	(IOW, to let it wait for someone else to unblock it or optionally time out
224	after a specified delay). The process consists of two steps to avoid race
225	conditions in case a lock other than the scheduler lock is involved.
226
227	Usually the thread waits for some condition to change and this condition is
228	something reflected in the caller's data structures which should be
229	protected by a client lock the caller knows about. E.g. in the semaphore
230	code that lock is a per-semaphore spinlock that protects the semaphore data,
231	including the semaphore count and the queue of waiting threads. For certain
232	low-level locking primitives (e.g. mutexes) that client lock is the
233	scheduler lock itself, which simplifies things a bit.
234
235	If a client lock other than the scheduler lock is used, this function must
236	be called with that lock being held. Afterwards that lock should be dropped
237	and the function that actually blocks the thread shall be invoked
238	(thread_block[_locked]() or thread_block_with_timeout()). In between these
239	two steps no functionality that uses the thread blocking API for this thread
240	shall be used.
241
242	When the caller determines that the condition for unblocking the thread
243	occurred, it calls thread_unblock_locked() to unblock the thread. At that
244	time one of locks that are held when calling thread_prepare_to_block() must
245	be held. Usually that would be the client lock. In two cases it generally
246	isn't, however, since the unblocking code doesn't know about the client
247	lock: 1. When thread_block_with_timeout() had been used and the timeout
248	occurs. 2. When thread_prepare_to_block() had been called with one or both
249	of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified and
250	someone calls thread_interrupt() that is supposed to wake up the thread.
251	In either of these two cases only the scheduler lock is held by the
252	unblocking code. A timeout can only happen after
253	thread_block_with_timeout() has been called, but an interruption is
254	possible at any time. The client code must deal with those situations.
255
256	Generally blocking and unblocking threads proceed in the following manner:
257
258	Blocking thread:
259	- Acquire client lock.
260	- Check client condition and decide whether blocking is necessary.
261	- Modify some client data structure to indicate that this thread is now
262		waiting.
263	- Release client lock (unless client lock is the scheduler lock).
264	- Block.
265	- Acquire client lock (unless client lock is the scheduler lock).
266	- Check client condition and compare with block result. E.g. if the wait was
267		interrupted or timed out, but the client condition indicates success, it
268		may be considered a success after all, since usually that happens when
269		another thread concurrently changed the client condition and also tried
270		to unblock the waiting thread. It is even necessary when that other
271		thread changed the client data structures in a way that associate some
272		resource with the unblocked thread, or otherwise the unblocked thread
273		would have to reverse that here.
274	- If still necessary -- i.e. not already taken care of by an unblocking
275		thread -- modify some client structure to indicate that the thread is no
276		longer waiting, so it isn't erroneously unblocked later.
277
278	Unblocking thread:
279	- Acquire client lock.
280	- Check client condition and decide whether a blocked thread can be woken
281		up.
282	- Check the client data structure that indicates whether one or more threads
283		are waiting and which thread(s) need(s) to be woken up.
284	- Unblock respective thread(s).
285	- Possibly change some client structure, so that an unblocked thread can
286		decide whether a concurrent timeout/interruption can be ignored, or
287		simply so that it doesn't have to do any more cleanup.
288
289	Note that in the blocking thread the steps after blocking are strictly
290	required only if timeouts or interruptions are possible. If they are not,
291	the blocking thread can only be woken up explicitly by an unblocking thread,
292	which could already take care of all the necessary client data structure
293	modifications, so that the blocking thread wouldn't have to do that.
294
295	Note that the client lock can but does not have to be a spinlock.
296	A mutex, a semaphore, or anything that doesn't try to use the thread
297	blocking API for the calling thread when releasing the lock is fine.
298	In particular that means in principle thread_prepare_to_block() can be
299	called with interrupts enabled.
300
301	Care must be taken when the wait can be interrupted or can time out,
302	especially with a client lock that uses the thread blocking API. After a
	blocked thread has been interrupted or the time out occurred it cannot
304	acquire the client lock (or any other lock using the thread blocking API)
305	without first making sure that the thread doesn't still appear to be
306	waiting to other client code. Otherwise another thread could try to unblock
307	it which could erroneously unblock the thread while already waiting on the
308	client lock. So usually when interruptions or timeouts are possible a
309	spinlock needs to be involved.
310
311	\param thread The current thread.
312	\param flags The blocking flags. Relevant are:
313		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
314			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
315		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
316			signal.
317	\param type The type of object the thread will be blocked at. Informative/
318		for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
319		constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
320		string.
321	\param object The object the thread will be blocked at.  Informative/for
322		debugging purposes.
323*/
324static inline void
325thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
326	const void* object)
327{
328	thread->wait.flags = flags;
329	thread->wait.type = type;
330	thread->wait.object = object;
331	atomic_set(&thread->wait.status, 1);
332		// Set status last to guarantee that the other fields are initialized
333		// when a thread is waiting.
334}
335
336
337/*!	Unblocks the specified blocked thread.
338
339	If the thread is no longer waiting (e.g. because thread_unblock_locked() has
340	already been called in the meantime), this function does not have any
341	effect.
342
343	The caller must hold the scheduler lock and the client lock (might be the
344	same).
345
346	\param thread The thread to be unblocked.
347	\param status The unblocking status. That's what the unblocked thread's
348		call to thread_block_locked() will return.
349*/
350static inline void
351thread_unblock_locked(Thread* thread, status_t status)
352{
353	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
354		return;
355
356	// wake up the thread, if it is sleeping
357	if (thread->state == B_THREAD_WAITING)
358		scheduler_enqueue_in_run_queue(thread);
359}
360
361
362/*!	Interrupts the specified blocked thread, if possible.
363
364	The function checks whether the thread can be interrupted and, if so, calls
365	\code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
366	function is a no-op.
367
368	The caller must hold the scheduler lock. Normally thread_unblock_locked()
369	also requires the client lock to be held, but in this case the caller
370	usually doesn't know it. This implies that the client code needs to take
371	special care, if waits are interruptible. See thread_prepare_to_block() for
372	more information.
373
374	\param thread The thread to be interrupted.
375	\param kill If \c false, the blocked thread is only interrupted, when the
376		flag \c B_CAN_INTERRUPT was specified for the blocked thread. If
377		\c true, it is only interrupted, when at least one of the flags
378		\c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT was specified for the
379		blocked thread.
380	\return \c B_OK, if the thread is interruptible and thread_unblock_locked()
381		was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
382		thread actually has been interrupted -- it could have been unblocked
383		before already.
384*/
385static inline status_t
386thread_interrupt(Thread* thread, bool kill)
387{
388	if (thread_is_blocked(thread)) {
389		if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
390			|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
391			thread_unblock_locked(thread, B_INTERRUPTED);
392			return B_OK;
393		}
394	}
395
396	return B_NOT_ALLOWED;
397}
398
399
400static inline void
401thread_pin_to_current_cpu(Thread* thread)
402{
403	thread->pinned_to_cpu++;
404}
405
406
407static inline void
408thread_unpin_from_current_cpu(Thread* thread)
409{
410	thread->pinned_to_cpu--;
411}
412
413
414static inline void
415thread_prepare_suspend()
416{
417	Thread* thread = thread_get_current_thread();
418	thread->going_to_suspend = true;
419}
420
421
422static inline void
423thread_suspend(bool alreadyPrepared = false)
424{
425	Thread* thread = thread_get_current_thread();
426	if (!alreadyPrepared)
427		thread_prepare_suspend();
428
429	cpu_status state = disable_interrupts();
430	acquire_spinlock(&thread->scheduler_lock);
431
432	if (thread->going_to_suspend)
433		scheduler_reschedule(B_THREAD_SUSPENDED);
434
435	release_spinlock(&thread->scheduler_lock);
436	restore_interrupts(state);
437}
438
439
440static inline void
441thread_continue(Thread* thread)
442{
443	thread->going_to_suspend = false;
444
445	cpu_status state = disable_interrupts();
446	acquire_spinlock(&thread->scheduler_lock);
447
448	if (thread->state == B_THREAD_SUSPENDED)
449		scheduler_enqueue_in_run_queue(thread);
450
451	release_spinlock(&thread->scheduler_lock);
452	restore_interrupts(state);
453}
454
455
456#endif /* _THREAD_H */
457