/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01
	// team has executed exec*()
#define	TEAM_FLAG_DUMP_CORE	0x02
	// a core dump is in progress

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	ConditionVariable	condition;
	status_t			result;		// the result of the loading
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
	bigtime_t			user_time;
	bigtime_t			kernel_time;

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by the group's lock, the team's
	// lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }
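
			// A minimal usage sketch, assuming Get()/GetAndLock() return the
			// team with a reference acquired: pair them with
			// ReleaseReference() or UnlockAndReleaseReference(), e.g.
			//
			//   Team* team = Team::GetAndLock(id);
			//   if (team != NULL) {
			//       ...	// access fields protected by fLock
			//       team->UnlockAndReleaseReference();
			//   }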

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

			ConditionVariable*	CoreDumpCondition() const
									{ return fCoreDumpCondition; }
			void				SetCoreDumpCondition(
									ConditionVariable* condition)
									{ fCoreDumpCondition = condition; }
private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction 	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically

			ConditionVariable*	fCoreDumpCondition;
									// protected by fLock
};


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptibility flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so the thread can read them without locking. Holding the
	// scheduler lock and checking that the thread is not running also
	// guarantees that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);
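
			// Usage sketch, assuming Get()/GetAndLock() behave like their
			// Team counterparts and return a referenced thread, e.g.
			//
			//   Thread* thread = Thread::Get(id);
			//   if (thread != NULL) {
			//       ...
			//       thread->ReleaseReference();
			//   }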

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};


struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};
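
// A minimal usage sketch, assuming Next() acquires a reference to the
// returned team and returns NULL once the iteration is done:
//
//   TeamListIterator iterator;
//   while (Team* team = iterator.Next()) {
//       ...	// inspect the team
//       team->ReleaseReference();
//   }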


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
#define	THREAD_FLAGS_TRAP_FOR_CORE_DUMP		0x1000
	// core dump in progress; the thread shall not exit the kernel to userland,
	// but shall invoke core_dump_trap_thread() instead.
#ifdef _COMPAT_MODE
#define	THREAD_FLAGS_COMPAT_MODE			0x2000
	// the thread runs in compatibility mode (for instance IA32 on x86_64).
#endif
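
// thread::flags may also be updated from interrupt handlers, so code
// typically tests and modifies these bits with the atomic_*() helpers from
// <SupportDefs.h>. A sketch of a common check:
//
//   if ((atomic_get(&thread->flags) & THREAD_FLAGS_SIGNALS_PENDING) != 0) {
//       // an unblocked signal is pending; take the slow path
//   }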

#endif	/* _KERNEL_THREAD_TYPES_H */
