thread_types.h revision 52d500e5
/*
 * Copyright 2004-2011, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}
namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}

using BKernel::Team;
using BKernel::Thread;
using BKernel::ProcessGroup;


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	Thread*				thread;	// the waiting thread
	status_t			result;		// the result of the loading
	bool				done;		// set when loading is done/aborted
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};
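
/*!	Illustrative sketch (not part of the original header): how a hypothetical
	AssociatedData subclass could be attached to an AssociatedDataOwner such
	as a Team. The exact failure semantics of AddData() are an assumption
	here.

	\code
	struct MyData : AssociatedData {
		virtual void OwnerDeleted(AssociatedDataOwner* owner)
		{
			// the owner (e.g. a Team) is going away -- drop back references
		}
	};

	MyData* data = new MyData;
	if (!owner->AddData(data)) {
		// assumed to fail once PrepareForDeletion() has been called
		data->ReleaseReference();
	}
	\endcode
*/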


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by the group's lock, the team's
	// lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction 	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically
};
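
/*!	Illustrative sketch (not part of the original header): the typical
	look-up/lock/unlock pattern the Team interface above suggests. That Get()
	returns a referenced object, or NULL for an unknown ID, is an assumption;
	\c someTeamID is a hypothetical ID.

	\code
	Team* team = Team::Get(someTeamID);
	if (team != NULL) {
		team->Lock();
		// ... access fields protected by the team's fLock ...
		team->UnlockAndReleaseReference();
	}
	\endcode
*/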


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void 			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may remain only in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};
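
/*!	Illustrative sketch (not part of the original header): reading a thread's
	CPU time, which per the CPUTime() documentation below requires holding
	\c time_lock. InterruptsSpinLocker is assumed to come from
	<util/AutoLock.h>; that Get() returns a referenced object (or NULL) is an
	assumption as well.

	\code
	Thread* thread = Thread::Get(someThreadID);
	if (thread != NULL) {
		InterruptsSpinLocker timeLocker(thread->time_lock);
		bigtime_t cpuTime = thread->CPUTime(false);
		timeLocker.Unlock();
		thread->ReleaseReference();
	}
	\endcode
*/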


struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;
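
/*!	Illustrative sketch (not part of the original header): checking whether a
	process group has become orphaned. That Get() returns a referenced group
	(or NULL), and that IsOrphaned() is called with the group locked, are
	assumptions.

	\code
	ProcessGroup* group = ProcessGroup::Get(groupID);
	if (group != NULL) {
		group->Lock();
		bool orphaned = group->IsOrphaned();
		group->Unlock();
		group->ReleaseReference();
	}
	\endcode
*/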


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};
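
/*!	Illustrative sketch (not part of the original header): walking the global
	thread list with the iterator above. That Next() returns a referenced
	Thread (or NULL at the end) is an assumption based on the types being
	KernelReferenceable.

	\code
	ThreadListIterator iterator;
	while (Thread* thread = iterator.Next()) {
		// ... inspect the thread ...
		thread->ReleaseReference();
	}
	\endcode
*/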


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
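
// Illustrative sketch (not part of the original header): per the Thread::flags
// comment above, these bits summarize events for interrupt/syscall return
// paths, e.g. (assuming atomic_get() from <SupportDefs.h>):
//
//	if ((atomic_get(&thread->flags) & THREAD_FLAGS_SIGNALS_PENDING) != 0)
//		;	// handle pending signals before returning to userland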


#endif	/* _KERNEL_THREAD_TYPES_H */
