thread_types.h revision 6ddf93bf
/*
 * Copyright 2004-2011, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>
#include <sys/ucontext.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	Thread*				thread;		// the waiting thread
	status_t			result;		// the result of the loading
	bool				done;		// set when loading is done/aborted
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};
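
// Illustrative sketch (not part of this header): walking a team's dead
// children entries with the DoublyLinkedList iterator. Assumes the caller
// holds the team's fLock, which guards dead_children (see Team below); the
// function name is hypothetical.
//
//	int32 count_dead_entries(team_dead_children& deadChildren)
//	{
//		int32 count = 0;
//		JobControlEntryList::Iterator it
//			= deadChildren.entries.GetIterator();
//		while (job_control_entry* entry = it.Next()) {
//			// entry->status and entry->reason describe the child's demise
//			count++;
//		}
//		return count;
//	}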


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};
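
// Illustrative sketch (not part of this header): a minimal AssociatedData
// subclass and how it might be attached to an owner such as a Team (which
// derives from AssociatedDataOwner, see below). The class name and cleanup
// logic are hypothetical; AddData() failing when the owner is already being
// deleted is an assumption of this example.
//
//	struct MyTeamData : AssociatedData {
//		virtual void OwnerDeleted(AssociatedDataOwner* owner)
//		{
//			// the owner is going away; drop our reference
//			ReleaseReference();
//		}
//	};
//
//	MyTeamData* data = new MyTeamData;
//	if (!team->AddData(data))
//		data->ReleaseReference();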


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by the group's lock, the team's
	// lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun) const;
			bigtime_t			UserCPUTime() const;

private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically
};
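
// Illustrative usage sketch (not part of this header): looking up a team by
// ID, working with it locked, and releasing it again. GetAndLock() returns
// a referenced, locked Team that the caller owns.
//
//	if (Team* team = Team::GetAndLock(id)) {
//		// ... access fields protected by the team's fLock ...
//		team->UnlockAndReleaseReference();
//	}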


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	addr_t			fault_handler;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};
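
// Illustrative usage sketch (not part of this header): checking whether a
// thread is still alive and retrieving a referenced, locked Thread object.
//
//	if (Thread::IsAlive(id)) {
//		// the thread may still die at any moment; to work with it, take a
//		// reference and the lock
//		if (Thread* thread = Thread::GetAndLock(id)) {
//			// ... access fields protected by fLock ...
//			thread->UnlockAndReleaseReference();
//		}
//	}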


struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};
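
// Illustrative usage sketch (not part of this header): visiting all live
// threads. That Next() returns a referenced object the caller must release
// is an assumption of this example -- see the iterator implementation in
// thread.cpp for the exact reference semantics.
//
//	ThreadListIterator iterator;
//	while (Thread* thread = iterator.Next()) {
//		// ... use the thread ...
//		thread->ReleaseReference();
//	}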


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}
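
// Illustrative usage sketch (not part of this header): reading a thread's
// CPU time. CPUTime() requires time_lock to be held; using
// InterruptsSpinLocker (from util/AutoLock.h) for that is an assumption of
// this example, as is the helper's name.
//
//	bigtime_t get_cpu_time(Thread* thread)
//	{
//		InterruptsSpinLocker timeLocker(thread->time_lock);
//		return thread->CPUTime(false);
//	}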


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
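
// Illustrative sketch (not part of this header): since flags is inspected
// from interrupt handlers and modified by other threads, updates typically
// go through the atomic_*() primitives, which return the previous value.
// Applying them to Thread::flags as below is an assumption of this example.
//
//	if ((atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING)
//			& THREAD_FLAGS_SIGNALS_PENDING) == 0) {
//		// the flag was not set before this call
//	}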


#endif	/* _KERNEL_THREAD_TYPES_H */