thread_types.h revision a295d3f4
/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01
	// team has executed exec*()
#define	TEAM_FLAG_DUMP_CORE	0x02
	// a core dump is in progress

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	Thread*				thread;	// the waiting thread
	status_t			result;		// the result of the loading
	bool				done;		// set when loading is done/aborted
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};
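
/*	Editorial sketch (not part of the original header): the shape of a
	team_watcher hook. How and where watchers are registered and invoked is
	implemented in the team code; only the callback signature declared above
	is assumed here.

		static void
		my_team_watcher_hook(team_id team, void* data)
		{
			// "data" is whatever pointer was stored in team_watcher::data
			dprintf("team %" B_PRId32 " event, cookie %p\n", team, data);
		}
*/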


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
	bigtime_t			user_time;
	bigtime_t			kernel_time;

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};
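
/*	Editorial sketch (not part of the original header): reading an entry as
	the comments above describe -- "team" is meaningful while the entry is
	alive, the dead-state fields once state == JOB_CONTROL_STATE_DEAD.

		static void
		inspect_job_control_entry(const job_control_entry& entry)
		{
			if (entry.state == JOB_CONTROL_STATE_DEAD) {
				// the Team is gone; group_id, status, reason, user_time and
				// kernel_time carry the final information
				dprintf("team %" B_PRId32 " exited, status %" B_PRId32 "\n",
					(int32)entry.thread, (int32)entry.status);
			} else {
				// entry.team is still valid here
			}
		}
*/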

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};
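
/*	Editorial sketch (not part of the original header): the intended use of
	the AssociatedData/AssociatedDataOwner pair declared above. Data objects
	are reference counted, belong to at most one owner, and are notified via
	OwnerDeleted() when the owner calls PrepareForDeletion(). Reference
	ownership across AddData()/RemoveData() follows the conventions of the
	implementation, which is not part of this header.

		struct MyTeamData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the owner is going away; stop referring to it
				AssociatedData::OwnerDeleted(owner);
			}
		};

		// attaching to an owner (a Team is an AssociatedDataOwner):
		//		MyTeamData* data = new MyTeamData;
		//		if (!owner->AddData(data))
		//			; // already owned elsewhere -- handle the failure
*/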


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by the group's lock, the team's
	// lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

			ConditionVariable*	CoreDumpCondition() const
									{ return fCoreDumpCondition; }
			void				SetCoreDumpCondition(
									ConditionVariable* condition)
									{ fCoreDumpCondition = condition; }
private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction 	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically

			ConditionVariable*	fCoreDumpCondition;
									// protected by fLock
};
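
/*	Editorial sketch (not part of the original header): how the reference and
	lock accessors above are meant to be combined. GetAndLock() is assumed to
	return the team with both a reference and fLock held, which
	UnlockAndReleaseReference() undoes.

		if (Team* team = Team::GetAndLock(id)) {
			team->SetName("new name");
				// fName is protected by fLock, which is held here
			team->UnlockAndReleaseReference();
		}
*/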


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};
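
/*	Editorial sketch (not part of the original header), mirroring the Team
	example above: fields such as "name" and "io_priority" are documented as
	protected by fLock, so a caller other than the thread itself takes the
	lock through GetAndLock() and drops lock and reference together.

		if (Thread* thread = Thread::GetAndLock(id)) {
			dprintf("%s: I/O priority %" B_PRId32 "\n", thread->name,
				thread->io_priority);
			thread->UnlockAndReleaseReference();
		}
*/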


struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};
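
/*	Editorial sketch (not part of the original header): looking up a process
	group and reading session information under its lock. The sketch assumes
	Get() returns the group with a reference acquired, matching the
	KernelReferenceable base class.

		if (ProcessGroup* group = ProcessGroup::Get(groupID)) {
			group->Lock();
			pid_t sessionID = group->Session()->id;
			bool orphaned = group->IsOrphaned();
			group->Unlock();
			group->ReleaseReference();
			dprintf("group %" B_PRId32 ": session %" B_PRId32 "%s\n",
				groupID, sessionID, orphaned ? " (orphaned)" : "");
		}
*/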

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};
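
/*	Editorial sketch (not part of the original header): walking all threads
	with the iterator above. Whether Next() returns each element with a
	reference already acquired is defined by the implementation in
	thread.cpp; the sketch assumes the caller releases one per element.

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			dprintf("thread %" B_PRId32 "\n", thread->id);
			thread->ReleaseReference();
		}
*/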


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}
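
/*	Editorial sketch (not part of the original header): calling CPUTime()
	under the lock it requires, using the kernel's acquire_spinlock()/
	release_spinlock() helpers (assumed available via the included <smp.h>).
	The caller is assumed to have interrupts disabled, as is usual before
	taking a spinlock; "thread" is some Thread* the caller already holds.

		acquire_spinlock(&thread->time_lock);
		bigtime_t cpuTime = thread->CPUTime(false);
		release_spinlock(&thread->time_lock);
*/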


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
#define	THREAD_FLAGS_TRAP_FOR_CORE_DUMP		0x1000
	// core dump in progress; the thread shall not exit the kernel to userland,
	// but shall invoke core_dump_trap_thread() instead.
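
/*	Editorial note (not part of the original header): these bits live in
	Thread::flags, which the comment on that field describes as a summary of
	events relevant in interrupt handlers. In the kernel they are therefore
	typically tested and changed with the atomic_*() primitives, e.g.:

		if ((atomic_get(&thread->flags) & THREAD_FLAGS_SIGNALS_PENDING) != 0) {
			// unblocked signals are pending for this thread
		}
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
*/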


#endif	/* _KERNEL_THREAD_TYPES_H */
