1/*
2 * Copyright 2002-2010, Haiku.
3 * Distributed under the terms of the MIT License.
4 *
5 * Authors:
6 *		Marcus Overhagen
 *		Jérôme Duval
8 */
9
10
11/*!	This is the BBufferProducer used internally by BSoundPlayer.
12*/
13
14
15#include "SoundPlayNode.h"
16
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
20
21#include <TimeSource.h>
22#include <MediaRoster.h>
23#include "MediaDebug.h"
24
25
26#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)
27
28
29namespace BPrivate {
30
31
// Constructs the producer node for the given BSoundPlayer. The node is
// not usable until it has been registered (see NodeRegistered()).
SoundPlayNode::SoundPlayNode(const char* name, BSoundPlayer* player)
	:
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fPlayer(player),
	fInitStatus(B_OK),
	fOutputEnabled(true),
	fBufferGroup(NULL),
	fFramesSent(0),
	fTooEarlyCount(0)
{
	CALLED();
	// Start with a fully wildcarded raw audio format; the concrete format
	// is settled during connection negotiation.
	fOutput.format.type = B_MEDIA_RAW_AUDIO;
	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
}
48
49
SoundPlayNode::~SoundPlayNode()
{
	CALLED();
	// Shut down the BMediaEventLooper control thread before the members
	// it uses are destroyed.
	Quit();
}
55
56
57bool
58SoundPlayNode::IsPlaying()
59{
60	return RunState() == B_STARTED;
61}
62
63
64bigtime_t
65SoundPlayNode::CurrentTime()
66{
67	int frameRate = (int)fOutput.format.u.raw_audio.frame_rate;
68	return frameRate == 0 ? 0
69		: bigtime_t((1000000LL * fFramesSent) / frameRate);
70}
71
72
// Returns the output's raw audio format; all fields are wildcards until a
// connection has been negotiated.
media_multi_audio_format
SoundPlayNode::Format() const
{
	return fOutput.format.u.raw_audio;
}
78
79
80// #pragma mark - implementation of BMediaNode
81
82
BMediaAddOn*
SoundPlayNode::AddOn(int32* _internalID) const
{
	CALLED();
	// This only gets called if we are in an add-on; this node is created
	// directly (by BSoundPlayer), so there is no add-on to report.
	return NULL;
}
90
91
void
SoundPlayNode::Preroll()
{
	CALLED();
	// TODO: Performance opportunity
	// Nothing is precomputed here yet; simply forward to the base class.
	BMediaNode::Preroll();
}
99
100
status_t
SoundPlayNode::HandleMessage(int32 message, const void* data, size_t size)
{
	CALLED();
	// No node-specific control-port messages are handled here; B_ERROR
	// signals "not handled" so the inherited classes get a chance.
	return B_ERROR;
}
107
108
109void
110SoundPlayNode::NodeRegistered()
111{
112	CALLED();
113
114	if (fInitStatus != B_OK) {
115		ReportError(B_NODE_IN_DISTRESS);
116		return;
117	}
118
119	SetPriority(B_URGENT_PRIORITY);
120
121	fOutput.format.type = B_MEDIA_RAW_AUDIO;
122	fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
123	fOutput.destination = media_destination::null;
124	fOutput.source.port = ControlPort();
125	fOutput.source.id = 0;
126	fOutput.node = Node();
127	strcpy(fOutput.name, Name());
128
129	Run();
130}
131
132
status_t
SoundPlayNode::RequestCompleted(const media_request_info& info)
{
	CALLED();
	// We issue no asynchronous requests that would need completion
	// handling; acknowledge and ignore.
	return B_OK;
}
139
140
void
SoundPlayNode::SetTimeSource(BTimeSource* timeSource)
{
	CALLED();
	// No local state depends on the time source; just forward. The
	// override exists only for the CALLED() trace.
	BMediaNode::SetTimeSource(timeSource);
}
147
148
void
SoundPlayNode::SetRunMode(run_mode mode)
{
	TRACE("SoundPlayNode::SetRunMode mode:%i\n", mode);
	// The run mode is only consulted later (see LateNoticeReceived());
	// the base class stores it for us.
	BMediaNode::SetRunMode(mode);
}
155
156
157// #pragma mark - implementation for BBufferProducer
158
159
160status_t
161SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/,
162	media_format* format)
163{
164	// FormatSuggestionRequested() is not necessarily part of the format
165	// negotiation process; it's simply an interrogation -- the caller wants
166	// to see what the node's preferred data format is, given a suggestion by
167	// the caller.
168	CALLED();
169
170	// a wildcard type is okay; but we only support raw audio
171	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
172		return B_MEDIA_BAD_FORMAT;
173
174	// this is the format we'll be returning (our preferred format)
175	format->type = B_MEDIA_RAW_AUDIO;
176	format->u.raw_audio = media_multi_audio_format::wildcard;
177
178	return B_OK;
179}
180
181
status_t
SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
{
	// FormatProposal() is the first stage in the BMediaRoster::Connect()
	// process. We hand out a suggested format, with wildcards for any
	// variations we support.
	CALLED();

	// is this a proposal for our one output?
	if (output != fOutput.source) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// if wildcard, change it to raw audio
	if (format->type == B_MEDIA_UNKNOWN_TYPE)
		format->type = B_MEDIA_RAW_AUDIO;

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// The raw_audio fields are left as proposed (possibly wildcarded);
	// they are fully specialized later, in PrepareToConnect().
#if DEBUG >0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::FormatProposal: format %s\n", buf);
#endif

	return B_OK;
}
214
215
status_t
SoundPlayNode::FormatChangeRequested(const media_source& source,
	const media_destination& destination, media_format* _format,
	int32* /* deprecated */)
{
	CALLED();

	// we don't support any other formats, so we just reject any format
	// changes once a connection exists.
	return B_ERROR;
}
226
227
228status_t
229SoundPlayNode::GetNextOutput(int32* cookie, media_output* _output)
230{
231	CALLED();
232
233	if (*cookie == 0) {
234		*_output = fOutput;
235		*cookie += 1;
236		return B_OK;
237	} else {
238		return B_BAD_INDEX;
239	}
240}
241
242
status_t
SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// do nothing because we don't use the cookie for anything special
	// (it is a plain counter, see GetNextOutput())
	return B_OK;
}
250
251
// Lets the consumer hand us a buffer group to produce into; a NULL group
// means "go back to using your own".
status_t
SoundPlayNode::SetBufferGroup(const media_source& forSource,
	BBufferGroup* newGroup)
{
	CALLED();

	// is this our output?
	if (forSource != fOutput.source) {
		TRACE("SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Are we being passed the buffer group we're already using?
	if (newGroup == fBufferGroup)
		return B_OK;

	// Ahh, someone wants us to use a different buffer group. At this point we
	// delete the one we are using and use the specified one instead.
	// If the specified group is NULL, we need to recreate one ourselves, and
	// use *that*. Note that if we're caching a BBuffer that we requested
	// earlier, we have to Recycle() that buffer *before* deleting the buffer
	// group, otherwise we'll deadlock waiting for that buffer to be recycled!
	delete fBufferGroup;
		// waits for all buffers to recycle
	// (this node does not cache buffers, so the deadlock above cannot occur)

	if (newGroup != NULL) {
		// we were given a valid group; just use that one from now on
		fBufferGroup = newGroup;
		return B_OK;
	}

	// we were passed a NULL group pointer; that means we construct
	// our own buffer group to use from now on
	return AllocateBuffers();
}
287
288
status_t
SoundPlayNode::GetLatency(bigtime_t* _latency)
{
	CALLED();

	// report our *total* latency:  internal plus downstream plus scheduling
	// (EventLatency() is set in Connect()/LatencyChanged() to
	// fLatency + fInternalLatency)
	*_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}
298
299
status_t
SoundPlayNode::PrepareToConnect(const media_source& what,
	const media_destination& where, media_format* format,
	media_source* _source, char* _name)
{
	// PrepareToConnect() is the second stage of format negotiations that
	// happens inside BMediaRoster::Connect(). At this point, the consumer's
	// AcceptFormat() method has been called, and that node has potentially
	// changed the proposed format. It may also have left wildcards in the
	// format. PrepareToConnect() *must* fully specialize the format before
	// returning!
	CALLED();

	// is this our output?
	if (what != fOutput.source)	{
		TRACE("SoundPlayNode::PrepareToConnect returning "
			"B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// are we already connected?
	if (fOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// the format may not yet be fully specialized (the consumer might have
	// passed back some wildcards). Finish specializing it now, and return an
	// error if we don't support the requested format.

#if DEBUG > 0
	char buf[100];
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: input format %s\n", buf);
#endif

	// if not raw audio, we can't support it
	if (format->type != B_MEDIA_UNKNOWN_TYPE
		&& format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("SoundPlayNode::PrepareToConnect: non raw format, returning "
			"B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// the haiku mixer might have a hint
	// for us, so check for it
	// NOTE(review): the user_data offsets below follow the mixer's private
	// hint layout (magic at 0 and 44, channel count at 4, frame rate at
	// 20) — verify against the mixer source if this ever breaks.
	#define FORMAT_USER_DATA_TYPE 		0x7294a8f3
	#define FORMAT_USER_DATA_MAGIC_1	0xc84173bd
	#define FORMAT_USER_DATA_MAGIC_2	0x4af62b7d
	uint32 channel_count = 0;
	float frame_rate = 0;
	if (format->user_data_type == FORMAT_USER_DATA_TYPE
		&& *(uint32 *)&format->user_data[0] == FORMAT_USER_DATA_MAGIC_1
		&& *(uint32 *)&format->user_data[44] == FORMAT_USER_DATA_MAGIC_2) {
		channel_count = *(uint32 *)&format->user_data[4];
		frame_rate = *(float *)&format->user_data[20];
		TRACE("SoundPlayNode::PrepareToConnect: found mixer info: "
			"channel_count %" B_PRId32 " , frame_rate %.1f\n", channel_count, frame_rate);
	}

	// Fill remaining wildcards from the mixer hint if present, otherwise
	// from sensible defaults (44.1 kHz, stereo, host-endian float).
	media_format default_format;
	default_format.type = B_MEDIA_RAW_AUDIO;
	default_format.u.raw_audio.frame_rate = frame_rate > 0 ? frame_rate : 44100;
	default_format.u.raw_audio.channel_count = channel_count > 0
		? channel_count : 2;
	default_format.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
	default_format.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
	default_format.u.raw_audio.buffer_size = 0;
	format->SpecializeTo(&default_format);

	// If the buffer size is still unset, ask the roster for the preferred
	// size for this channel count / sample format / frame rate.
	if (format->u.raw_audio.buffer_size == 0) {
		format->u.raw_audio.buffer_size
			= BMediaRoster::Roster()->AudioBufferSizeFor(
				format->u.raw_audio.channel_count, format->u.raw_audio.format,
				format->u.raw_audio.frame_rate);
	}

#if DEBUG > 0
	string_for_format(*format, buf, sizeof(buf));
	TRACE("SoundPlayNode::PrepareToConnect: output format %s\n", buf);
#endif

	// Now reserve the connection, and return information about it
	// (Connect() either confirms or rolls this back)
	fOutput.destination = where;
	fOutput.format = *format;
	*_source = fOutput.source;
	// NOTE(review): assumes Name() fits the caller-provided name buffer
	strcpy(_name, Name());
	return B_OK;
}
387
388
// Final stage of BMediaRoster::Connect(): confirm (or roll back) the
// connection reserved in PrepareToConnect() and derive our latencies.
void
SoundPlayNode::Connect(status_t error, const media_source& source,
	const media_destination& destination, const media_format& format,
	char* name)
{
	CALLED();

	// is this our output?
	if (source != fOutput.source) {
		TRACE("SoundPlayNode::Connect returning\n");
		return;
	}

	// If something earlier failed, Connect() might still be called, but with
	// a non-zero error code.  When that happens we simply unreserve the
	// connection and do nothing else.
	if (error) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		return;
	}

	// Okay, the connection has been confirmed.  Record the destination and
	// format that we agreed on, and report our connection name again.
	fOutput.destination = destination;
	fOutput.format = format;
	// NOTE(review): assumes Name() fits the caller-provided name buffer
	strcpy(name, Name());

	// Now that we're connected, we can determine our downstream latency.
	// Do so, then make sure we get our events early enough.
	media_node_id id;
	FindLatencyFor(fOutput.destination, &fLatency, &id);
	TRACE("SoundPlayNode::Connect: downstream latency = %" B_PRId64 "\n",
		fLatency);

	// reset our buffer duration, etc. to avoid later calculations
	// duration [µs] = buffer_size / (bytes per sample * channels)
	//                 / frame_rate * 1000000
	// (B_AUDIO_SIZE_MASK extracts the sample size in bytes from the
	// sample-format constant)
	bigtime_t duration = ((fOutput.format.u.raw_audio.buffer_size * 1000000LL)
		/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count))
		/ (int32)fOutput.format.u.raw_audio.frame_rate;
	SetBufferDuration(duration);
	TRACE("SoundPlayNode::Connect: buffer duration is %" B_PRId64 "\n",
		duration);

	// Initial internal-latency estimate: three quarters of one buffer;
	// LateNoticeReceived() adjusts this if we turn out to be late.
	fInternalLatency = (3 * BufferDuration()) / 4;
	TRACE("SoundPlayNode::Connect: using %" B_PRId64 " as internal latency\n",
		fInternalLatency);
	SetEventLatency(fLatency + fInternalLatency);

	// Set up the buffer group for our connection, as long as nobody handed us
	// a buffer group (via SetBufferGroup()) prior to this.
	// That can happen, for example, if the consumer calls SetOutputBuffersFor()
	// on us from within its Connected() method.
	if (!fBufferGroup)
		AllocateBuffers();
}
447
448
// Tears down our connection and returns the output to its wildcard state.
void
SoundPlayNode::Disconnect(const media_source& what,
	const media_destination& where)
{
	CALLED();

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::Disconnect returning\n");
		return;
	}

	// Make sure that our connection is the one being disconnected
	if (where == fOutput.destination && what == fOutput.source) {
		fOutput.destination = media_destination::null;
		fOutput.format.type = B_MEDIA_RAW_AUDIO;
		fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
		// dropping the group also releases our producing buffers
		delete fBufferGroup;
		fBufferGroup = NULL;
	} else {
		fprintf(stderr, "\tDisconnect() called with wrong source/destination "
			"(%" B_PRId32 "/%" B_PRId32 "), ours is (%" B_PRId32 "/%" B_PRId32
			")\n", what.id, where.id, fOutput.source.id,
			fOutput.destination.id);
	}
}
475
476
// A downstream consumer told us our buffers arrive late. Depending on the
// run mode we either increase our latency or skip ahead in the stream.
void
SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	CALLED();

	TRACE("SoundPlayNode::LateNoticeReceived, %" B_PRId64 " too late at %"
		B_PRId64 "\n", howMuch, performanceTime);

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA) {
		// We're late, and our run mode dictates that we try to produce buffers
		// earlier in order to catch up.  This argues that the downstream nodes are
		// not properly reporting their latency, but there's not much we can do about
		// that at the moment, so we try to start producing buffers earlier to
		// compensate.

		fInternalLatency += howMuch;

		// cap the compensation so repeated notices cannot push the
		// internal latency arbitrarily high
		if (fInternalLatency > 30000)	// avoid getting a too high latency
			fInternalLatency = 30000;

		SetEventLatency(fLatency + fInternalLatency);
		TRACE("SoundPlayNode::LateNoticeReceived: increasing latency to %"
			B_PRId64 "\n", fLatency + fInternalLatency);
	} else {
		// The other run modes dictate various strategies for sacrificing data quality
		// in the interests of timely data delivery.  The way *we* do this is to skip
		// a buffer, which catches us up in time by one buffer duration.

		// frames per buffer = buffer_size / (bytes per sample * channels)
		size_t nFrames = fOutput.format.u.raw_audio.buffer_size
			/ ((fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count);

		// advancing the frame counter makes SendNewBuffer() schedule the
		// next buffer one buffer-duration later, i.e. we skip one
		fFramesSent += nFrames;

		TRACE("SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
	}
}
521
522
// Enables/disables buffer production for our output; checked by
// SendNewBuffer() before each buffer is created.
void
SoundPlayNode::EnableOutput(const media_source& what, bool enabled,
	int32* /* deprecated */)
{
	CALLED();

	// If I had more than one output, I'd have to walk my list of output
	// records to see which one matched the given source, and then
	// enable/disable that one.
	// But this node only has one output, so I just make sure the given source
	// matches, then set the enable state accordingly.

	// is this our output?
	if (what != fOutput.source) {
		fprintf(stderr, "SoundPlayNode::EnableOutput returning\n");
		return;
	}

	fOutputEnabled = enabled;
}
543
544
void
SoundPlayNode::AdditionalBufferRequested(const media_source& source,
	media_buffer_id previousBuffer, bigtime_t previousTime,
	const media_seek_tag* previousTag)
{
	CALLED();
	// we don't support offline mode, so extra on-demand buffers are
	// never produced
	return;
}
554
555
556void
557SoundPlayNode::LatencyChanged(const media_source& source,
558	const media_destination& destination, bigtime_t newLatency, uint32 flags)
559{
560	CALLED();
561
562	TRACE("SoundPlayNode::LatencyChanged: new_latency %" B_PRId64 "\n",
563		newLatency);
564
565	// something downstream changed latency, so we need to start producing
566	// buffers earlier (or later) than we were previously.  Make sure that the
567	// connection that changed is ours, and adjust to the new downstream
568	// latency if so.
569	if (source == fOutput.source && destination == fOutput.destination) {
570		fLatency = newLatency;
571		SetEventLatency(fLatency + fInternalLatency);
572	} else {
573		TRACE("SoundPlayNode::LatencyChanged: ignored\n");
574	}
575}
576
577
578// #pragma mark - implementation for BMediaEventLooper
579
580
// Central event dispatcher of the BMediaEventLooper thread; routes each
// queued event to its handler.
void
SoundPlayNode::HandleEvent(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	switch (event->type) {
		case BTimedEventQueue::B_START:
			HandleStart(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_SEEK:
			HandleSeek(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_WARP:
			HandleWarp(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_STOP:
			HandleStop(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_HANDLE_BUFFER:
			// we don't get any buffers
			break;
		case SEND_NEW_BUFFER_EVENT:
			// our self-scheduled "produce the next buffer" event; only
			// act on it while we are running
			if (RunState() == BMediaEventLooper::B_STARTED)
				SendNewBuffer(event, lateness, realTimeEvent);
			break;
		case BTimedEventQueue::B_DATA_STATUS:
			HandleDataStatus(event,lateness,realTimeEvent);
			break;
		case BTimedEventQueue::B_PARAMETER:
			HandleParameter(event,lateness,realTimeEvent);
			break;
		default:
			fprintf(stderr," unknown event type: %" B_PRId32 "\n", event->type);
			break;
	}
}
617
618
619// #pragma mark - protected methods
620
621
622// how should we handle late buffers?  drop them?
623// notify the producer?
// how should we handle late buffers?  drop them?
// notify the producer?
// Produces one buffer of audio (via FillNextBuffer()), sends it
// downstream, and schedules the event for the next buffer.
status_t
SoundPlayNode::SendNewBuffer(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// TRACE("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// make sure we're both started *and* connected before delivering a buffer
	if (RunState() != BMediaEventLooper::B_STARTED
		|| fOutput.destination == media_destination::null)
		return B_OK;

	// The event->event_time is the time at which the buffer we are preparing
	// here should arrive at it's destination. The MediaEventLooper should have
	// scheduled us early enough (based on EventLatency() and the
	// SchedulingLatency()) to make this possible.
	// lateness is independent of EventLatency()!

	if (lateness > (BufferDuration() / 3) ) {
		TRACE("SoundPlayNode::SendNewBuffer, event scheduled much too late, "
			"lateness is %" B_PRId64 "\n", lateness);
	}

	// skip buffer creation if output not enabled
	if (fOutputEnabled) {

		// Get the next buffer of data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {

			// If we are ready way too early, decrase internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now() - fLatency - fInternalLatency;
			if (how_early > 5000) {

				TRACE("SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);

				if (fTooEarlyCount++ == 5) {
					fInternalLatency -= how_early;
					if (fInternalLatency < 500)
						fInternalLatency = 500;
					TRACE("SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", fInternalLatency);
					SetEventLatency(fLatency + fInternalLatency);
					fTooEarlyCount = 0;
				}
			}
*/
			// send the buffer downstream if and only if output is enabled
			if (SendBuffer(buffer, fOutput.source, fOutput.destination)
					!= B_OK) {
				// we need to recycle the buffer
				// if the call to SendBuffer() fails
				TRACE("SoundPlayNode::SendNewBuffer: Buffer sending "
					"failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far
	// (frames per buffer = buffer_size / (bytes per sample * channels);
	// counted even when the output is disabled or the buffer was dropped,
	// so performance time keeps advancing)
	size_t nFrames = fOutput.format.u.raw_audio.buffer_size
		/ ((fOutput.format.u.raw_audio.format
			& media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* fOutput.format.u.raw_audio.channel_count);
	fFramesSent += nFrames;

	// The buffer is on its way; now schedule the next one to go
	// nextEvent is the time at which the buffer should arrive at it's
	// destination
	bigtime_t nextEvent = fStartTime + bigtime_t((1000000LL * fFramesSent)
		/ (int32)fOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}
701
702
703status_t
704SoundPlayNode::HandleDataStatus(const media_timed_event* event,
705	bigtime_t lateness, bool realTimeEvent)
706{
707	TRACE("SoundPlayNode::HandleDataStatus status: %" B_PRId32 ", lateness: %"
708		B_PRId64 "\n", event->data, lateness);
709
710	switch (event->data) {
711		case B_DATA_NOT_AVAILABLE:
712			break;
713		case B_DATA_AVAILABLE:
714			break;
715		case B_PRODUCER_STOPPED:
716			break;
717		default:
718			break;
719	}
720	return B_OK;
721}
722
723
// Handles the B_START event: initializes the frame counter / start time
// and kicks off the self-perpetuating buffer-production event chain.
status_t
SoundPlayNode::HandleStart(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// don't do anything if we're already running
	if (RunState() != B_STARTED) {
		// We want to start sending buffers now, so we set up the buffer-sending
		// bookkeeping and fire off the first "produce a buffer" event.

		fFramesSent = 0;
		fStartTime = event->event_time;
		media_timed_event firstBufferEvent(event->event_time,
			SEND_NEW_BUFFER_EVENT);

		// Alternatively, we could call HandleEvent() directly with this event,
		// to avoid a trip through the event queue, like this:
		//
		//		this->HandleEvent(&firstBufferEvent, 0, false);
		//
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}
748
749
status_t
SoundPlayNode::HandleSeek(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// seeking has no meaning for a live producer; only traced
	TRACE("SoundPlayNode::HandleSeek(t=%" B_PRId64 ", d=%" B_PRId32 ", bd=%"
		B_PRId64 ")\n", event->event_time, event->data, event->bigdata);
	return B_OK;
}
759
760
status_t
SoundPlayNode::HandleWarp(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// time-source warps are ignored by this node
	return B_OK;
}
768
769
// Handles the B_STOP event: stops buffer production by removing all
// pending SEND_NEW_BUFFER_EVENTs from the queue.
status_t
SoundPlayNode::HandleStop(const media_timed_event* event, bigtime_t lateness,
	bool realTimeEvent)
{
	CALLED();
	// flush the queue so downstreamers don't get any more
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true,
		SEND_NEW_BUFFER_EVENT);

	return B_OK;
}
781
782
status_t
SoundPlayNode::HandleParameter(const media_timed_event* event,
	bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// this node exposes no parameters; nothing to do
	return B_OK;
}
790
791
792status_t
793SoundPlayNode::AllocateBuffers()
794{
795	CALLED();
796
797	// allocate enough buffers to span our downstream latency, plus one
798	size_t size = fOutput.format.u.raw_audio.buffer_size;
799	int32 count = int32(fLatency / BufferDuration() + 1 + 1);
800
801	TRACE("SoundPlayNode::AllocateBuffers: latency = %" B_PRId64 ", buffer "
802		"duration = %" B_PRId64 ", count %" B_PRId32 "\n", fLatency,
803		BufferDuration(), count);
804
805	if (count < 3)
806		count = 3;
807
808	TRACE("SoundPlayNode::AllocateBuffers: creating group of %" B_PRId32
809		" buffers, size = %" B_PRIuSIZE "\n", count, size);
810
811	fBufferGroup = new BBufferGroup(size, count);
812	if (fBufferGroup->InitCheck() != B_OK) {
813		ERROR("SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
814			"failed\n");
815	}
816
817	return fBufferGroup->InitCheck();
818}
819
820
// Requests a buffer from the group, fills it with audio from the player
// (or silence), stamps its header, and returns it. Returns NULL if no
// buffer became available in time.
// NOTE(review): assumes fBufferGroup is non-NULL — guaranteed by
// Connect()/SetBufferGroup() before buffers are produced.
BBuffer*
SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
{
	CALLED();

	// get a buffer from our buffer group
	// (wait at most half a buffer duration so a stuck consumer cannot
	// block the control thread for long)
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// If we fail to get a buffer (for example, if the request times out), we
	// skip this buffer and go on to the next, to avoid locking up the control
	// thread
	if (buffer == NULL) {
		ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// let the player's callback produce the audio data, or emit silence
	// if it currently has nothing to play
	if (fPlayer->HasData()) {
		fPlayer->PlayBuffer(buffer->Data(),
			fOutput.format.u.raw_audio.buffer_size, fOutput.format.u.raw_audio);
	} else
		memset(buffer->Data(), 0, fOutput.format.u.raw_audio.buffer_size);

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fOutput.format.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();
	header->start_time = eventTime;

	return buffer;
}
853
854
855}	// namespace BPrivate
856