/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "dma_resources.h"

#include <device_manager.h>

#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>

#include "IORequest.h"


//#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


extern device_manager_info gDeviceManagerModule;

const phys_size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;


DMABuffer*
DMABuffer::Create(size_t count)
{
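	// Over-allocate so the trailing fVecs array (presumably declared with a
	// single inline element) can hold up to "count" generic_io_vecs.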
	DMABuffer* buffer = (DMABuffer*)malloc(
		sizeof(DMABuffer) + sizeof(generic_io_vec) * (count - 1));
	if (buffer == NULL)
		return NULL;

	buffer->fVecCount = count;

	return buffer;
}


void
DMABuffer::SetVecCount(uint32 count)
{
	fVecCount = count;
}


void
DMABuffer::AddVec(generic_addr_t base, generic_size_t size)
{
	generic_io_vec& vec = fVecs[fVecCount++];
	vec.base = base;
	vec.length = size;
}


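/*!	Returns whether the vec at \a index points into the physical range of the
	bounce buffer rather than into the original request memory.
*/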
bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
	if (index >= fVecCount || fBounceBuffer == NULL)
		return false;

	return fVecs[index].base >= fBounceBuffer->physical_address
		&& fVecs[index].base
				< fBounceBuffer->physical_address + fBounceBuffer->size;
}


void
DMABuffer::Dump() const
{
	kprintf("DMABuffer at %p\n", this);

	if (fBounceBuffer != NULL) {
		kprintf("  bounce buffer:      %p (physical %#" B_PRIxPHYSADDR ")\n",
			fBounceBuffer->address, fBounceBuffer->physical_address);
		kprintf("  bounce buffer size: %" B_PRIxPHYSADDR "\n",
			fBounceBuffer->size);
	}
	kprintf("  vecs:               %" B_PRIu32 "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


//	#pragma mark -


DMAResource::DMAResource()
	:
	fScratchVecs(NULL)
{
	mutex_init(&fLock, "dma resource");
}


DMAResource::~DMAResource()
{
	mutex_lock(&fLock);
	mutex_destroy(&fLock);
	free(fScratchVecs);

// TODO: Delete DMABuffers and BounceBuffers!
}


status_t
DMAResource::Init(device_node* node, generic_size_t blockSize,
	uint32 bufferCount, uint32 bounceBufferCount)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));

	// TODO: add DMA attributes instead of reusing block_io's

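	// The alignment and boundary attributes are apparently stored as masks
	// (power of two minus one), hence the "+ 1" below; segment and transfer
	// limits are given in blocks and converted to byte sizes here.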
	uint32 value;
	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_ALIGNMENT, &value, true) == B_OK)
		restrictions.alignment = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_BOUNDARY, &value, true) == B_OK)
		restrictions.boundary = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
		restrictions.max_segment_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
		restrictions.max_transfer_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)
		restrictions.max_segment_count = value;

	uint64 value64;
	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_LOW_ADDRESS, &value64, true) == B_OK) {
		restrictions.low_address = value64;
	}

	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_HIGH_ADDRESS, &value64, true) == B_OK) {
		restrictions.high_address = value64;
	}

	return Init(restrictions, blockSize, bufferCount, bounceBufferCount);
}


status_t
DMAResource::Init(const dma_restrictions& restrictions,
	generic_size_t blockSize, uint32 bufferCount, uint32 bounceBufferCount)
{
	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

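	// Fill in sane defaults for any restriction that was left unset.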
	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(generic_addr_t)0;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(generic_size_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(generic_size_t)0;

	if (_NeedsBoundsBuffers()) {
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR
		", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", "
		"boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR
		", max segment size %" B_PRIuGENADDR "\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	fScratchVecs = (generic_io_vec*)malloc(
		sizeof(generic_io_vec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	for (size_t i = 0; i < fBufferCount; i++) {
		DMABuffer* buffer;
		status_t error = CreateBuffer(&buffer);
		if (error != B_OK)
			return error;

		fDMABuffers.Add(buffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t i = 0; i < fBounceBufferCount; i++) {
		DMABounceBuffer* buffer;
		status_t error = CreateBounceBuffer(&buffer);
		if (error != B_OK)
			return error;

		fBounceBuffers.Add(buffer);
	}

	return B_OK;
}

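
/*	Rough usage sketch (hypothetical driver-side code, not part of this file):
	a driver typically creates one DMAResource per device and then asks it to
	translate each IORequest into hardware-digestible IOOperations.

		DMAResource* resource = new(std::nothrow) DMAResource;
		if (resource == NULL || resource->Init(node, blockSize, 4, 4) != B_OK)
			return B_NO_MEMORY;	// or bail out otherwise

		IOOperation operation;
		while (request->RemainingBytes() > 0) {
			status_t status = resource->TranslateNext(request, &operation, 0);
			if (status != B_OK)
				break;
			// ... program the hardware with the operation's vecs, wait for
			// completion, then hand the DMA buffer back:
			resource->RecycleBuffer(operation.Buffer());
		}
*/
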

status_t
DMAResource::CreateBuffer(DMABuffer** _buffer)
{
	DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count);
	if (buffer == NULL)
		return B_NO_MEMORY;

	*_buffer = buffer;
	return B_OK;
}


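/*!	Allocates a physically contiguous bounce buffer that satisfies the
	resource's address, alignment, and boundary restrictions and records both
	its virtual and physical base addresses in a new DMABounceBuffer.
*/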
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}


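/*!	Clips \a length so that the segment starting at \a base neither exceeds
	the maximum segment size nor crosses a boundary of fRestrictions.boundary
	bytes.
*/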
inline void
DMAResource::_RestrictBoundaryAndSegmentSize(generic_addr_t base,
	generic_addr_t& length)
{
	if (length > fRestrictions.max_segment_size)
		length = fRestrictions.max_segment_size;
	if (fRestrictions.boundary > 0) {
		generic_addr_t baseBoundary = base / fRestrictions.boundary;
		if (baseBoundary
				!= (base + (length - 1)) / fRestrictions.boundary) {
			length = (baseBoundary + 1) * fRestrictions.boundary - base;
		}
	}
}


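/*!	Removes the trailing \a toCut bytes from \a buffer by shrinking or
	dropping vecs at its end. Space that had been taken from the bounce buffer
	is given back via \a bounceLeft, and \a physicalBounceBuffer is rewound
	accordingly.
*/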
void
DMAResource::_CutBuffer(DMABuffer& buffer, phys_addr_t& physicalBounceBuffer,
	phys_size_t& bounceLeft, generic_size_t toCut)
{
	int32 vecCount = buffer.VecCount();
	for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
		generic_io_vec& vec = buffer.VecAt(i);
		generic_size_t length = vec.length;
		bool inBounceBuffer = buffer.UsesBounceBufferAt(i);

		if (length <= toCut) {
			vecCount--;
			toCut -= length;

			if (inBounceBuffer) {
				bounceLeft += length;
				physicalBounceBuffer -= length;
			}
		} else {
			vec.length -= toCut;

			if (inBounceBuffer) {
				bounceLeft += toCut;
				physicalBounceBuffer -= toCut;
			}
			break;
		}
	}

	buffer.SetVecCount(vecCount);
}


/*!	Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
	Takes care of boundary and segment restrictions. \a length must be aligned.
	If \a fixedLength is requested, this function will fail if it cannot
	satisfy the request.

	\return 0 if the request cannot be satisfied. There could have been some
		additions to the DMA buffer, and you will need to cut them back.
	TODO: is that what we want here?
	\return >0 the number of bytes added to the buffer.
*/
phys_size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer,
	phys_addr_t& physicalBounceBuffer, phys_size_t& bounceLeft,
	generic_size_t length, bool fixedLength)
{
	if (bounceLeft < length) {
		if (fixedLength)
			return 0;

		length = bounceLeft;
	}

	phys_size_t bounceUsed = 0;

	uint32 vecCount = buffer.VecCount();
	if (vecCount > 0) {
		// see if we can join the bounce buffer with the previously last vec
		generic_io_vec& vec = buffer.VecAt(vecCount - 1);
		generic_addr_t vecBase = vec.base;
		generic_size_t vecLength = vec.length;

		if (vecBase + vecLength == physicalBounceBuffer) {
			vecLength += length;
			_RestrictBoundaryAndSegmentSize(vecBase, vecLength);

			generic_size_t lengthDiff = vecLength - vec.length;
			length -= lengthDiff;

			physicalBounceBuffer += lengthDiff;
			bounceLeft -= lengthDiff;
			bounceUsed += lengthDiff;

			vec.length = vecLength;
		}
	}

	while (length > 0) {
		// We need to add another bounce vec

		if (vecCount == fRestrictions.max_segment_count)
			return fixedLength ? 0 : bounceUsed;

		generic_addr_t vecLength = length;
		_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);

		buffer.AddVec(physicalBounceBuffer, vecLength);
		vecCount++;

		physicalBounceBuffer += vecLength;
		bounceLeft -= vecLength;
		bounceUsed += vecLength;
		length -= vecLength;
	}

	return bounceUsed;
}


status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	generic_size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	generic_size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	generic_size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	generic_io_vec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE("  offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
		offset, request->RemainingBytes(), fBlockSize, partialBegin);

	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE("  buffer is virtual %s\n", buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
		if (true) {
			generic_size_t transferLeft = totalLength;
			vecs = fScratchVecs;

			TRACE("  create physical map (for %ld vecs)\n", buffer->VecCount());
			for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
				generic_io_vec& vec = buffer->VecAt(i);
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t size = vec.length - vecOffset;
				vecOffset = 0;
				if (size > transferLeft)
					size = transferLeft;

				while (size > 0 && segmentCount
						< fRestrictions.max_segment_count) {
					physical_entry entry;
					uint32 count = 1;
					get_memory_map_etc(request->TeamID(), (void*)base, size,
						&entry, &count);

					vecs[segmentCount].base = entry.address;
					vecs[segmentCount].length = entry.size;

					transferLeft -= entry.size;
					base += entry.size;
					size -= entry.size;
					segmentCount++;
				}

				if (transferLeft == 0)
					break;
			}

			totalLength -= transferLeft;
		}

		vecIndex = 0;
		vecOffset = 0;
	} else {
		// We do already have physical addresses.
		locker.Unlock();
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}

#ifdef TRACE_DMA_RESOURCE
	TRACE("  physical count %lu\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIxGENADDR "\n",
			i, vecs[vecIndex + i].base, vecs[vecIndex + i].length);
	}
#endif

	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL)
			return B_BUSY;
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	generic_size_t dmaLength = 0;
	phys_addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBufferAddress();
	phys_size_t bounceLeft = fBounceBufferSize;
	generic_size_t transferLeft = totalLength;

	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		generic_size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE("  adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		generic_size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE("  partial begin, using bounce buffer: offset: %lld, length: "
			"%lu\n", offset, length);
	}

	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const generic_io_vec& vec = vecs[i];
		if (vec.length <= vecOffset) {
			vecOffset -= vec.length;
			i++;
			continue;
		}

		generic_addr_t base = vec.base + vecOffset;
		generic_size_t maxLength = vec.length - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		generic_size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE("  vec %lu: restricting length to %lu due to transfer size "
				"limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		phys_size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE("  vec %lu: below low address, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE("  vec %lu: misalignment, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE("  vec %lu: 0 length, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE("  vec %lu: out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, we need to move
				// this request to the next I/O operation.
				break;
			}
			TRACE("  vec %lu: final bounce length: %lu\n", i, length);
		} else {
			TRACE("  vec %lu: final length restriction: %lu\n", i, length);
			dmaBuffer->AddVec(base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}

	// If we're writing partially, we always need to have a block-sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		generic_size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check whether the last
		// vec is a bounce buffer segment shorter than the block size. If so,
		// we have to cut back the complete block and use a bounce buffer for
		// it entirely.
		if (diff == 0 && offset + (off_t)dmaLength > requestEnd) {
			const generic_io_vec& dmaVec
				= dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress()
				&& dmaVec.base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of the
				// request data.
			if (dmaVec.length < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and add
			// a block-sized bounce buffer segment.
			TRACE("  partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request at
				// all.
				TRACE("  adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}

	// If the total length is not block-aligned, use the bounce buffer for
	// padding (read case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE("  dmaLength not block aligned: %lu\n", dmaLength);
		generic_size_t length
			= (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If the padded length would exceed the max transfer size or the max
		// segment count, or there is not enough bounce buffer space left,
		// truncate instead.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut off the unaligned tail of the DMA length
			TRACE("  can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");
			generic_size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);
				generic_addr_t base = dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec(base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else {
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, toCut);
			}
		} else {
			TRACE("  adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}

	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0,
		offset + (off_t)dmaLength > requestEnd);

	// If the bounce buffer wasn't needed after all, detach it again (it was
	// never taken off the free list); otherwise claim it by removing it from
	// the list.
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}


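/*!	Returns \a buffer (and its bounce buffer, if it still has one attached) to
	the free lists, so it can be reused by a later TranslateNext().
*/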
void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
	if (buffer == NULL)
		return;

	MutexLocker _(fLock);
	fDMABuffers.Add(buffer);
	if (buffer->BounceBuffer() != NULL) {
		fBounceBuffers.Add(buffer->BounceBuffer());
		buffer->SetBounceBuffer(NULL);
	}
}


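/*!	Returns whether bounce buffers are needed at all, i.e. whether any of the
	alignment, address range, or block size restrictions could force part of a
	transfer to be staged through a bounce buffer.
*/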
bool
DMAResource::_NeedsBoundsBuffers() const
{
	return fRestrictions.alignment > 1
		|| fRestrictions.low_address != 0
		|| fRestrictions.high_address != ~(generic_addr_t)0
		|| fBlockSize > 1;
}


#if 0


status_t
create_dma_resource(restrictions)
{
	// Restrictions are: transfer size, address space, alignment
	// segment min/max size, num segments
}


void
delete_dma_resource(resource)
{
}


dma_buffer_alloc(resource, size)
{
}


dma_buffer_free(buffer)
{
//	Allocates or frees memory in that DMA buffer.
}

#endif	// 0
