// file_cache.cpp, revision f8154d17
/*
 * Copyright 2004-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "vnode_store.h"

#include <unistd.h>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>
#include <fs_cache.h>

#include <condition_variable.h>
#include <file_cache.h>
#include <generic_syscall.h>
#include <low_resource_manager.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/kernel_cpp.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/VMCache.h>

#include "IORequest.h"


//#define TRACE_FILE_CACHE
#ifdef TRACE_FILE_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// maximum number of iovecs per request
#define MAX_IO_VECS			32	// 128 kB
#define MAX_FILE_IO_VECS	32

#define BYPASS_IO_SIZE		65536
#define LAST_ACCESSES		3

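/*!	Per-file state of the file cache: the backing VMCache and vnode, plus a
	small ring buffer remembering where the most recent accesses ended, which
	is used to detect sequential access patterns (writes are stored as
	negative offsets, see SetLastAccess()).
*/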
struct file_cache_ref {
	VMCache			*cache;
	struct vnode	*vnode;
	off_t			last_access[LAST_ACCESSES];
		// TODO: it would probably be enough to only store the least
		//	significant 31 bits, and make this uint32 (one bit for
		//	write vs. read)
	int32			last_access_index;
	uint16			disabled_count;

	inline void SetLastAccess(int32 index, off_t access, bool isWrite)
	{
		// we remember writes as negative offsets
		last_access[index] = isWrite ? -access : access;
	}

	inline off_t LastAccess(int32 index, bool isWrite) const
	{
		return isWrite ? -last_access[index] : last_access[index];
	}

	inline uint32 LastAccessPageOffset(int32 index, bool isWrite)
	{
		return LastAccess(index, isWrite) >> PAGE_SHIFT;
	}
};

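/*!	Asynchronous read-ahead request: Prepare() allocates busy pages for the
	given cache range and builds matching I/O vectors, ReadAsync() submits the
	request, and IOFinished() publishes the successfully read pages, discards
	the rest, and deletes the object.
*/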
class PrecacheIO : public AsyncIOCallback {
public:
								PrecacheIO(file_cache_ref* ref, off_t offset,
									generic_size_t size);
								~PrecacheIO();

			status_t			Prepare(vm_page_reservation* reservation);
			void				ReadAsync();

	virtual	void				IOFinished(status_t status,
									bool partialTransfer,
									generic_size_t bytesTransferred);

private:
			file_cache_ref*		fRef;
			VMCache*			fCache;
			vm_page**			fPages;
			size_t				fPageCount;
			ConditionVariable*	fBusyConditions;
			generic_io_vec*		fVecs;
			off_t				fOffset;
			uint32				fVecCount;
			generic_size_t		fSize;
#if DEBUG_PAGE_ACCESS
			thread_id			fAllocatingThread;
#endif
};

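/*!	Common signature for the functions that transfer data between the cache
	and the underlying file, allowing the generic cache I/O logic to invoke
	the read and write paths uniformly (read_into_cache() below is one such
	implementation).
*/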
typedef status_t (*cache_func)(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages);

static void add_to_iovec(generic_io_vec* vecs, uint32 &index, uint32 max,
	generic_addr_t address, generic_size_t size);


static struct cache_module_info* sCacheModule;


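// A single shared physical zero page, referenced kZeroVecCount times by
// sZeroVecs, presumably so runs of zeros can be written out via I/O vectors
// without allocating a zero-filled buffer.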
static const uint32 kZeroVecCount = 32;
static const size_t kZeroVecSize = kZeroVecCount * B_PAGE_SIZE;
static phys_addr_t sZeroPage;	// physical address
static generic_io_vec sZeroVecs[kZeroVecCount];


//	#pragma mark -


PrecacheIO::PrecacheIO(file_cache_ref* ref, off_t offset, generic_size_t size)
	:
	fRef(ref),
	fCache(ref->cache),
	fPages(NULL),
	fVecs(NULL),
	fOffset(offset),
	fVecCount(0),
	fSize(size)
{
	fPageCount = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	fCache->AcquireRefLocked();
}


PrecacheIO::~PrecacheIO()
{
	delete[] fPages;
	delete[] fVecs;
	fCache->ReleaseRefLocked();
}


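/*!	Allocates busy pages covering the whole range of this request, inserts
	them into the cache, and records them in the I/O vectors. The caller must
	hold the cache lock and have enough pages reserved in the given
	reservation.
*/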
status_t
PrecacheIO::Prepare(vm_page_reservation* reservation)
{
	if (fPageCount == 0)
		return B_BAD_VALUE;

	fPages = new(std::nothrow) vm_page*[fPageCount];
	if (fPages == NULL)
		return B_NO_MEMORY;

	fVecs = new(std::nothrow) generic_io_vec[fPageCount];
	if (fVecs == NULL)
		return B_NO_MEMORY;

	// allocate pages for the cache and mark them busy
	uint32 i = 0;
	for (generic_size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		fCache->InsertPage(page, fOffset + pos);

		add_to_iovec(fVecs, fVecCount, fPageCount,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
		fPages[i++] = page;
	}

#if DEBUG_PAGE_ACCESS
	fAllocatingThread = find_thread(NULL);
#endif

	return B_OK;
}


void
PrecacheIO::ReadAsync()
{
	// This object is going to be deleted after the I/O request has been
	// fulfilled
	vfs_asynchronous_read_pages(fRef->vnode, NULL, fOffset, fVecs, fVecCount,
		fSize, B_PHYSICAL_IO_REQUEST, this);
}


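/*!	Called when the asynchronous read has completed: marks all transferred
	pages unbusy (zeroing the unread tail of a partially read last page),
	frees the pages that were not read at all, and finally deletes this
	request object.
*/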
void
PrecacheIO::IOFinished(status_t status, bool partialTransfer,
	generic_size_t bytesTransferred)
{
	AutoLocker<VMCache> locker(fCache);

	// Make successfully loaded pages accessible again (the unread remainder
	// of a partially transferred last page is zeroed out below)
	phys_size_t pagesTransferred
		= (bytesTransferred + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	if (fOffset + (off_t)bytesTransferred > fCache->virtual_end)
		bytesTransferred = fCache->virtual_end - fOffset;

	for (uint32 i = 0; i < pagesTransferred; i++) {
		if (i == pagesTransferred - 1
			&& (bytesTransferred % B_PAGE_SIZE) != 0) {
			// clear the unread tail of the partial page
			size_t bytesTouched = bytesTransferred % B_PAGE_SIZE;
			vm_memset_physical(
				((phys_addr_t)fPages[i]->physical_page_number << PAGE_SHIFT)
					+ bytesTouched,
				0, B_PAGE_SIZE - bytesTouched);
		}

		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);

		fCache->MarkPageUnbusy(fPages[i]);

		DEBUG_PAGE_ACCESS_END(fPages[i]);
	}

	// Free pages after failed I/O
	for (uint32 i = pagesTransferred; i < fPageCount; i++) {
		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);
		fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
		fCache->RemovePage(fPages[i]);
		vm_page_set_state(fPages[i], PAGE_STATE_FREE);
	}

	delete this;
}


//	#pragma mark -


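/*!	Appends the given physical range to the vector array, merging it with the
	previous vector if the two ranges are contiguous. Panics if the array is
	already full.
*/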
static void
add_to_iovec(generic_io_vec* vecs, uint32 &index, uint32 max,
	generic_addr_t address, generic_size_t size)
{
	if (index > 0 && vecs[index - 1].base + vecs[index - 1].length == address) {
		// the iovec can be combined with the previous one
		vecs[index - 1].length += size;
		return;
	}

	if (index == max)
		panic("no more space for iovecs!");

	// we need to start a new iovec
	vecs[index].base = address;
	vecs[index].length = size;
	index++;
}


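/*!	Returns whether the recent access history looks sequential: the oldest
	slot of the ring buffer is only non-zero if none of the remembered
	accesses broke the pattern (see push_access()).
*/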
static inline bool
access_is_sequential(file_cache_ref* ref)
{
	return ref->last_access[ref->last_access_index] != 0;
}


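/*!	Records where the current access ends in the history ring buffer. If the
	access does not start where the previous one ended, the previous entry is
	cleared, which eventually makes access_is_sequential() return false.
*/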
static inline void
push_access(file_cache_ref* ref, off_t offset, generic_size_t bytes,
	bool isWrite)
{
	TRACE(("%p: push %Ld, %ld, %s\n", ref, offset, bytes,
		isWrite ? "write" : "read"));

	int32 index = ref->last_access_index;
	int32 previous = index - 1;
	if (previous < 0)
		previous = LAST_ACCESSES - 1;

	if (offset != ref->LastAccess(previous, isWrite))
		ref->last_access[previous] = 0;

	ref->SetLastAccess(index, offset + bytes, isWrite);

	if (++index >= LAST_ACCESSES)
		index = 0;
	ref->last_access_index = index;
}


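/*!	Reserves the requested number of pages for I/O. If the system is low on
	pages and the cache is unmapped and accessed sequentially, it first tries
	to relieve the pressure itself: for writes by writing back the most
	recently touched modified range, for reads by freeing clean pages from
	this very cache.
*/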
static void
reserve_pages(file_cache_ref* ref, vm_page_reservation* reservation,
	size_t reservePages, bool isWrite)
{
	if (low_resource_state(B_KERNEL_RESOURCE_PAGES) != B_NO_LOW_RESOURCE) {
		VMCache* cache = ref->cache;
		cache->Lock();

		if (cache->consumers.IsEmpty() && cache->areas == NULL
			&& access_is_sequential(ref)) {
			// we are not mapped, and we're accessed sequentially

			if (isWrite) {
				// Just write some pages back, and actually wait until they
				// have been written back in order to relieve the page pressure
				// a bit.
				int32 index = ref->last_access_index;
				int32 previous = index - 1;
				if (previous < 0)
					previous = LAST_ACCESSES - 1;

				vm_page_write_modified_page_range(cache,
					ref->LastAccessPageOffset(previous, true),
					ref->LastAccessPageOffset(index, true));
			} else {
				// free some pages from our cache
				// TODO: start with oldest
				uint32 left = reservePages;
				vm_page* page;
				for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
						(page = it.Next()) != NULL && left > 0;) {
					if (page->State() == PAGE_STATE_CACHED && !page->busy) {
						DEBUG_PAGE_ACCESS_START(page);
						ASSERT(!page->IsMapped());
						ASSERT(!page->modified);
						cache->RemovePage(page);
						vm_page_set_state(page, PAGE_STATE_FREE);
						left--;
					}
				}
			}
		}
		cache->Unlock();
	}

	vm_page_reserve_pages(reservation, reservePages, VM_PRIORITY_USER);
}


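/*!	Wrapper around vfs_read_pages() that zeroes out whatever part of the
	request the file system did not fill (e.g. because the file ends within
	the requested range), so that not every file system/device has to
	implement this itself.
*/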
static inline status_t
read_pages_and_clear_partial(file_cache_ref* ref, void* cookie, off_t offset,
	const generic_io_vec* vecs, size_t count, uint32 flags,
	generic_size_t* _numBytes)
{
	generic_size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(ref->vnode, cookie, offset, vecs, count,
		flags, _numBytes);

	generic_size_t bytesEnd = *_numBytes;

	if (offset + (off_t)bytesEnd > ref->cache->virtual_end)
		bytesEnd = ref->cache->virtual_end - offset;

	if (status == B_OK && bytesEnd < bytesUntouched) {
		// Clear out any leftovers that were not touched by the above read.
		// We're doing this here so that not every file system/device has to
		// implement this.
		bytesUntouched -= bytesEnd;

		for (int32 i = count; i-- > 0 && bytesUntouched != 0; ) {
			generic_size_t length = min_c(bytesUntouched, vecs[i].length);
			vm_memset_physical(vecs[i].base + vecs[i].length - length, 0,
				length);

			bytesUntouched -= length;
		}
	}

	return status;
}


/*!	Reads the requested amount of data into the cache, and allocates
	pages needed to fulfill that request. This function is called by cache_io().
	It can only handle a certain amount of bytes, and the caller must make
	sure that it matches that criterion.
	The cache_ref lock must be held when calling this function; during
	operation it will unlock the cache, though.
*/
static status_t
read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_into_cache(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));

	VMCache* cache = ref->cache;

	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;

	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation, PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
			// TODO: check if the array is large enough (currently panics)!
	}

	push_access(ref, offset, bufferSize, false);
	cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// read file into reserved pages
	status_t status = read_pages_and_clear_partial(ref, cookie, offset, vecs,
		vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
	if (status != B_OK) {
		// reading failed, free allocated pages

		dprintf("file_cache: read pages failed: %s\n", strerror(status));

		cache->Lock();

		for (int32 i = 0; i < pageIndex; i++) {
			cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
			cache->RemovePage(pages[i]);
			vm_page_set_state(pages[i], PAGE_STATE_FREE);
		}

		return status;
	}

	// copy the pages to the supplied buffer if needed

	for (int32 i = 0; i < pageIndex; i++) {
		if (useBuffer && bufferSize != 0) {
			size_t bytes = min_c(bufferSize, (size_t)B_PAGE_SIZE - pageOffset);

			vm_memcpy_from_physical((void*)buffer,
				pages[i]->physical_page_number * B_PAGE_SIZE + pageOffset,
				bytes, true);

			buffer += bytes;
			bufferSize -= bytes;
			pageOffset = 0;
		}
	}

	reserve_pages(ref, reservation, reservePages, false);
	cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		DEBUG_PAGE_ACCESS_END(pages[i]);