/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/pae/X86VMTranslationMapPAE.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64


#if TRANSLATION_MAP_TRACING


namespace TranslationMapTracing {


class TranslationMapTraceEntryBase
	: public TRACE_ENTRY_SELECTOR(TRANSLATION_MAP_TRACING_STACK_TRACE) {
public:
	TranslationMapTraceEntryBase()
		:
		TraceEntryBase(TRANSLATION_MAP_TRACING_STACK_TRACE, 0, true)
	{
	}

	void PrintPageTableEntry(TraceOutput& out, pae_page_table_entry entry)
	{
		out.Print("%#" B_PRIx64 " %c%c%c%c%c %s %s %c%c",
			entry & X86_PAE_PTE_ADDRESS_MASK,
			(entry & X86_PAE_PTE_PRESENT) != 0 ? 'P' : '-',
			(entry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(entry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(entry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X',
			(entry & X86_PAE_PTE_GLOBAL) != 0 ? 'G' : '-',
			(entry & X86_PAE_PTE_WRITE_THROUGH) != 0 ? "WT" : "--",
			(entry & X86_PAE_PTE_CACHING_DISABLED) != 0 ? "UC" : "--",
			(entry & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(entry & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}
};


class Map : public TranslationMapTraceEntryBase {
public:
	Map(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map map: %p: %#" B_PRIxADDR " -> ", fMap,
			fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Unmap : public TranslationMapTraceEntryBase {
public:
	Unmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Protect : public TranslationMapTraceEntryBase {
public:
	Protect(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry newEntry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fNewEntry(newEntry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map protect: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fNewEntry);
		out.Print(" (%c%c%c)",
			(fOldEntry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(fOldEntry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(fOldEntry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fNewEntry;
};


class ClearFlags : public TranslationMapTraceEntryBase {
public:
	ClearFlags(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry flagsCleared)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fFlagsCleared(flagsCleared)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fOldEntry & ~fFlagsCleared);
		out.Print(", cleared %c%c (%c%c)",
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-',
			(fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fFlagsCleared;
};


class ClearFlagsUnmap : public TranslationMapTraceEntryBase {
public:
	ClearFlagsUnmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


}	// namespace TranslationMapTracing

#	define T(x)	new(std::nothrow) TranslationMapTracing::x

#else
#	define T(x)
#endif	// TRANSLATION_MAP_TRACING



X86VMTranslationMapPAE::X86VMTranslationMapPAE()
	:
	fPagingStructures(NULL)
{
}


X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	// cycle through and free all of the user space page tables

	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
		// assuming 1-1 split of the address space

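	// Only the two userland page directories can refer to page tables we own
	// here; the kernel's page tables in the upper two directories are shared
	// and must not be freed.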
	for (uint32 k = 0; k < 2; k++) {
		pae_page_directory_entry* pageDir
			= fPagingStructures->VirtualPageDirs()[k];
		if (pageDir == NULL)
			continue;

		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
						"didn't find page table page: page address: %#"
						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
						address,
						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
	TRACE("X86VMTranslationMapPAE::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();

	if (kernel) {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
			method->KernelPhysicalPageDirPointerTable(), NULL,
			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
	} else {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// The following code assumes that the kernel address space occupies the
		// upper half of the virtual address space. This simplifies things a
		// lot, since it allows us to just use the upper two page directories
		// of the kernel and create two new lower page directories for the
		// userland.
		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);

		// allocate the page directories (both at once)
		pae_page_directory_entry* virtualPageDirs[4];
		phys_addr_t physicalPageDirs[4];
		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
			2 * B_PAGE_SIZE);
		if (virtualPageDirs[0] == NULL)
			return B_NO_MEMORY;
		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;

		// clear the userland page directories
		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);

		// use the upper two kernel page directories
		for (int32 i = 2; i < 4; i++) {
			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
		}

		// look up the page directories' physical addresses
		for (int32 i = 0; i < 2; i++) {
			vm_get_page_mapping(VMAddressSpace::KernelID(),
				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
		}

		// allocate the PDPT -- needs to have a 32 bit physical address
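		// (CR3 is still a 32-bit register in PAE mode, so the PDPT must lie
		// below 4 GB)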
		phys_addr_t physicalPDPT;
		void* pdptHandle;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)
				method->Allocate32BitPage(physicalPDPT, pdptHandle);
		if (pdpt == NULL) {
			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}

		// init the PDPT entries
		for (int32 i = 0; i < 4; i++) {
			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
				| X86_PAE_PDPTE_PRESENT;
		}

		// init the paging structures
		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
			physicalPageDirs);
	}

	return B_OK;
}


size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = kPAEPageTableRange - B_PAGE_SIZE;
		end += kPAEPageTableRange - B_PAGE_SIZE;
	}

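	// one page table is needed per kPAEPageTableRange-sized slot the range
	// [start, end] touches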
	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
}


status_t
X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
		"\n", virtualAddress, physicalAddress);

	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		vm_page *page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
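		// (the directory entry is always made writable; per-page protection
		// is enforced by the page table entries)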
		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
			physicalPageTable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;

	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64 " @ %p",
		virtualAddress, *entry, entry);

	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	T(Map(this, virtualAddress, *entry));

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		"\n", start, end);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
				B_PRIxADDR "\n", start);

			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {

			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethodPAE::SetTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				pae_page_table_entry oldEntry
					= X86PagingMethodPAE::ClearTableEntryFlags(
						&pageTable[index], X86_PAE_PTE_PRESENT);

				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then could the entry
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearTableEntry(
		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);

	T(Unmap(this, address, oldEntry));

	pinner.Unlock();

	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any
		// TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntry(&pageTable[index]);
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
				continue;

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

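	// If the whole address space is being deleted and the caller does not
	// care about the page flags of the area's top cache, we can skip
	// clearing the table entries for pages belonging to that cache.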
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntry(
					&pageTable[address / B_PAGE_SIZE
						% kPAEPageTableEntryCount]);

			pinner.Unlock();

			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			T(Unmap(this, address, oldEntry));

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		} else {
#if TRANSLATION_MAP_TRACING
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0) {
				pae_page_table_entry* pageTable
					= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
						*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
				pae_page_table_entry oldEntry = pageTable[
					address / B_PAGE_SIZE % kPAEPageTableEntryCount];

				pinner.Unlock();

				if ((oldEntry & X86_PAE_PTE_PRESENT) != 0)
					T(Unmap(this, address, oldEntry));
			}
#endif
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
X86VMTranslationMapPAE::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	// get the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	pinner.Unlock();

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

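	// Note: We neither lock nor pin the CPU here -- this variant is meant to
	// be usable from interrupt context, hence the interrupt-safe physical
	// page mapper accessor below.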
	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		", attributes: %#" B_PRIx32 "\n", start, end, attributes);

	// compute protection/memory type flags
	uint64 newFlags
		= X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(memoryType);
	if ((attributes & B_USER_PROTECTION) != 0) {
		newFlags |= X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newFlags |= X86_PAE_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			newFlags |= X86_PAE_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newFlags |= X86_PAE_PTE_WRITABLE;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry entry = pageTable[index];
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			pae_page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethodPAE::TestAndSetTableEntry(
					&pageTable[index],
					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
							| X86_PAE_PTE_MEMORY_TYPE_MASK))
						| newFlags,
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			T(Protect(this, start, entry,
				(entry & ~(X86_PAE_PTE_PROTECTION_MASK
						| X86_PAE_PTE_MEMORY_TYPE_MASK))
					| newFlags));

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
{
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// clear out the flags we've been requested to clear
	pae_page_table_entry oldEntry
		= X86PagingMethodPAE::ClearTableEntryFlags(entry, flagsToClear);

	pinner.Unlock();

	T(ClearFlags(this, address, oldEntry, flagsToClear));

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


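/*!	Clears the page's accessed and modified flags and, if \a unmapIfUnaccessed
	is \c true and the accessed flag wasn't set, removes the mapping entirely.
	Returns whether the accessed flag was set; \a _modified is set to whether
	the modified flag was set.
*/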
bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// perform the deed
	pae_page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_PAE_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethodPAE::ClearTableEntryFlags(entry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
				T(ClearFlags(this, address, oldEntry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethodPAE::TestAndSetTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				T(ClearFlagsUnmap(this, address, oldEntry));
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethodPAE::ClearTableEntryFlags(entry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
		T(ClearFlags(this, address, oldEntry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any
		// TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


void
X86VMTranslationMapPAE::DebugPrintMappingInfo(addr_t virtualAddress)
{
	// get the page directory
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
	pae_page_directory_entry* pageDirectory = pdpt[virtualAddress >> 30];
	kprintf("page directory: %p (PDPT[%zu])\n", pageDirectory,
		virtualAddress >> 30);

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(pdpt, virtualAddress);
	kprintf("page directory entry %zu (%p): %#" B_PRIx64 "\n",
		pageDirEntry - pageDirectory, pageDirEntry, *pageDirEntry);

	kprintf("  access: ");
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0)
		kprintf(" present");
	if ((*pageDirEntry & X86_PAE_PDE_WRITABLE) != 0)
		kprintf(" writable");
	if ((*pageDirEntry & X86_PAE_PDE_USER) != 0)
		kprintf(" user");
	if ((*pageDirEntry & X86_PAE_PDE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((*pageDirEntry & X86_PAE_PDE_LARGE_PAGE) != 0)
		kprintf(" large");

	kprintf("\n  caching:");
	if ((*pageDirEntry & X86_PAE_PDE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((*pageDirEntry & X86_PAE_PDE_CACHING_DISABLED) != 0)
		kprintf(" uncached");

	kprintf("\n  flags:  ");
	if ((*pageDirEntry & X86_PAE_PDE_ACCESSED) != 0)
		kprintf(" accessed");
	kprintf("\n");

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return;

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	kprintf("page table: %#" B_PRIx64 "\n",
		*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	size_t pteIndex = virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
	pae_page_table_entry entry = pageTable[pteIndex];
	kprintf("page table entry %zu (phys: %#" B_PRIx64 "): %#" B_PRIx64 "\n",
		pteIndex,
		(*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ pteIndex * sizeof(pae_page_table_entry),
		entry);

	kprintf("  access: ");
	if ((entry & X86_PAE_PTE_PRESENT) != 0)
		kprintf(" present");
	if ((entry & X86_PAE_PTE_WRITABLE) != 0)
		kprintf(" writable");
	if ((entry & X86_PAE_PTE_USER) != 0)
		kprintf(" user");
	if ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((entry & X86_PAE_PTE_GLOBAL) != 0)
		kprintf(" global");

	kprintf("\n  caching:");
	if ((entry & X86_PAE_PTE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((entry & X86_PAE_PTE_CACHING_DISABLED) != 0)
		kprintf(" uncached");
	if ((entry & X86_PAE_PTE_PAT) != 0)
		kprintf(" PAT");

	kprintf("\n  flags:  ");
	if ((entry & X86_PAE_PTE_ACCESSED) != 0)
		kprintf(" accessed");
	if ((entry & X86_PAE_PTE_DIRTY) != 0)
		kprintf(" dirty");
	kprintf("\n");

	if ((entry & X86_PAE_PTE_PRESENT) != 0) {
		kprintf("  address: %#" B_PRIx64 "\n",
			entry & X86_PAE_PTE_ADDRESS_MASK);
	}
}


bool
X86VMTranslationMapPAE::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
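	// A kernel map owns only the upper two page directories (the kernel half
	// of the address space), a userland map only the lower two.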
	for (uint32 pageDirIndex = fIsKernelMap ? 2 : 0;
		pageDirIndex < uint32(fIsKernelMap ? 4 : 2); pageDirIndex++) {
		// iterate through the page directory
		pae_page_directory_entry* pageDirectory = pdpt[pageDirIndex];
		for (uint32 pdeIndex = 0; pdeIndex < kPAEPageDirEntryCount;
			pdeIndex++) {
			pae_page_directory_entry& pageDirEntry = pageDirectory[pdeIndex];
			if ((pageDirEntry & X86_PAE_PDE_ADDRESS_MASK) == 0)
				continue;

			// get and iterate through the page table
			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)X86PagingMethodPAE::Method()
					->PhysicalPageMapper()->InterruptGetPageTableAt(
						pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			for (uint32 pteIndex = 0; pteIndex < kPAEPageTableEntryCount;
				pteIndex++) {
				pae_page_table_entry entry = pageTable[pteIndex];
				if ((entry & X86_PAE_PTE_PRESENT) != 0
					&& (entry & X86_PAE_PTE_ADDRESS_MASK) == physicalAddress) {
					addr_t virtualAddress = pageDirIndex * kPAEPageDirRange
						+ pdeIndex * kPAEPageTableRange
						+ pteIndex * B_PAGE_SIZE;
					if (callback.HandleVirtualAddress(virtualAddress))
						return true;
				}
			}
		}
	}

	return false;
}


X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
	return fPagingStructures;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64