/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>
#include <safemode.h>

#ifdef __x86_64__
#	include "paging/64bit/X86PagingMethod64Bit.h"
#else
#	include "paging/32bit/X86PagingMethod32Bit.h"
#	include "paging/pae/X86PagingMethodPAE.h"
#endif


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


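// Statically allocated storage for the paging method object. The union is
// large enough for any of the supported method classes and is 64 bit aligned;
// arch_vm_translation_map_init() constructs the selected method into it via
// placement new, since it runs before dynamic memory allocation is available.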
static union {
	uint64	align;
#ifdef __x86_64__
	char	sixty_four[sizeof(X86PagingMethod64Bit)];
#else
	char	thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char	pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


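/*!	Creates a translation map for a new kernel or team address space by
	delegating to the paging method selected in
	arch_vm_translation_map_init().
*/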
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}


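/*!	Chooses the paging method to use (the 64 bit method on x86_64; otherwise
	PAE if it is available, needed, and not disabled via safe mode; legacy
	32 bit paging as the fallback) and lets it initialize itself and the
	physical page mapper.
*/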
status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

#ifdef __x86_64__
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
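	// PAE is required to use the NX bit (it only exists in the PAE and long
	// mode page table formats) and to address physical memory beyond 4 GB.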
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX,
		FEATURE_EXT_AMD);
	if (!paeNeeded) {
		for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
			phys_addr_t end = args->physical_memory_range[i].start
				+ args->physical_memory_range[i].size;
			if (end > 0x100000000LL) {
				paeNeeded = true;
				break;
			}
		}
	}

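	// The "4 GB memory limit" safe mode option forces legacy 32 bit paging,
	// which in turn limits usable physical memory to the first 4 GB.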
	bool paeDisabled = get_safemode_boolean_early(args,
		B_SAFEMODE_4_GB_MEMORY_LIMIT, false);

	if (paeAvailable && paeNeeded && !paeDisabled) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
		dprintf("using 32 bit paging (PAE %s)\n",
			!paeAvailable
				? "not available"
				: (paeDisabled ? "disabled" : "not needed"));
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


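/*!	Called once semaphores are available. The x86 paging methods need no
	further setup at this point, so there is nothing to do here.
*/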
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


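/*!	Called once the VM's area subsystem is up; gives the paging method the
	chance to create areas for the structures it set up during early boot.
*/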
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


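/*!	Maps a single page while the kernel is still being bootstrapped, i.e.
	before the VM and the real translation maps are usable. \a get_free_page
	is called to allocate physical pages for any paging structures that still
	need to be created.
*/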
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
		va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection to check for, a bitwise OR of one or
		more of \c B_KERNEL_READ_AREA and \c B_KERNEL_WRITE_AREA.
	\return \c true if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
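	// Not initialized yet (e.g. the debugger was entered very early during
	// boot): the paging structures cannot be consulted, so don't deny access.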
	if (!gX86PagingMethod)
		return true;

	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}

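/*	Usage sketch (hypothetical caller, not part of this file): a kernel
	debugger command that dumps memory would check accessibility before
	dereferencing anything, along these lines:

		if (arch_vm_translation_map_is_kernel_page_accessible(address,
				B_KERNEL_READ_AREA)) {
			// the page may safely be read from the debugger context
		}
*/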