197680106SMichael Lotz/*
297680106SMichael Lotz * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
397680106SMichael Lotz * Distributed under the terms of the MIT License.
497680106SMichael Lotz */
597680106SMichael Lotz
6f474606eSMichael Lotz#include "malloc_debug_api.h"
7f474606eSMichael Lotz
897680106SMichael Lotz
9268ddbd7SMichael Lotz#include <malloc.h>
1097680106SMichael Lotz#include <stdio.h>
1197680106SMichael Lotz#include <stdlib.h>
1297680106SMichael Lotz#include <string.h>
1397680106SMichael Lotz
1497680106SMichael Lotz#include <signal.h>
1597680106SMichael Lotz#include <sys/mman.h>
1697680106SMichael Lotz
1797680106SMichael Lotz#include <locks.h>
1897680106SMichael Lotz
193dfbe1a1SMichael Lotz#include <libroot_private.h>
20bd5dea31SMichael Lotz#include <runtime_loader.h>
21bd5dea31SMichael Lotz
223dfbe1a1SMichael Lotz#include <TLS.h>
233dfbe1a1SMichael Lotz
2497680106SMichael Lotz
2597680106SMichael Lotz// #pragma mark - Debug Helpers
2697680106SMichael Lotz
// Upper bound on captured stack frames; alloc and free traces share one
// per-page array of this size.
static const size_t kMaxStackTraceDepth = 50;


// Whether panic() invokes debugger() (true) or just prints (false).
static bool sDebuggerCalls = true;
// When set, leaked allocations are dumped when the process exits.
static bool sDumpAllocationsOnExit = false;
// Number of stack frames to record per allocation/free; 0 disables tracing.
static size_t sStackTraceDepth = 0;
// TLS slots caching the current thread's stack bounds for stack walking.
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;

#if __cplusplus >= 201103L
#include <cstddef>
using namespace std;
static size_t sDefaultAlignment = alignof(max_align_t);
#else
// Pre-C++11 fallback: assume 8 byte default alignment.
static size_t sDefaultAlignment = 8;
#endif
4397680106SMichael Lotz
4497680106SMichael Lotz
4597680106SMichael Lotzstatic void
4697680106SMichael Lotzpanic(const char* format, ...)
4797680106SMichael Lotz{
4897680106SMichael Lotz	char buffer[1024];
4997680106SMichael Lotz
5097680106SMichael Lotz	va_list args;
5197680106SMichael Lotz	va_start(args, format);
5297680106SMichael Lotz	vsnprintf(buffer, sizeof(buffer), format, args);
5397680106SMichael Lotz	va_end(args);
5497680106SMichael Lotz
5597680106SMichael Lotz	if (sDebuggerCalls)
5697680106SMichael Lotz		debugger(buffer);
5797680106SMichael Lotz	else
5897680106SMichael Lotz		debug_printf(buffer);
5997680106SMichael Lotz}
6097680106SMichael Lotz
6197680106SMichael Lotz
/*!	printf() replacement that is guaranteed not to allocate: the message is
	formatted into a fixed stack buffer with vsnprintf() and pushed out with
	a raw write() to standard output.
*/
static void
print_stdout(const char* format, ...)
{
	char message[1024];

	va_list arguments;
	va_start(arguments, format);
	vsnprintf(message, sizeof(message), format, arguments);
	va_end(arguments);

	write(STDOUT_FILENO, message, strlen(message));
}
76ec0190adSMichael Lotz
77ec0190adSMichael Lotz
7897680106SMichael Lotz// #pragma mark - Linked List
7997680106SMichael Lotz
8097680106SMichael Lotz
// Translate between a payload item and its embedded list_link using the byte
// offset stored in the list header (intrusive linked list).
#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))


// Doubly linked list node, embedded inside the payload structure.
struct list_link {
	list_link*	next;
	list_link*	prev;
};

// List header: a sentinel link plus the byte offset of the list_link member
// within each item.
struct list {
	list_link	link;
	int32		offset;
};
9497680106SMichael Lotz
9597680106SMichael Lotz
9697680106SMichael Lotzstatic inline void
9797680106SMichael Lotzlist_remove_item(struct list* list, void* item)
9897680106SMichael Lotz{
9997680106SMichael Lotz	list_link* link = GET_LINK(list, item);
10097680106SMichael Lotz
10197680106SMichael Lotz	link->next->prev = link->prev;
10297680106SMichael Lotz	link->prev->next = link->next;
10397680106SMichael Lotz}
10497680106SMichael Lotz
10597680106SMichael Lotz
10697680106SMichael Lotzstatic inline void
10797680106SMichael Lotzlist_add_item(struct list* list, void* item)
10897680106SMichael Lotz{
10997680106SMichael Lotz	list_link* link = GET_LINK(list, item);
11097680106SMichael Lotz
11197680106SMichael Lotz	link->next = &list->link;
11297680106SMichael Lotz	link->prev = list->link.prev;
11397680106SMichael Lotz
11497680106SMichael Lotz	list->link.prev->next = link;
11597680106SMichael Lotz	list->link.prev = link;
11697680106SMichael Lotz}
11797680106SMichael Lotz
11897680106SMichael Lotz
11997680106SMichael Lotzstatic inline void*
12097680106SMichael Lotzlist_get_next_item(struct list* list, void* item)
12197680106SMichael Lotz{
12297680106SMichael Lotz	if (item == NULL) {
12397680106SMichael Lotz		if (list->link.next == (list_link *)list)
12497680106SMichael Lotz			return NULL;
12597680106SMichael Lotz
12697680106SMichael Lotz		return GET_ITEM(list, list->link.next);
12797680106SMichael Lotz	}
12897680106SMichael Lotz
12997680106SMichael Lotz	list_link* link = GET_LINK(list, item);
13097680106SMichael Lotz	if (link->next == &list->link)
13197680106SMichael Lotz		return NULL;
13297680106SMichael Lotz
13397680106SMichael Lotz	return GET_ITEM(list, link->next);
13497680106SMichael Lotz}
13597680106SMichael Lotz
13697680106SMichael Lotz
13797680106SMichael Lotzstatic inline void
13897680106SMichael Lotzlist_init_etc(struct list* list, int32 offset)
13997680106SMichael Lotz{
14097680106SMichael Lotz	list->link.next = list->link.prev = &list->link;
14197680106SMichael Lotz	list->offset = offset;
14297680106SMichael Lotz}
14397680106SMichael Lotz
14497680106SMichael Lotz
14597680106SMichael Lotz// #pragma mark - Guarded Heap
14697680106SMichael Lotz
14797680106SMichael Lotz
// Per-page state flags.
#define GUARDED_HEAP_PAGE_FLAG_USED			0x01	// page belongs to a live allocation
#define GUARDED_HEAP_PAGE_FLAG_FIRST		0x02	// first page of an allocation
#define GUARDED_HEAP_PAGE_FLAG_GUARD		0x04	// inaccessible guard page
#define GUARDED_HEAP_PAGE_FLAG_DEAD			0x08	// freed, never to be reused
#define GUARDED_HEAP_PAGE_FLAG_AREA			0x10

// Area sizing; parenthesized so the macros stay intact inside larger
// expressions (e.g. division or multiplication at the use site).
#define GUARDED_HEAP_INITIAL_SIZE			(1 * 1024 * 1024)
#define GUARDED_HEAP_GROW_SIZE				(2 * 1024 * 1024)
#define GUARDED_HEAP_AREA_USE_THRESHOLD		(1 * 1024 * 1024)
15797680106SMichael Lotz
15897680106SMichael Lotz
struct guarded_heap;

// Bookkeeping for a single heap page. Only the first page of an allocation
// records a stack trace; the free trace is appended after the alloc trace
// within the same stack_trace array.
struct guarded_heap_page {
	uint8				flags;				// GUARDED_HEAP_PAGE_FLAG_*
	size_t				allocation_size;	// requested size in bytes
	void*				allocation_base;	// address handed to the caller
	size_t				alignment;
	thread_id			allocating_thread;
	thread_id			freeing_thread;		// -1 while still allocated
	list_link			free_list_link;		// link in the area's free list
	size_t				alloc_stack_trace_depth;
	size_t				free_stack_trace_depth;
	addr_t				stack_trace[kMaxStackTraceDepth];
};

// One memory area backing heap pages. The bookkeeping (this header plus the
// pages[] array) lives at the start of the area itself; base points past it
// to the first usable page.
struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;				// next area in heap->areas chain
	area_id				area;
	addr_t				base;				// address of first usable page
	size_t				size;				// usable bytes (page_count pages)
	size_t				page_count;
	size_t				used_pages;
	mutex				lock;
	struct list			free_list;			// pages available for allocation
	guarded_heap_page	pages[0];			// trailing bookkeeping array
};

// Process-wide heap state: a chain of areas plus global counters.
struct guarded_heap {
	rw_lock				lock;				// protects the areas chain
	size_t				page_count;
	size_t				used_pages;
	uint32				area_creation_counter;
	bool				reuse_memory;		// false: freed pages stay dead
	guarded_heap_area*	areas;
};
19597680106SMichael Lotz
19697680106SMichael Lotz
// The single process-wide guarded heap; starts with no areas and with memory
// reuse enabled.
static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, true, NULL
};
20197680106SMichael Lotz
20297680106SMichael Lotz
20397680106SMichael Lotzstatic void dump_guarded_heap_page(void* address, bool doPanic = false);
20497680106SMichael Lotz
20597680106SMichael Lotz
20697680106SMichael Lotzstatic void
20797680106SMichael Lotzguarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
20897680106SMichael Lotz{
20997680106SMichael Lotz	if (signal != SIGSEGV)
21097680106SMichael Lotz		return;
21197680106SMichael Lotz
21297680106SMichael Lotz	if (signalInfo->si_code != SEGV_ACCERR) {
21397680106SMichael Lotz		// Not ours.
21497680106SMichael Lotz		panic("generic segfault");
21597680106SMichael Lotz		return;
21697680106SMichael Lotz	}
21797680106SMichael Lotz
21897680106SMichael Lotz	dump_guarded_heap_page(signalInfo->si_addr, true);
21997680106SMichael Lotz
22097680106SMichael Lotz	exit(-1);
22197680106SMichael Lotz}
22297680106SMichael Lotz
22397680106SMichael Lotz
22497680106SMichael Lotzstatic void
22597680106SMichael Lotzguarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
22697680106SMichael Lotz	uint32 protection)
22797680106SMichael Lotz{
22897680106SMichael Lotz	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
22997680106SMichael Lotz	mprotect((void*)address, B_PAGE_SIZE, protection);
23097680106SMichael Lotz}
23197680106SMichael Lotz
23297680106SMichael Lotz
233158e20e6SMichael Lotzstatic void
234158e20e6SMichael Lotzguarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
235158e20e6SMichael Lotz{
236bd5dea31SMichael Lotz	char* imageName;
237bd5dea31SMichael Lotz	char* symbolName;
238bd5dea31SMichael Lotz	void* location;
239158e20e6SMichael Lotz	bool exactMatch;
240158e20e6SMichael Lotz
241158e20e6SMichael Lotz	for (size_t i = 0; i < depth; i++) {
242158e20e6SMichael Lotz		addr_t address = stackTrace[i];
243158e20e6SMichael Lotz
244bd5dea31SMichael Lotz		status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
245bd5dea31SMichael Lotz			(void*)address, NULL, NULL, &imageName, &symbolName, NULL,
246bd5dea31SMichael Lotz			&location, &exactMatch);
247158e20e6SMichael Lotz		if (status != B_OK) {
248ec0190adSMichael Lotz			print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
249158e20e6SMichael Lotz				strerror(status));
250158e20e6SMichael Lotz			continue;
251158e20e6SMichael Lotz		}
252158e20e6SMichael Lotz
253ec0190adSMichael Lotz		print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
254bd5dea31SMichael Lotz			address - (addr_t)location, exactMatch ? "" : " (nearest)");
255158e20e6SMichael Lotz	}
256158e20e6SMichael Lotz}
257158e20e6SMichael Lotz
258158e20e6SMichael Lotz
259158e20e6SMichael Lotzstatic void
260158e20e6SMichael Lotzguarded_heap_print_stack_traces(guarded_heap_page& page)
261158e20e6SMichael Lotz{
262158e20e6SMichael Lotz	if (page.alloc_stack_trace_depth > 0) {
2638b9bb054SMichael Lotz		print_stdout("alloc stack trace (%" B_PRIuSIZE "):\n",
264158e20e6SMichael Lotz			page.alloc_stack_trace_depth);
265158e20e6SMichael Lotz		guarded_heap_print_stack_trace(page.stack_trace,
266158e20e6SMichael Lotz			page.alloc_stack_trace_depth);
267158e20e6SMichael Lotz	}
268158e20e6SMichael Lotz
269158e20e6SMichael Lotz	if (page.free_stack_trace_depth > 0) {
2708b9bb054SMichael Lotz		print_stdout("free stack trace (%" B_PRIuSIZE "):\n",
271158e20e6SMichael Lotz			page.free_stack_trace_depth);
272158e20e6SMichael Lotz		guarded_heap_print_stack_trace(
273158e20e6SMichael Lotz			&page.stack_trace[page.alloc_stack_trace_depth],
274158e20e6SMichael Lotz			page.free_stack_trace_depth);
275158e20e6SMichael Lotz	}
276158e20e6SMichael Lotz}
277158e20e6SMichael Lotz
278158e20e6SMichael Lotz
/*!	Captures up to \a maxDepth return addresses of the calling thread into
	\a stackTrace, skipping the first \a skipFrames frames (the allocator
	internals). Returns the number of frames recorded, or 0 on failure.
*/
static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
	size_t skipFrames)
{
	if (maxDepth == 0)
		return 0;

	// The thread's stack bounds are cached in TLS so get_thread_info() is
	// only needed once per thread.
	void** stackBase = tls_address(sStackBaseTLSIndex);
	void** stackEnd = tls_address(sStackEndTLSIndex);
	if (*stackBase == NULL || *stackEnd == NULL) {
		thread_info threadInfo;
		status_t result = get_thread_info(find_thread(NULL), &threadInfo);
		if (result != B_OK)
			return 0;

		*stackBase = (void*)threadInfo.stack_base;
		*stackEnd = (void*)threadInfo.stack_end;
	}

	int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
		(addr_t)*stackBase, (addr_t)*stackEnd);

	// A negative result indicates an error from the stack walker.
	return traceDepth < 0 ? 0 : traceDepth;
}
303158e20e6SMichael Lotz
304158e20e6SMichael Lotz
30597680106SMichael Lotzstatic void
30697680106SMichael Lotzguarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
30797680106SMichael Lotz	size_t pagesNeeded, size_t allocationSize, size_t alignment,
30897680106SMichael Lotz	void* allocationBase)
30997680106SMichael Lotz{
31097680106SMichael Lotz	if (pagesNeeded < 2) {
31197680106SMichael Lotz		panic("need to allocate at least 2 pages, one for guard\n");
31297680106SMichael Lotz		return;
31397680106SMichael Lotz	}
31497680106SMichael Lotz
31597680106SMichael Lotz	guarded_heap_page* firstPage = NULL;
31697680106SMichael Lotz	for (size_t i = 0; i < pagesNeeded; i++) {
31797680106SMichael Lotz		guarded_heap_page& page = area.pages[startPageIndex + i];
31897680106SMichael Lotz		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
31997680106SMichael Lotz		if (i == 0) {
3205dbea469SMichael Lotz			page.allocating_thread = find_thread(NULL);
3215dbea469SMichael Lotz			page.freeing_thread = -1;
32297680106SMichael Lotz			page.allocation_size = allocationSize;
32397680106SMichael Lotz			page.allocation_base = allocationBase;
32497680106SMichael Lotz			page.alignment = alignment;
32597680106SMichael Lotz			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
326158e20e6SMichael Lotz			page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
327158e20e6SMichael Lotz				page.stack_trace, sStackTraceDepth, 2);
328158e20e6SMichael Lotz			page.free_stack_trace_depth = 0;
32997680106SMichael Lotz			firstPage = &page;
33097680106SMichael Lotz		} else {
3315dbea469SMichael Lotz			page.allocating_thread = firstPage->allocating_thread;
3325dbea469SMichael Lotz			page.freeing_thread = -1;
33397680106SMichael Lotz			page.allocation_size = allocationSize;
33497680106SMichael Lotz			page.allocation_base = allocationBase;
33597680106SMichael Lotz			page.alignment = alignment;
336158e20e6SMichael Lotz			page.alloc_stack_trace_depth = 0;
337158e20e6SMichael Lotz			page.free_stack_trace_depth = 0;
33897680106SMichael Lotz		}
33997680106SMichael Lotz
34097680106SMichael Lotz		list_remove_item(&area.free_list, &page);
34197680106SMichael Lotz
34297680106SMichael Lotz		if (i == pagesNeeded - 1) {
34397680106SMichael Lotz			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
34497680106SMichael Lotz			guarded_heap_page_protect(area, startPageIndex + i, 0);
34597680106SMichael Lotz		} else {
34697680106SMichael Lotz			guarded_heap_page_protect(area, startPageIndex + i,
34797680106SMichael Lotz				B_READ_AREA | B_WRITE_AREA);
34897680106SMichael Lotz		}
34997680106SMichael Lotz	}
35097680106SMichael Lotz}
35197680106SMichael Lotz
35297680106SMichael Lotz
35397680106SMichael Lotzstatic void
35497680106SMichael Lotzguarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
35597680106SMichael Lotz	bool force = false)
35697680106SMichael Lotz{
35797680106SMichael Lotz	guarded_heap_page& page = area.pages[pageIndex];
35897680106SMichael Lotz
35997680106SMichael Lotz	if (area.heap->reuse_memory || force)
36097680106SMichael Lotz		page.flags = 0;
36197680106SMichael Lotz	else
36297680106SMichael Lotz		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
36397680106SMichael Lotz
3645dbea469SMichael Lotz	page.freeing_thread = find_thread(NULL);
36597680106SMichael Lotz
36697680106SMichael Lotz	list_add_item(&area.free_list, &page);
36797680106SMichael Lotz
36897680106SMichael Lotz	guarded_heap_page_protect(area, pageIndex, 0);
36997680106SMichael Lotz}
37097680106SMichael Lotz
37197680106SMichael Lotz
// Updates the heap-global used page counter atomically (the heap lock may
// only be held for reading here).
// NOTE(review): used_pages is a size_t but is updated through an (int32*)
// cast; on targets where size_t is wider than 32 bits only the low word is
// touched - confirm whether a 64-bit atomic add should be used instead.
static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	atomic_add((int32*)&heap.used_pages, pagesAllocated);
}
37797680106SMichael Lotz
37897680106SMichael Lotz
/*!	Tries to carve \a pagesNeeded consecutive pages (including the trailing
	guard page) out of \a area for an allocation of \a size bytes with the
	given \a alignment. Returns the allocation address, or NULL if the area
	cannot satisfy the request.
*/
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size, size_t alignment)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		// Dead pages stay on the free list but must never be reused.
		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		// Place the allocation at the end of its pages (aligned down as
		// requested) so that overruns run straight into the guard page.
		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
43097680106SMichael Lotz
43197680106SMichael Lotz
/*!	Initializes the memory at \a baseAddress (of \a size bytes, backed by
	area \a id) as a guarded_heap_area and links it into \a heap. The area
	header and the page bookkeeping array are placed at the start of the
	area; the remaining pages form the usable heap and start out free.
	Returns true (cannot currently fail).
*/
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	// Pages consumed by the area header plus one guarded_heap_page entry per
	// page, rounded up to whole pages.
	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	// The bookkeeping pages are not available for allocations.
	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	// Put every usable page on the free list (force avoids marking them
	// dead even when memory reuse is disabled).
	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}
46597680106SMichael Lotz
46697680106SMichael Lotz
46797680106SMichael Lotzstatic bool
46897680106SMichael Lotzguarded_heap_area_create(guarded_heap& heap, size_t size)
46997680106SMichael Lotz{
47097680106SMichael Lotz	for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
47197680106SMichael Lotz		trySize /= 2) {
47297680106SMichael Lotz
47397680106SMichael Lotz		void* baseAddress = NULL;
47497680106SMichael Lotz		area_id id = create_area("guarded_heap_area", &baseAddress,
47597680106SMichael Lotz			B_ANY_ADDRESS, trySize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
47697680106SMichael Lotz
47797680106SMichael Lotz		if (id < 0)
47897680106SMichael Lotz			continue;
47997680106SMichael Lotz
48097680106SMichael Lotz		if (guarded_heap_area_init(heap, id, baseAddress, trySize))
48197680106SMichael Lotz			return true;
48297680106SMichael Lotz
48397680106SMichael Lotz		delete_area(id);
48497680106SMichael Lotz	}
48597680106SMichael Lotz
48697680106SMichael Lotz	panic("failed to allocate a new heap area");