/*
 * Copyright 2003-2013, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "runtime_loader_private.h"

#include <syscalls.h>

#ifdef HEAP_TEST
#	include <stdio.h>
#endif
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <util/SplayTree.h>

#include <locks.h>

/*!	This is a very simple malloc()/free() implementation - it only
	manages a free list.
	After heap_init() is called, all free memory is contained in one
	big chunk, the only entry in the free list (a singly linked
	list).
	When memory is allocated, the smallest free chunk that contains
	the requested size is split (or taken as a whole if it can't be
	split any further), and its lower half is removed from the
	free list.
	The free list is ordered by size, starting with the smallest
	free chunk available. When a chunk is freed, it is joined
	with its predecessor or successor, if possible.
	To ease list handling, the list anchor itself is a free chunk with
	size 0 that can't be allocated.
*/
const static size_t kAlignment = 8;
	// all memory chunks will be a multiple of this

const static size_t kInitialHeapSize = 64 * 1024;
const static size_t kHeapGrowthAlignment = 32 * 1024;

static const char* const kLockName = "runtime_loader heap";
static recursive_lock sLock = RECURSIVE_LOCK_INITIALIZER(kLockName);


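/*!	Layout of a chunk: the first kAlignment bytes hold the chunk's total size
	(the Chunk header below); the user-visible allocation starts right after
	that header. While a chunk is free, the user area is reused to store the
	splay tree link and the free list's next pointer (FreeChunkData).
*/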
class Chunk {
public:
	size_t CompleteSize() const
	{
		return fSize;
	}

protected:
	union {
		size_t	fSize;
		char	fAlignment[kAlignment];
	};
};


class FreeChunk;


struct FreeChunkData : SplayTreeLink<FreeChunk> {

	FreeChunk* Next() const
	{
		return fNext;
	}

	FreeChunk** NextLink()
	{
		return &fNext;
	}

protected:
	FreeChunk*	fNext;
};


class FreeChunk : public Chunk, public FreeChunkData {
public:
			void				SetTo(size_t size);

			size_t				Size() const;

			FreeChunk*			Split(size_t splitSize);
			bool				IsTouching(FreeChunk* link);
			FreeChunk*			Join(FreeChunk* link);

			void*				AllocatedAddress() const;
	static	FreeChunk*			SetToAllocated(void* allocated);
};


struct FreeChunkKey {
	FreeChunkKey(size_t size)
		:
		fSize(size),
		fChunk(NULL)
	{
	}

	FreeChunkKey(const FreeChunk* chunk)
		:
		fSize(chunk->Size()),
		fChunk(chunk)
	{
	}

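	// Orders chunks primarily by size, with ties broken by address, so that
	// equal-sized chunks remain distinct keys in the tree.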
	int Compare(const FreeChunk* chunk) const
	{
		size_t chunkSize = chunk->Size();
		if (chunkSize != fSize)
			return fSize < chunkSize ? -1 : 1;

		if (fChunk == chunk)
			return 0;
		return fChunk < chunk ? -1 : 1;
	}

private:
	size_t				fSize;
	const FreeChunk*	fChunk;
};


struct FreeChunkTreeDefinition {
	typedef FreeChunkKey	KeyType;
	typedef FreeChunk		NodeType;

	static FreeChunkKey GetKey(const FreeChunk* node)
	{
		return FreeChunkKey(node);
	}

	static SplayTreeLink<FreeChunk>* GetLink(FreeChunk* node)
	{
		return node;
	}

	static int Compare(const FreeChunkKey& key, const FreeChunk* node)
	{
		return key.Compare(node);
	}

	static FreeChunk** GetListLink(FreeChunk* node)
	{
		return node->NextLink();
	}
};


typedef IteratableSplayTree<FreeChunkTreeDefinition> FreeChunkTree;


static size_t sAvailable;
static FreeChunkTree sFreeChunkTree;


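/*!	Rounds \a size up to the next multiple of \a alignment, which must be a
	power of two for the mask below to work. For example, with kAlignment == 8:
	align(1) == 8, align(8) == 8, align(13) == 16.
*/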
static inline size_t
align(size_t size, size_t alignment = kAlignment)
{
	return (size + alignment - 1) & ~(alignment - 1);
}


void
FreeChunk::SetTo(size_t size)
{
	fSize = size;
	fNext = NULL;
}


/*!	Returns the number of bytes that can be allocated
	in this chunk.
*/
size_t
FreeChunk::Size() const
{
	return (addr_t)this + fSize - (addr_t)AllocatedAddress();
}


/*!	Splits the upper half at the requested location and returns it. This chunk
	will no longer be a valid FreeChunk object; only its fSize will be valid.
 */
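/*	For example, with kAlignment == 8: splitting a chunk whose fSize is 64 at
	splitSize == 16 leaves this chunk with fSize == 24 (header plus payload)
	and returns the remainder as a chunk with fSize == 40.
*/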
FreeChunk*
FreeChunk::Split(size_t splitSize)
{
	splitSize = align(splitSize);

	FreeChunk* chunk = (FreeChunk*)((addr_t)AllocatedAddress() + splitSize);
	size_t newSize = (addr_t)chunk - (addr_t)this;
	chunk->fSize = fSize - newSize;
	chunk->fNext = NULL;

	fSize = newSize;

	return chunk;
}


/*!	Checks if the specified chunk touches this chunk, so
	that they could be joined.
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
	return chunk
		&& (((uint8*)this + fSize == (uint8*)chunk)
			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
}


/*!	Joins the chunk to this chunk and returns the pointer
	to the new chunk - which will be one of the two
	original chunks.
	Note that the chunks must be joinable, or else this method
	doesn't work correctly. Use FreeChunk::IsTouching()
	to check if this method can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	if (chunk < this) {
		chunk->fSize += fSize;
		chunk->fNext = fNext;

		return chunk;
	}

	fSize += chunk->fSize;
	fNext = chunk->fNext;

	return this;
}


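/*!	The allocated (user-visible) address is the start of the FreeChunkData
	subobject, i.e. the first byte past the Chunk header.
*/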
void*
FreeChunk::AllocatedAddress() const
{
	return (void*)static_cast<const FreeChunkData*>(this);
}


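/*!	The inverse of AllocatedAddress(): maps a user-visible address back to
	its owning chunk by undoing the header offset.
*/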
FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return static_cast<FreeChunk*>((FreeChunkData*)allocated);
}


//	#pragma mark -


static status_t
add_area(size_t size)
{
	void* base;
	area_id area = _kern_create_area("rld heap", &base,
		B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0)
		return area;

	// declare the whole area as one chunk, and add it to the free tree
	FreeChunk* chunk = (FreeChunk*)base;
	chunk->SetTo(size);
	sFreeChunkTree.Insert(chunk);

	sAvailable += chunk->Size();
	return B_OK;
}


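/*!	Enlarges the heap by a new area that is big enough for \a bytes plus one
	chunk header, rounded up to a multiple of kHeapGrowthAlignment (32 KB).
*/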
static status_t
grow_heap(size_t bytes)
{
	return add_area(align(align(sizeof(Chunk)) + bytes, kHeapGrowthAlignment));
}


//	#pragma mark -


status_t
heap_init()
{
	return add_area(kInitialHeapSize);
}


status_t
heap_reinit_after_fork()
{
	recursive_lock_init(&sLock, kLockName);
	return B_OK;
}


#ifdef HEAP_TEST
void
dump_chunks(void)
{
	FreeChunk* chunk = sFreeChunkTree.FindMin();
	while (chunk != NULL) {
		printf("\t%p: chunk size = %zu, end = %p, next = %p\n", chunk,
			chunk->Size(), (uint8*)chunk + chunk->CompleteSize(),
			chunk->Next());
		chunk = chunk->Next();
	}
}
#endif
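

#ifdef HEAP_TEST
/*!	A minimal sketch of how this allocator could be exercised in a test
	build - hypothetical, not part of the runtime_loader itself. It assumes
	the malloc()/realloc()/free() defined below are the ones linked in.
*/
int
heap_test(void)
{
	if (heap_init() != B_OK)
		return 1;

	void* a = malloc(100);
	void* b = malloc(200);
	dump_chunks();

	a = realloc(a, 500);
	free(b);
	free(a);
	dump_chunks();

	return 0;
}
#endif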


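/*!	Allocates \a size bytes using a best-fit strategy: the smallest free chunk
	that can hold the aligned request is taken from the tree (growing the heap
	if necessary) and is split when the remainder is large enough to form a
	valid chunk of its own.
*/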
void*
malloc(size_t size)
{
	if (size == 0)
		return NULL;

	RecursiveLocker _(sLock);

	// align the size requirement to a kAlignment-byte boundary
	if (size < sizeof(FreeChunkData))
		size = sizeof(FreeChunkData);
	size = align(size);

	if (size > sAvailable) {
		// try to enlarge heap
		if (grow_heap(size) != B_OK)
			return NULL;
	}

	FreeChunkKey key(size);
	FreeChunk* chunk = sFreeChunkTree.FindClosest(key, true, true);
	if (chunk == NULL) {
		// could not find a free chunk as large as needed
		if (grow_heap(size) != B_OK)
			return NULL;

		chunk = sFreeChunkTree.FindClosest(key, true, true);
		if (chunk == NULL) {
			TRACE(("no allocation chunk found after growing the heap\n"));
			return NULL;
		}
	}

	sFreeChunkTree.Remove(chunk);
	sAvailable -= chunk->Size();

	void* allocatedAddress = chunk->AllocatedAddress();

	// If this chunk is bigger than the requested size and there's enough space
	// left over for a new chunk, we split it.
	if (chunk->Size() >= size + align(sizeof(FreeChunk))) {
		FreeChunk* freeChunk = chunk->Split(size);
		sFreeChunkTree.Insert(freeChunk);
		sAvailable += freeChunk->Size();
	}

	TRACE(("malloc(%lu) -> %p\n", size, allocatedAddress));
	return allocatedAddress;
}


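/*!	Resizes \a oldBuffer to \a newSize. The old buffer is kept as-is when it
	still fits, unless it is at least 128 bytes large and the new size is no
	more than a third of the old one; in that case a smaller chunk is
	allocated and the data copied over.
*/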
void*
realloc(void* oldBuffer, size_t newSize)
{
	if (newSize == 0) {
		TRACE(("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize));
		free(oldBuffer);
		return NULL;
	}

	RecursiveLocker _(sLock);

	size_t oldSize = 0;
	if (oldBuffer != NULL) {
		FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);
		oldSize = oldChunk->Size();

		// Check if the old buffer still fits, and if it makes sense to keep it.
		if (oldSize >= newSize
			&& (oldSize < 128 || newSize > oldSize / 3)) {
			TRACE(("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize));
			return oldBuffer;
		}
	}

	void* newBuffer = malloc(newSize);
	if (newBuffer == NULL)
		return NULL;

	if (oldBuffer != NULL) {
		memcpy(newBuffer, oldBuffer, std::min(oldSize, newSize));
		free(oldBuffer);
	}

	TRACE(("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer));
	return newBuffer;
}


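/*!	Returns \a allocated to the free tree. To limit fragmentation, the free
	list is scanned for chunks that directly border the freed one; at most
	two (its predecessor and its successor in memory) can be joined with it.
*/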
void
free(void* allocated)
{
	if (allocated == NULL)
		return;

	RecursiveLocker _(sLock);

	TRACE(("free(%p)\n", allocated));

	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);

	// try to join the new free chunk with an existing one;
	// it may be joined with up to two chunks

	FreeChunk* chunk = sFreeChunkTree.FindMin();
	int32 joinCount = 0;

	while (chunk) {
		FreeChunk* nextChunk = chunk->Next();

		if (chunk->IsTouching(freedChunk)) {
			sFreeChunkTree.Remove(chunk);
			sAvailable -= chunk->Size();

			freedChunk = chunk->Join(freedChunk);

			if (++joinCount == 2)
				break;
		}

		chunk = nextChunk;
	}

	sFreeChunkTree.Insert(freedChunk);
	sAvailable += freedChunk->Size();
}