/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <image_defs.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}

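//! Appends \a image to the tail of \a queue.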
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}

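//! Removes \a image from \a queue and resets its link pointers.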
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}

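/*!	Returns the first image in \a queue whose name (or path, if \a isPath is
	true) matches \a name and whose type is covered by \a typeMask, or NULL
	if there is no such image.
*/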
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}

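/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. Uses a breadth-first traversal; RFLAG_VISITED
	serves as a temporary marker and is cleared again before returning.
*/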
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}

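/*!	Recursively adds \a image and its not yet visited dependencies to
	\a initList, dependencies first, starting at index \a slot. \a sortFlag is
	set on every visited image; images that already have it set are skipped.
	Returns the next free slot.
*/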
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (!fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -

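/*!	Allocates and zero-initializes an image_t with room for \a regionCount
	regions. \a path is copied verbatim; the last path component of \a name
	becomes the image name (a DT_SONAME entry may later replace it). The
	reference count starts at 1.
*/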
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}

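/*!	Frees the memory associated with \a image: the needed and versions
	arrays, the registered symbol patchers, and the image structure itself.
	In DEBUG builds the memory is overwritten first to catch accidental reuse.
*/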
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}

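//! Unregisters \a image from the kernel (see load_container()) and frees it.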

void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}

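/*!	Releases a reference to \a image. When the last reference is gone, the
	image is moved from the loaded to the disposable list and the references
	to all of its dependencies are released as well.
*/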
void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}

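/*!	Maps all regions of \a image from \a fd into memory. A contiguous address
	range covering all regions is reserved first; regions flagged RFLAG_ANON
	are created as anonymous areas, all others are mapped from the file. On
	success each region's \c delta and \c vmstart are updated to reflect the
	actual load address. If \a fixed is true, the regions are mapped at the
	addresses they request.
*/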
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. Once we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}

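//! Deletes the areas of all regions of \a image and resets their IDs.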
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}

/*!	Changes the protection of all read-only segments to really be read-only
	(and executable).
	The areas have to be mapped read/write first, so that relocations can be
	applied. Writable segments of images in compatibility mode (ABI older
	than B_HAIKU_ABI_GCC_2_HAIKU) additionally remain executable.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've
			// already remapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}

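/*!	Registers \a image with the kernel. The extended_image_info is filled in
	from the image's combined text and data ranges, its symbol and string
	tables, and the stat info of \a fd; the ID returned by
	_kern_register_image() is stored in \a image.
*/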
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	extended_image_info info;

	// TODO: set these correctly
	info.basic_info.id = 0;
	info.basic_info.type = image->type;
	info.basic_info.sequence = 0;
	info.basic_info.init_order = 0;
	info.basic_info.init_routine = (void (*)())image->init_routine;
	info.basic_info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.basic_info.device = stat.st_dev;
		info.basic_info.node = stat.st_ino;
	} else {
		info.basic_info.device = -1;
		info.basic_info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.basic_info.name, path, sizeof(info.basic_info.name));
	info.basic_info.text = (void*)textBase;
	info.basic_info.text_size = textEnd - textBase;
	info.basic_info.data = (void*)dataBase;
	info.basic_info.data_size = dataEnd - dataBase;
	info.basic_info.api_version = image->api_version;
	info.basic_info.abi = image->abi;
	info.text_delta = image->regions[0].delta;
	info.symbol_table = image->syms;
	info.symbol_hash = image->symhash;
	info.string_table = image->strtab;
	image->id = _kern_register_image(&info, sizeof(info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}

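/*!	Returns the loaded image matching \a name and \a typeMask. If \a name
	contains a '/', it is compared against the image paths, otherwise against
	the image names.
*/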
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}

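/*!	Returns the loaded image that contains \a address in one of its regions,
	or NULL if there is none.
*/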
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}

/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}