Lines matching B_PAGE_SIZE

465 uint32 bytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
484 uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
499 uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
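
Nearly all of the matches above and below reduce to a handful of idioms. The expression at 465 (repeated at 5081-5091) sizes a per-page protection array that stores one 4-bit protection value per page, so two pages share one byte, hence the (pageCount + 1) / 2 sizing; the page indices computed at 484 and 499 select the nibble. A minimal standalone sketch of that idiom, with illustrative names (kPageSize stands in for B_PAGE_SIZE; the kernel's own names differ):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    static const size_t kPageSize = 4096;  // stand-in for B_PAGE_SIZE

    // Two pages share one byte, hence the (pageCount + 1) / 2 sizing.
    uint8_t* allocate_page_protections(size_t areaSize)
    {
        size_t bytes = (areaSize / kPageSize + 1) / 2;
        return (uint8_t*)calloc(bytes, 1);
    }

    void set_page_protection(uint8_t* bits, size_t pageIndex, uint8_t protection)
    {
        uint8_t& slot = bits[pageIndex / 2];
        if (pageIndex % 2 == 0)
            slot = (slot & 0xf0) | (protection & 0x0f);         // low nibble
        else
            slot = (slot & 0x0f) | (uint8_t)(protection << 4);  // high nibble
    }

    uint8_t get_page_protection(const uint8_t* bits, size_t pageIndex)
    {
        uint8_t slot = bits[pageIndex / 2];
        return pageIndex % 2 == 0 ? slot & 0x0f : slot >> 4;
    }
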
550 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
565 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
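
The Map() calls at 550 and 565, like most later matches, convert between page frame numbers and physical byte addresses: multiply a frame number by B_PAGE_SIZE to get an address, divide an address by B_PAGE_SIZE to get the frame number passed to vm_lookup_page(). Note the widening cast at 1510, 4411, and 5408, which makes the multiply happen in phys_addr_t so it cannot overflow a narrower page-number type. A hedged sketch with stand-in typedefs:

    #include <cstdint>

    typedef uint64_t phys_addr_t;   // stand-ins for the kernel typedefs
    typedef uint32_t page_num_t;
    static const phys_addr_t kPageSize = 4096;

    // page->physical_page_number * B_PAGE_SIZE at the Map() call sites;
    // the cast widens *before* the multiply, as at 1510/4411/5408.
    inline phys_addr_t page_number_to_address(page_num_t pageNumber)
    {
        return (phys_addr_t)pageNumber * kPageSize;
    }

    // physicalAddress / B_PAGE_SIZE at the vm_lookup_page() call sites.
    inline page_num_t address_to_page_number(phys_addr_t address)
    {
        return (page_num_t)(address / kPageSize);
    }
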
844 cache->GuardSize() / B_PAGE_SIZE, true, VM_PRIORITY_USER);
1077 if ((address % B_PAGE_SIZE) != 0
1114 pageAddress += B_PAGE_SIZE) {
1221 guardPages = guardSize / B_PAGE_SIZE;
1305 if (wiring == B_CONTIGUOUS && size == B_PAGE_SIZE
1358 reservedPages += size / B_PAGE_SIZE;
1377 size / B_PAGE_SIZE, physicalAddressRestrictions, priority);
1402 isStack ? (min_c(2, size / B_PAGE_SIZE - guardPages)) : 0, guardPages,
1439 address += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
1443 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
1446 - KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
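
The guard-page arithmetic at 1221, 1402, and 1443-1446 converts a byte-sized guard region into whole pages, keeps those pages out of the usable stack range, and (per 1402) precommits at most two of the remaining pages for stacks. A simplified sketch of that bookkeeping, assuming illustrative names throughout:

    #include <cstddef>

    static const size_t kPageSize = 4096;  // B_PAGE_SIZE stand-in

    struct StackLayout {
        size_t guardPages;      // pages left unmapped, cf. line 1221
        size_t preCommitPages;  // pages committed up front, cf. line 1402
    };

    StackLayout layout_stack(size_t areaSize, size_t guardSize)
    {
        StackLayout layout;
        layout.guardPages = guardSize / kPageSize;
        size_t usablePages = areaSize / kPageSize - layout.guardPages;
        // min_c(2, ...) in the original: precommit at most two pages.
        layout.preCommitPages = usablePages < 2 ? usablePages : 2;
        return layout;
    }
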
1476 virtualAddress += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
1484 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1510 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
1517 + (area->Size() - 1); virtualAddress += B_PAGE_SIZE,
1518 offset += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
1519 page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1557 for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
1599 mapOffset = physicalAddress % B_PAGE_SIZE;
1663 for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
1711 if (vecs[i].base % B_PAGE_SIZE != 0
1712 || vecs[i].length % B_PAGE_SIZE != 0) {
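
The check at 1711-1712 rejects I/O vectors whose base or length is not page aligned before the loop at 1757-1769 consumes them page by page. The same validation as a standalone sketch (the vec struct is a stand-in for the kernel's):

    #include <cstddef>
    #include <cstdint>

    struct generic_io_vec {                  // stand-in for the kernel struct
        uint64_t base;
        uint64_t length;
    };

    static const uint64_t kPageSize = 4096;  // B_PAGE_SIZE stand-in

    bool vecs_page_aligned(const generic_io_vec* vecs, size_t count)
    {
        for (size_t i = 0; i < count; i++) {
            if (vecs[i].base % kPageSize != 0
                || vecs[i].length % kPageSize != 0)
                return false;
        }
        return true;
    }
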
1757 for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
1769 vecOffset += B_PAGE_SIZE;
1852 page_num_t firstPage = cacheOffset / B_PAGE_SIZE;
1853 page_num_t endPage = firstPage + area->Size() / B_PAGE_SIZE;
1867 baseAddress + (page->cache_offset * B_PAGE_SIZE - cacheOffset),
1890 offset = ROUNDDOWN(offset, B_PAGE_SIZE);
2162 offset += B_PAGE_SIZE) {
2321 lowerCache->GuardSize() / B_PAGE_SIZE,
2354 copiedPage->physical_page_number * B_PAGE_SIZE,
2355 page->physical_page_number * B_PAGE_SIZE);
2361 page->cache_offset * B_PAGE_SIZE);
2619 status = cache->Commit(cache->page_count * B_PAGE_SIZE,
2635 && cache->page_count * 2 < area->Size() / B_PAGE_SIZE) {
2673 page_num_t firstPageOffset = area->cache_offset / B_PAGE_SIZE;
2675 = firstPageOffset + area->Size() / B_PAGE_SIZE;
2914 int32 offset = address & (B_PAGE_SIZE - 1);
2915 if (num * itemSize + offset > B_PAGE_SIZE) {
2916 num = (B_PAGE_SIZE - offset) / itemSize;
2920 address = ROUNDDOWN(address, B_PAGE_SIZE);
3016 copyAddress = ROUNDDOWN(copyAddress, B_PAGE_SIZE);
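
Lines 2914-2920 clamp an access so it never crosses a page boundary: take the offset within the page, shrink the item count if it would spill past the page end, then round the address down to the page base (2920, and again at 3016) before mapping it. Sketch:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t addr_t;               // stand-in typedefs
    static const addr_t kPageSize = 4096;

    // Returns how many itemSize-sized items fit between `address` and
    // the end of its page (the 2914-2916 pattern).
    size_t clamp_items_to_page(addr_t address, size_t num, size_t itemSize)
    {
        addr_t offset = address & (kPageSize - 1);  // offset within the page
        if (num * itemSize + offset > kPageSize)
            num = (kPageSize - offset) / itemSize;
        return num;
    }
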
3417 sAvailableMemory, (phys_addr_t)vm_page_num_pages() * B_PAGE_SIZE);
3472 physicalAddress = page->physical_page_number * B_PAGE_SIZE;
3475 physicalAddress -= physicalAddress % B_PAGE_SIZE;
3532 virtualAddress -= virtualAddress % B_PAGE_SIZE;
3626 for (addr_t current = start; current < end; current += B_PAGE_SIZE) {
3632 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
3726 address = (void*)ROUNDDOWN(image->text_region.start, B_PAGE_SIZE);
3734 address = (void*)ROUNDDOWN(image->data_region.start, B_PAGE_SIZE);
3899 args->physical_allocated_range[i].size += B_PAGE_SIZE;
3900 return nextPage / B_PAGE_SIZE;
3908 nextPage = args->physical_allocated_range[i].start - B_PAGE_SIZE;
3919 args->physical_allocated_range[i].start -= B_PAGE_SIZE;
3920 args->physical_allocated_range[i].size += B_PAGE_SIZE;
3921 return nextPage / B_PAGE_SIZE;
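
The early-boot allocator at 3899-3921 hands out single pages by growing one of the boot loader's physical_allocated_range entries, either upward past its end (3899-3900) or downward before its start (3908-3921), returning the new page's frame number. A simplified sketch of the downward branch (the struct mirrors the kernel_args field names; the function name is illustrative):

    #include <cstdint>

    typedef uint64_t phys_addr_t;
    static const phys_addr_t kPageSize = 4096;

    struct addr_range {      // mirrors kernel_args::physical_allocated_range[i]
        phys_addr_t start;
        phys_addr_t size;
    };

    // Grow `range` downward by one page and return the new page's frame
    // number (the 3908-3921 branch; 3899-3900 grows upward instead).
    uint64_t allocate_page_before(addr_range& range)
    {
        phys_addr_t nextPage = range.start - kPageSize;
        range.start -= kPageSize;
        range.size += kPageSize;
        return nextPage / kPageSize;
    }
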
3949 for (uint32 i = 0; i < PAGE_ALIGN(physicalSize) / B_PAGE_SIZE; i++) {
3956 arch_vm_translation_map_early_map(args, virtualBase + i * B_PAGE_SIZE,
3957 physicalAddress * B_PAGE_SIZE, attributes,
3980 sAvailableMemory = vm_page_num_pages() * B_PAGE_SIZE;
4028 address = (void*)ROUNDDOWN(heapBase, B_PAGE_SIZE);
4051 void* lastPage = (void*)ROUNDDOWN(~(addr_t)0, B_PAGE_SIZE);
4052 vm_block_address_range("overflow protection", lastPage, B_PAGE_SIZE);
4056 (void *)ROUNDDOWN(0xcccccccc, B_PAGE_SIZE), B_PAGE_SIZE * 64);
4060 (void *)ROUNDDOWN(0xdeadbeef, B_PAGE_SIZE), B_PAGE_SIZE * 64);
4073 if (vm_page_num_free_pages() >= 200 * 1024 * 1024 / B_PAGE_SIZE) {
4078 ROUNDUP(kCacheInfoTableCount * sizeof(cache_info), B_PAGE_SIZE),
4201 addr_t pageAddress = ROUNDDOWN(address, B_PAGE_SIZE);
4411 vec.base = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
4412 generic_size_t bytesRead = vec.length = B_PAGE_SIZE;
4480 vm_memcpy_physical_page(page->physical_page_number * B_PAGE_SIZE,
4481 sourcePage->physical_page_number * B_PAGE_SIZE);
4518 addr_t address = ROUNDDOWN(originalAddress, B_PAGE_SIZE);
4637 && (mappedPage = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
4661 if (area->AddWaiterIfWired(&waiter, address, B_PAGE_SIZE,
4966 info->ram_size = cache->page_count * B_PAGE_SIZE;
4975 // is newSize a multiple of B_PAGE_SIZE?
4976 if (newSize & (B_PAGE_SIZE - 1))
5081 uint32 bytes = (newSize / B_PAGE_SIZE + 1) / 2;
5091 uint32 offset = (oldSize / B_PAGE_SIZE + 1) / 2;
5096 if ((oldSize / B_PAGE_SIZE) % 2 != 0) {
5183 if (size > B_PAGE_SIZE || ROUNDDOWN((addr_t)unsafeMemory, B_PAGE_SIZE)
5184 != ROUNDDOWN((addr_t)unsafeMemory + size - 1, B_PAGE_SIZE)) {
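
The ROUNDDOWN-equality test at 5183-5184 is a compact single-page containment check: if the first and last byte of a range round down to the same page base, the range crosses no page boundary. Sketch, with one common ROUNDDOWN definition assumed:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t addr_t;
    static const addr_t kPageSize = 4096;

    #define ROUNDDOWN(a, b) ((a) / (b) * (b))  // one common definition

    bool fits_in_one_page(addr_t memory, size_t size)
    {
        return size <= kPageSize
            && ROUNDDOWN(memory, kPageSize)
                == ROUNDDOWN(memory + size - 1, kPageSize);
    }
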
5231 phys_addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE
5232 + (addr_t)unsafeMemory % B_PAGE_SIZE;
5326 addr_t pageAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
5327 info->range.SetTo(pageAddress, B_PAGE_SIZE, writable, false);
5376 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5408 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
5409 + address % B_PAGE_SIZE;
5476 addr_t lockBaseAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
5477 addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
5553 for (; nextAddress != areaEnd; nextAddress += B_PAGE_SIZE) {
5560 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
5635 addr_t lockBaseAddress = ROUNDDOWN((addr_t)address, B_PAGE_SIZE);
5636 addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
5710 for (; nextAddress != areaEnd; nextAddress += B_PAGE_SIZE) {
5717 && (page = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
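
Both locking paths (5476-5477 and 5635-5636) first expand the requested byte range to whole pages, rounding the base down and the end up, so the loops at 5553 and 5710 can step in B_PAGE_SIZE increments. Sketch:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t addr_t;
    static const addr_t kPageSize = 4096;

    #define ROUNDDOWN(a, b) ((a) / (b) * (b))
    #define ROUNDUP(a, b)   (((a) + (b) - 1) / (b) * (b))

    void page_cover(addr_t address, size_t numBytes,
        addr_t& lockBase, addr_t& lockEnd)
    {
        lockBase = ROUNDDOWN(address, kPageSize);
        lockEnd = ROUNDUP(address + numBytes, kPageSize);
        // (lockEnd - lockBase) / kPageSize whole pages now cover the range.
    }
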
5780 addr_t pageOffset = virtualAddress & (B_PAGE_SIZE - 1);
5811 addr_t bytes = min_c(numBytes - offset, B_PAGE_SIZE);
5830 if (bytes > B_PAGE_SIZE - pageOffset)
5831 bytes = B_PAGE_SIZE - pageOffset;
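
The copy path at 5780-5831 walks a buffer one page at a time: the chunk starts as the bytes remaining and is clamped to what is left of the current page (5830-5831). A sketch of the loop shape, with the per-page copy itself elided:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t addr_t;
    static const addr_t kPageSize = 4096;

    void copy_page_wise(addr_t virtualAddress, size_t numBytes)
    {
        while (numBytes > 0) {
            addr_t pageOffset = virtualAddress & (kPageSize - 1);
            size_t bytes = numBytes;
            if (bytes > kPageSize - pageOffset)
                bytes = kPageSize - pageOffset;  // never cross the page end
            // ... look up / map the page and copy `bytes` here ...
            virtualAddress += bytes;
            numBytes -= bytes;
        }
    }
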
6366 || (addr_t)address % B_PAGE_SIZE != 0) {
6395 || (addr_t)address % B_PAGE_SIZE != 0) {
6423 if ((address % B_PAGE_SIZE) != 0)
6516 pageAddress < currentAddress; pageAddress += B_PAGE_SIZE) {
6530 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
6568 if ((address % B_PAGE_SIZE) != 0)
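
Finally, the syscall entry points at 6366, 6395, 6423, and 6568 all reject unaligned user addresses before doing any work. The recurring validation, as a standalone sketch:

    #include <cstdint>

    typedef uintptr_t addr_t;
    static const addr_t kPageSize = 4096;  // B_PAGE_SIZE stand-in

    // The kernel returns an error (B_BAD_VALUE) when this fails.
    bool address_page_aligned(const void* address)
    {
        return (addr_t)address % kPageSize == 0;
    }
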