diff --git a/targets/_any/include/os/memory.hpp b/targets/_any/include/os/memory.hpp index 5d40d33..1e887c3 100644 --- a/targets/_any/include/os/memory.hpp +++ b/targets/_any/include/os/memory.hpp @@ -51,7 +51,7 @@ struct MemoryRange } }; -void setupPaging(std::span memoryRanges) noexcept; +[[nodiscard]] bool setupPaging(std::span memoryRanges) noexcept; [[nodiscard]] const char* memoryTypeName(MemoryType type) noexcept; [[nodiscard]] void* allocatePages(unsigned numConsecutive) noexcept; @@ -67,6 +67,14 @@ inline void identityMapRegion(void* start, std::size_t bytes) noexcept std::uint64_t lastPage = std::bit_cast(static_cast(start) + bytes) / 4096 + 1; setupIdentityPaging(firstPage, lastPage); } + +template +inline T* pageToPointer(std::uint64_t page) noexcept +{ + return std::bit_cast(page << 12); +} } +void __ba_initInitialHeap() noexcept; + #endif // !defined(BAD_APPLE_OS_OS_MEMORY_HPP_INCLUDED) diff --git a/targets/_any/src/cstdlib/stdlib.cpp b/targets/_any/src/cstdlib/stdlib.cpp index 43d1d70..2196b3c 100644 --- a/targets/_any/src/cstdlib/stdlib.cpp +++ b/targets/_any/src/cstdlib/stdlib.cpp @@ -23,13 +23,14 @@ struct AllocInfo static_assert(sizeof(MallocBlock) <= alignof(max_align_t)); static_assert(sizeof(AllocInfo) <= alignof(max_align_t)); -MallocBlock* gNextBlock = []() +MallocBlock* gNextBlock = nullptr; +} + +void __ba_initInitialHeap() noexcept { - MallocBlock* initialBlock = reinterpret_cast(gInitMallocSpace); - initialBlock->elements = INIT_MALLOC_SPACE_ELEMENTS; - initialBlock->nextBlock = nullptr; - return initialBlock; -}(); + gNextBlock = reinterpret_cast(gInitMallocSpace); + gNextBlock->elements = INIT_MALLOC_SPACE_ELEMENTS; + gNextBlock->nextBlock = nullptr; } void __ba_registerAllocatableMemory(void* memory, size_t size) noexcept diff --git a/targets/_any/src/os/memory.cpp b/targets/_any/src/os/memory.cpp index bfdee4c..287c4ab 100644 --- a/targets/_any/src/os/memory.cpp +++ b/targets/_any/src/os/memory.cpp @@ -3,7 +3,6 @@ #include #include 
-#include #include #include @@ -11,6 +10,9 @@ namespace baos { namespace { +inline constexpr std::size_t BIG_PAGE_SIZE = 512; +inline constexpr std::size_t BIG_PAGE_ALIGNMENT = 512; + using PageTable = std::array; constinit PageTable gPageMapLevel4 alignas(4096); @@ -40,13 +42,25 @@ void splitPageToIndices(std::uint64_t page, std::uint16_t& outPML4Entry, std::ui outPTEntry = page & 0x1FF; } +bool allocateIdentityMappedPages(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept; PageTable& allocatePageTable() noexcept { - PageTable* pageTable = std::bit_cast(&*gPageTableSpace.begin()); - if (!pageTable) + if (gPageTableSpace.size() < sizeof(PageTable)) { - int i = 5; + gPageTableSpace = gNextPageTableSpace; + + // allocate new space + std::uint64_t page = 0; + if (allocateIdentityMappedPages(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page)) + { + gNextPageTableSpace = {pageToPointer(page), 2ul << 20}; + } + else + { + gNextPageTableSpace = {}; // we're quite short on memory ... + } } + PageTable* pageTable = std::bit_cast(&*gPageTableSpace.begin()); ::new(pageTable) PageTable; gPageTableSpace = {gPageTableSpace.begin() + sizeof(PageTable), gPageTableSpace.end()}; return *pageTable; @@ -145,80 +159,86 @@ PageTable& setupPageTableEntry(PageTableEntry& entry, std::uint64_t page) noexce return *(::new(std::bit_cast(entry.address << 12)) PageTable); } - - - -void cmdDumpPageTable() noexcept +/** Finds a free memory range using the provided constraints (size and page alignment). 
*/
+bool findFreeMemory(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage, MemoryRange*& outRange) noexcept
 {
-    std::uint64_t pml4Addr = std::bit_cast(&gPageMapLevel4);
-    std::printf("pml4 address: 0x%lX\n", pml4Addr);
-    baos::PageTableEntry* pml4 = std::bit_cast(pml4Addr);
-    auto printPDE = [](const baos::PageTableEntry& pde)
+    // first try to find a range that is already aligned properly
+    for (MemoryRange& range : gUsableMemoryRanges)
     {
-        std::printf("%s%s%s%s%s%s%s%s%s, address=%lX\n",
-            pde.present ? " P" : "",
-            pde.writable ? " W" : "",
-            pde.user ? " U" : "",
-            pde.writeThrough ? " WT" : "",
-            pde.cacheDisabled ? " CD" : "",
-            pde.accessed ? " A" : "",
-            pde.dirty ? " D" : "",
-            pde.pageSize ? " PS" : "",
-            pde.executeDisable ? " XD" : "",
-            pde.address * 4096);
-    };
-    for (unsigned pml4e = 0; pml4e < 512; ++pml4e)
-    {
-        if (!pml4[pml4e].present) {
-            continue;
-        }
-        std::printf("%d]", pml4e);
-        printPDE(pml4[pml4e]);
-
-        baos::PageTableEntry* pdp = std::bit_cast(pml4[pml4e].address * 4096);
-        for (unsigned pdpe = 0; pdpe < 512; ++pdpe)
+        if (range.numPages >= numPages && range.pageBase % pageAlignment == 0)
         {
-            if (!pdp[pdpe].present) {
-                continue;
-            }
-            std::printf("%d.%d]", pml4e, pdpe);
-            printPDE(pdp[pdpe]);
-
-            baos::PageTableEntry* pd = std::bit_cast(pdp[pdpe].address * 4096);
-            for (unsigned pde = 0; pde < 512; ++pde)
-            {
-                if (!pd[pde].present) {
-                    continue;
-                }
-                std::printf("%d.%d.%d]", pml4e, pdpe, pde);
-                printPDE(pd[pde]);
-            }
+            outPage = range.pageBase;
+            outRange = &range;
+            return true;
         }
     }
-}
-
-bool canFitPageTableSpace(MemoryRange range, std::uint64_t& outBase) noexcept
-{
-    if (range.type != MemoryType::USABLE) {
-        return false;
+    if (pageAlignment == 1) {
+        return false; // no need to search further, everything is full!
}
-    const std::uint64_t possibleBase = std::alignUp(range.pageBase, 512ul); // needs to be 2MB aligned
-    if (range.numPages - (possibleBase - range.pageBase) >= 512)
+    // now just try to find a range that could be used (needs to be split then)
+    for (MemoryRange& range : gUsableMemoryRanges)
     {
-        outBase = possibleBase;
-        return true;
+        const std::uint64_t possibleBase = std::alignUp(range.pageBase, pageAlignment); // align to the requested boundary
+        if (range.numPages - (possibleBase - range.pageBase) >= numPages)
+        {
+            outPage = possibleBase;
+            outRange = &range;
+            return true;
+        }
     }
     return false;
 }
+
+/** Allocates a memory range (sets it as "used", but does not setup paging for it yet). */
+bool allocateMemoryRange(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept
+{
+    std::uint64_t page = 0;
+    MemoryRange* range = nullptr;
+    if (!findFreeMemory(numPages, pageAlignment, page, range))
+    {
+        return false;
+    }
+
+    outPage = page;
+    // if the memory is allocated from the beginning of the range, just shrink the range
+    if (page == range->pageBase)
+    {
+        range->pageBase += numPages;
+        range->numPages -= numPages;
+        return true;
+    }
+    // if the memory is allocated from the end of the range, again, just shrink it
+    if (page + numPages == range->pageBase + range->numPages)
+    {
+        range->numPages -= numPages;
+        return true;
+    }
+
+    // otherwise split the range: *range keeps the pages below the allocation,
+    // the new range gets everything above it
+    MemoryRange newRange = {
+        .pageBase = page + numPages,
+        .numPages = (range->pageBase + range->numPages) - (page + numPages)
+    };
+    range->numPages = page - range->pageBase;
+    addUsableRange(newRange);
+    return true;
+}
-void setupPaging(std::span memoryRanges) noexcept
+/** Allocates the pages and then identity maps them.
*/ +bool allocateIdentityMappedPages(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept +{ + std::uint64_t page = 0; + if (!allocateMemoryRange(numPages, pageAlignment, page)) { + return false; + } + setupIdentityPaging2M(page); + outPage = page; + return true; +} +} + +bool setupPaging(std::span memoryRanges) noexcept { - // addUsableRange({ - // .pageBase = std::bit_cast(gInitPages.data()) >> 12, - // .numPages = gInitPages.size() / 4096, - // .type = MemoryType::USABLE - // }); std::uint64_t firstKernelPage = std::bit_cast(&gKernelStart) / 4096; std::uint64_t lastKernelPage = std::bit_cast(&gKernelEnd) / 4096 + 1; for (MemoryRange range : memoryRanges) @@ -235,37 +255,42 @@ void setupPaging(std::span memoryRanges) noexcept range.pageBase = lastKernelPage; } addUsableRange(range); - - std::uint64_t tableSpaceBase = 0; - if (gPageTableSpace.empty() && canFitPageTableSpace(range, tableSpaceBase)) - { - std::uint8_t* tableSpacePtr = std::bit_cast(tableSpaceBase * 4096); - gPageTableSpace = {tableSpacePtr, 2ul << 30}; - setupIdentityPaging2M(tableSpaceBase); - } } } - // start by setting up identity paging for the kernel - // __get_cpuid() + + // find space for the initial paging space and map it + std::uint64_t page = 0; + if (!allocateMemoryRange(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page)) { + return false; + } + // note that contrary to "regular" allocation, the assignment has to happen BEFORE setting up paging + // otherwise there would be no space for the page table + gPageTableSpace = {pageToPointer(page), 2ul << 20}; + setupIdentityPaging2M(page); + + // the second pointer can be allocated normally + if (!allocateIdentityMappedPages(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page)) { + return false; + } + gNextPageTableSpace = {pageToPointer(page), 2ul << 20}; + setupIdentityPaging(firstKernelPage, lastKernelPage); - // std::printf("Kernel pages: %ld-%ld\n", firstKernelPage, lastKernelPage); - // setup identity paging for all reserved 
ranges so we can continue using them
-    // for (const MemoryRange& range : memoryRanges)
-    // {
-    //     if (range.type == MemoryType::RESERVED || range.type == MemoryType::MMIO)
-    //     {
-    //         setupIdentityPaging(range.pageBase, range.pageBase + range.numPages);
-    //     }
-    // }
+    for (const MemoryRange& range : memoryRanges)
+    {
+        if (range.type == MemoryType::RESERVED || range.type == MemoryType::MMIO)
+        {
+            setupIdentityPaging(range.pageBase, range.pageBase + range.numPages);
+        }
+    }
 
-    // cmdDumpPageTable();
     std::uint64_t cr3 = std::bit_cast<std::uint64_t>(gPageMapLevel4.data());
     __asm__ __volatile__ (
         "mov %0, %%cr3"
         :
         : "a"(cr3)
     );
+    return true;
 }
 
 const char* memoryTypeName(MemoryType type) noexcept
diff --git a/targets/x86_64/src/kernel/startup.cpp b/targets/x86_64/src/kernel/startup.cpp
index fd7a674..0e33463 100644
--- a/targets/x86_64/src/kernel/startup.cpp
+++ b/targets/x86_64/src/kernel/startup.cpp
@@ -231,14 +231,23 @@ void kernel_main()
     // done initializing OS stuff, enable interrupts
     __asm__ __volatile__("sti");
 
-    initGlobals();
+    // setup the "initial heap", required so we can use std::vector for detecting the memory ranges and stuff
+    __ba_initInitialHeap();
 
     // init the heap (required for the double buffer)
     const std::vector<MemoryRange> ranges = detectMemoryRangesFromEfi(gBootInfo->memoryMap);
-    setupPaging({ranges.begin(), ranges.end()}); // TODO: move this up when it's done
+    if (!setupPaging({ranges.begin(), ranges.end()}))
+    {
+        // paging is essential — NOTE(review): replace with the project's panic/abort helper if one exists
+        for (;;) { __asm__ __volatile__("hlt"); }
+    }
+
+    // make sure boot info and the frame buffer are accessible
     identityMapRegion(gBootInfo, sizeof(*gBootInfo));
     identityMapRegion(gBootInfo->displayInfo.frameBufferBase, gBootInfo->displayInfo.frameBufferPitch * gBootInfo->displayInfo.frameBufferHeight);
 
+    initGlobals();
+
     // initHeapFromEfiMemoryMap(gBootInfo->memoryMap);
 
     // initialize the framebuffer