(kind of) finished the paging setup. We'll see...

This commit is contained in:
Patrick 2024-01-26 18:03:23 +01:00
parent 836d589c9b
commit 9c70cfa846
4 changed files with 133 additions and 94 deletions

View File

@ -51,7 +51,7 @@ struct MemoryRange
}
};
void setupPaging(std::span<const MemoryRange> memoryRanges) noexcept;
[[nodiscard]] bool setupPaging(std::span<const MemoryRange> memoryRanges) noexcept;
[[nodiscard]] const char* memoryTypeName(MemoryType type) noexcept;
[[nodiscard]] void* allocatePages(unsigned numConsecutive) noexcept;
@ -67,6 +67,14 @@ inline void identityMapRegion(void* start, std::size_t bytes) noexcept
std::uint64_t lastPage = std::bit_cast<std::uint64_t>(static_cast<std::uint8_t*>(start) + bytes) / 4096 + 1;
setupIdentityPaging(firstPage, lastPage);
}
/** Converts a 4 KiB page number into a pointer to the start of that page.
 *  Only meaningful for pages that are (or will be) identity mapped. */
template<typename T = void>
inline T* pageToPointer(std::uint64_t page) noexcept
{
    const std::uint64_t byteAddress = page * 4096; // same as page << 12
    return std::bit_cast<T*>(byteAddress);
}
}
void __ba_initInitialHeap() noexcept;
#endif // !defined(BAD_APPLE_OS_OS_MEMORY_HPP_INCLUDED)

View File

@ -23,13 +23,14 @@ struct AllocInfo
static_assert(sizeof(MallocBlock) <= alignof(max_align_t));
static_assert(sizeof(AllocInfo) <= alignof(max_align_t));
MallocBlock* gNextBlock = []()
MallocBlock* gNextBlock = nullptr;
}
void __ba_initInitialHeap() noexcept
{
MallocBlock* initialBlock = reinterpret_cast<MallocBlock*>(gInitMallocSpace);
initialBlock->elements = INIT_MALLOC_SPACE_ELEMENTS;
initialBlock->nextBlock = nullptr;
return initialBlock;
}();
gNextBlock = reinterpret_cast<MallocBlock*>(gInitMallocSpace);
gNextBlock->elements = INIT_MALLOC_SPACE_ELEMENTS;
gNextBlock->nextBlock = nullptr;
}
void __ba_registerAllocatableMemory(void* memory, size_t size) noexcept

View File

@ -3,7 +3,6 @@
#include <array>
#include <bit>
#include <cstdio>
#include <memory>
#include <new>
@ -11,6 +10,9 @@ namespace baos
{
namespace
{
inline constexpr std::size_t BIG_PAGE_SIZE = 512;
inline constexpr std::size_t BIG_PAGE_ALIGNMENT = 512;
using PageTable = std::array<PageTableEntry, 512>;
constinit PageTable gPageMapLevel4 alignas(4096);
@ -40,13 +42,25 @@ void splitPageToIndices(std::uint64_t page, std::uint16_t& outPML4Entry, std::ui
outPTEntry = page & 0x1FF;
}
bool allocateIdentityMappedPages(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept;
PageTable& allocatePageTable() noexcept
{
PageTable* pageTable = std::bit_cast<PageTable*>(&*gPageTableSpace.begin());
if (!pageTable)
if (gPageTableSpace.size() < sizeof(PageTable))
{
int i = 5;
gPageTableSpace = gNextPageTableSpace;
// allocate new space
std::uint64_t page = 0;
if (allocateIdentityMappedPages(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page))
{
gNextPageTableSpace = {pageToPointer<std::uint8_t>(page), 2ul << 20};
}
else
{
gNextPageTableSpace = {}; // we're quite short on memory ...
}
}
PageTable* pageTable = std::bit_cast<PageTable*>(&*gPageTableSpace.begin());
::new(pageTable) PageTable;
gPageTableSpace = {gPageTableSpace.begin() + sizeof(PageTable), gPageTableSpace.end()};
return *pageTable;
@ -145,80 +159,86 @@ PageTable& setupPageTableEntry(PageTableEntry& entry, std::uint64_t page) noexce
return *(::new(std::bit_cast<void*>(entry.address << 12)) PageTable);
}
void cmdDumpPageTable() noexcept
/** Finds a free memory range using the provided constraints (size and page alignment). */
bool findFreeMemory(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage, MemoryRange*& outRange) noexcept
{
std::uint64_t pml4Addr = std::bit_cast<std::uint64_t>(&gPageMapLevel4);
std::printf("pml4 address: 0x%lX\n", pml4Addr);
baos::PageTableEntry* pml4 = std::bit_cast<baos::PageTableEntry*>(pml4Addr);
auto printPDE = [](const baos::PageTableEntry& pde)
// first try to find a range that already is already aligned properly
for (MemoryRange& range : gUsableMemoryRanges)
{
std::printf("%s%s%s%s%s%s%s%s%s, address=%lX\n",
pde.present ? " P" : "",
pde.writable ? " W" : "",
pde.user ? " U" : "",
pde.writeThrough ? " WT" : "",
pde.cacheDisabled ? " CD" : "",
pde.accessed ? " A" : "",
pde.dirty ? " D" : "",
pde.pageSize ? " PS" : "",
pde.executeDisable ? " XD" : "",
pde.address * 4096);
};
for (unsigned pml4e = 0; pml4e < 512; ++pml4e)
if (range.numPages >= numPages && range.pageBase % pageAlignment == 0)
{
if (!pml4[pml4e].present) {
continue;
}
std::printf("%d]", pml4e);
printPDE(pml4[pml4e]);
baos::PageTableEntry* pdp = std::bit_cast<baos::PageTableEntry*>(pml4[pml4e].address * 4096);
for (unsigned pdpe = 0; pdpe < 512; ++pdpe)
{
if (!pdp[pdpe].present) {
continue;
}
std::printf("%d.%d]", pml4e, pdpe);
printPDE(pdp[pdpe]);
baos::PageTableEntry* pd = std::bit_cast<baos::PageTableEntry*>(pdp[pdpe].address * 4096);
for (unsigned pde = 0; pde < 512; ++pde)
{
if (!pd[pde].present) {
continue;
}
std::printf("%d.%d.%d]", pml4e, pdpe, pde);
printPDE(pd[pde]);
}
}
}
}
bool canFitPageTableSpace(MemoryRange range, std::uint64_t& outBase) noexcept
{
if (range.type != MemoryType::USABLE) {
return false;
}
const std::uint64_t possibleBase = std::alignUp(range.pageBase, 512ul); // needs to be 2MB aligned
if (range.numPages - (possibleBase - range.pageBase) >= 512)
{
outBase = possibleBase;
outPage = range.pageBase;
outRange = &range;
return true;
}
}
if (pageAlignment == 1) {
return false; // no need to search further, everything is full!
}
// now just try to find a range that could be used (needs to be split then)
for (MemoryRange& range : gUsableMemoryRanges)
{
const std::uint64_t possibleBase = std::alignUp(range.pageBase, pageAlignment); // needs to be 2MB aligned
if (range.numPages - (possibleBase - range.pageBase) >= numPages)
{
outPage = possibleBase;
outRange = &range;
return true;
}
}
return false;
}
/** Allocates a memory range (sets it as "used", but does not setup paging for it yet). */
bool allocateMemoryRange(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept
{
    std::uint64_t page = 0;
    MemoryRange* range = nullptr;
    if (!findFreeMemory(numPages, pageAlignment, page, range))
    {
        return false; // no (suitably aligned) free memory left
    }
    outPage = page;
    // if the memory is allocated from the beginning of the range, just shrink the range
    if (page == range->pageBase)
    {
        range->pageBase += numPages;
        range->numPages -= numPages;
        return true;
    }
    // if the memory is allocated from the end of the range, again, just shrink it
    if (page + numPages == range->pageBase + range->numPages)
    {
        range->numPages -= numPages;
        return true;
    }
    // otherwise split the range: everything AFTER the allocation becomes a new range ...
    MemoryRange newRange = {
        .pageBase = page + numPages,
        .numPages = (range->pageBase + range->numPages) - (page + numPages),
        .type = range->type // keep the original type instead of value-initializing it
    };
    // ... and the original range is truncated to the part BEFORE the allocation.
    // BUGFIX: the old code did `range->numPages -= numPages`, which left the front
    // range overlapping both the allocated pages and the new tail range, so the
    // same pages could be handed out twice.
    range->numPages = page - range->pageBase;
    addUsableRange(newRange);
    return true;
}
/** Allocates the pages and then identity maps them.
 *  On success writes the first allocated page number to outPage; on failure
 *  outPage is left untouched. */
bool allocateIdentityMappedPages(std::size_t numPages, std::size_t pageAlignment, std::uint64_t& outPage) noexcept
{
    std::uint64_t allocatedPage = 0;
    const bool allocated = allocateMemoryRange(numPages, pageAlignment, allocatedPage);
    if (allocated)
    {
        // map the freshly reserved pages 1:1 so they are usable immediately
        setupIdentityPaging2M(allocatedPage);
        outPage = allocatedPage;
    }
    return allocated;
}
}
void setupPaging(std::span<const MemoryRange> memoryRanges) noexcept
bool setupPaging(std::span<const MemoryRange> memoryRanges) noexcept
{
// addUsableRange({
// .pageBase = std::bit_cast<std::uint64_t>(gInitPages.data()) >> 12,
// .numPages = gInitPages.size() / 4096,
// .type = MemoryType::USABLE
// });
std::uint64_t firstKernelPage = std::bit_cast<std::uint64_t>(&gKernelStart) / 4096;
std::uint64_t lastKernelPage = std::bit_cast<std::uint64_t>(&gKernelEnd) / 4096 + 1;
for (MemoryRange range : memoryRanges)
@ -235,37 +255,42 @@ void setupPaging(std::span<const MemoryRange> memoryRanges) noexcept
range.pageBase = lastKernelPage;
}
addUsableRange(range);
}
}
std::uint64_t tableSpaceBase = 0;
if (gPageTableSpace.empty() && canFitPageTableSpace(range, tableSpaceBase))
{
std::uint8_t* tableSpacePtr = std::bit_cast<std::uint8_t*>(tableSpaceBase * 4096);
gPageTableSpace = {tableSpacePtr, 2ul << 30};
setupIdentityPaging2M(tableSpaceBase);
// find space for the initial paging space and map it
std::uint64_t page = 0;
if (!allocateMemoryRange(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page)) {
return false;
}
// note that contrary to "regular" allocation, the assignment has to happen BEFORE setting up paging
// otherwise there would be no space for the page table
gPageTableSpace = {pageToPointer<std::uint8_t>(page), 2ul << 20};
setupIdentityPaging2M(page);
// the second pointer can be allocated normally
if (!allocateIdentityMappedPages(BIG_PAGE_SIZE, BIG_PAGE_ALIGNMENT, page)) {
return false;
}
}
// start by setting up identity paging for the kernel
// __get_cpuid()
gNextPageTableSpace = {pageToPointer<std::uint8_t>(page), 2ul << 20};
setupIdentityPaging(firstKernelPage, lastKernelPage);
// std::printf("Kernel pages: %ld-%ld\n", firstKernelPage, lastKernelPage);
// setup identity paging for all reserved ranges so we can continue using them
// for (const MemoryRange& range : memoryRanges)
// {
// if (range.type == MemoryType::RESERVED || range.type == MemoryType::MMIO)
// {
// setupIdentityPaging(range.pageBase, range.pageBase + range.numPages);
// }
// }
for (const MemoryRange& range : memoryRanges)
{
if (range.type == MemoryType::RESERVED || range.type == MemoryType::MMIO)
{
setupIdentityPaging(range.pageBase, range.pageBase + range.numPages);
}
}
// cmdDumpPageTable();
std::uint64_t cr3 = std::bit_cast<std::uint64_t>(gPageMapLevel4.data());
__asm__ __volatile__ (
"mov %0, %%cr3"
:
: "a"(cr3)
);
return true;
}
const char* memoryTypeName(MemoryType type) noexcept

View File

@ -231,14 +231,19 @@ void kernel_main()
// done initializing OS stuff, enable interrupts
__asm__ __volatile__("sti");
initGlobals();
// setup the "initial heap", required so we can use std::vector for detecting the memory ranges and stuff
__ba_initInitialHeap();
// init the heap (required for the double buffer)
const std::vector<MemoryRange> ranges = detectMemoryRangesFromEfi(gBootInfo->memoryMap);
setupPaging({ranges.begin(), ranges.end()}); // TODO: move this up when it's done
setupPaging({ranges.begin(), ranges.end()});
// make sure boot info and the frame buffer are accessible
identityMapRegion(gBootInfo, sizeof(*gBootInfo));
identityMapRegion(gBootInfo->displayInfo.frameBufferBase, gBootInfo->displayInfo.frameBufferPitch * gBootInfo->displayInfo.frameBufferHeight);
initGlobals();
// initHeapFromEfiMemoryMap(gBootInfo->memoryMap);
// initialize the framebuffer