diff --git a/source/mijin/memory/object_pool.hpp b/source/mijin/memory/object_pool.hpp
index 397172e..fcef2c8 100644
--- a/source/mijin/memory/object_pool.hpp
+++ b/source/mijin/memory/object_pool.hpp
@@ -7,56 +7,317 @@
 #include <array>
 #include <cstddef>
 #include <cstdint>
-#include <bit>
-#include <memory>
-#include <vector>
+#include "../internal/common.hpp"
 #include "../debug/assert.hpp"
+#include "../util/align.hpp"
 
 namespace mijin
 {
-template <typename TObject, std::size_t OBJECTS_PER_PAGE>
-class ObjectPool
+namespace impl
 {
-private:
-    struct GapInfo
-    {
-        std::uint32_t objectCount;
-        std::uint32_t nextGap;
-    };
-    static constexpr std::size_t ALIGN = std::max(alignof(TObject), alignof(GapInfo));
+struct ObjectPoolGap
+{
+    std::uint16_t nextGapOffset;
+    std::uint16_t remainingObjects;
 
-    struct alignas(ALIGN) ObjectData
-    {
-        std::array<std::byte, sizeof(TObject)> bytes;
-    };
-    struct Page
-    {
-        std::uint32_t freeOffset = 0;
-        std::array<ObjectData, OBJECTS_PER_PAGE> data;
-    };
-
-    std::vector<std::unique_ptr<Page>> pages;
-public:
-    [[nodiscard]] TObject* allocate(std::size_t count = 1) noexcept;
-    void free(TObject* object, std::size_t count = 1) noexcept;
+    ObjectPoolGap* nextGap() noexcept {
+        return nextGapOffset == 0 ? nullptr : reinterpret_cast<ObjectPoolGap*>(reinterpret_cast<std::byte*>(this) + nextGapOffset);
+    }
 };
 
 template <typename TObject, std::size_t OBJECTS_PER_PAGE>
-TObject* ObjectPool<TObject, OBJECTS_PER_PAGE>::allocate(std::size_t count) noexcept
+struct ObjectPoolPage
 {
-    MIJIN_ASSERT(count <= OBJECTS_PER_PAGE, "Cannot allocate more than OBJECTS_PER_PAGE elements at once!");
-    // first try to find a free spot in the existing pages
+    ObjectPoolGap* firstGap;
+    ObjectPoolPage* nextPage = nullptr;
+    std::array<std::byte, OBJECTS_PER_PAGE * sizeof(TObject)> data;
 
-    for (std::unique_ptr<Page>& page : pages)
+    ObjectPoolPage() noexcept
     {
-        std::uint32_t offset = page->freeOffset;
-        while (offset < OBJECTS_PER_PAGE)
+        firstGap = reinterpret_cast<ObjectPoolGap*>(data.data());
+        firstGap->nextGapOffset = 0;
+        firstGap->remainingObjects = OBJECTS_PER_PAGE;
+    }
+};
+}
+
+template <typename TObject, std::size_t OBJECTS_PER_PAGE>
+class ObjectPoolIterator
+{
+public:
+    using value_type = TObject;
+    using difference_type = std::ptrdiff_t;
+private:
+    using gap_t = impl::ObjectPoolGap;
+    using page_t = impl::ObjectPoolPage<TObject, OBJECTS_PER_PAGE>;
+
+    page_t* currentPage_ = nullptr;
+    gap_t* currentGap_ = nullptr;
+    std::uint16_t currentObject_ = 0;
+
+    explicit ObjectPoolIterator(page_t& firstPage) noexcept
+    {
+        seekNextOnPage(firstPage);
+    }
+public:
+    ObjectPoolIterator() noexcept = default;
+    ObjectPoolIterator(const ObjectPoolIterator&) noexcept = default;
+
+    ObjectPoolIterator& operator=(const ObjectPoolIterator&) noexcept = default;
+
+    auto operator<=>(const ObjectPoolIterator&) const noexcept = default;
+
+    TObject& operator*() const noexcept { return getObject(); }
+    TObject* operator->() const noexcept { return &getObject(); }
+
+    ObjectPoolIterator& operator++() noexcept
+    {
+        seekNext();
+        return *this;
+    }
+
+    ObjectPoolIterator operator++(int) noexcept
+    {
+        ObjectPoolIterator copy(*this);
+        ++(*this);
+        return copy;
+    }
+private:
+    TObject& getObject() const noexcept
+    {
+        MIJIN_ASSERT(currentGap_ != nullptr, "Attempting to dereference an invalid iterator.");
+        return *(reinterpret_cast<TObject*>(currentGap_) + currentGap_->remainingObjects + currentObject_);
+    }
+
+    bool seekNextOnPage(page_t& firstPage) noexcept
+    {
+        for (page_t* page = &firstPage; page != nullptr; page = page->nextPage)
         {
-            GapInfo& gapInfo = *std::bit_cast<GapInfo*>(&page->data[offset]);
-
+            if (page->firstGap != nullptr && seekNextInGap(*page->firstGap))
+            {
+                currentPage_ = page;
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool seekNextInGap(gap_t& firstGap) noexcept
+    {
+        for (gap_t* gap = &firstGap; gap != nullptr; gap = gap->nextGap())
+        {
+            if (gap->remainingObjects < OBJECTS_PER_PAGE)
+            {
+                currentGap_ = gap;
+                currentObject_ = 0;
+                return true;
+            }
+        }
+        return false;
+    }
+
+    void seekNext() noexcept
+    {
+        if (++currentObject_ < OBJECTS_PER_PAGE - currentGap_->remainingObjects)
+        {
+            return;
+        }
+        if (currentGap_->nextGapOffset != 0 && seekNextInGap(*currentGap_->nextGap())) {
+            return;
+        }
+        if (currentPage_->nextPage != nullptr && seekNextOnPage(*currentPage_->nextPage)) {
+            return;
+        }
+        currentPage_ = nullptr;
+        currentGap_ = nullptr;
+        currentObject_ = 0;
+    }
+
+    template <typename, std::size_t, template <typename> typename TAllocator>
+    friend class ObjectPool;
+};
+
+template <typename TObject, std::size_t OBJECTS_PER_PAGE, template <typename> typename TAllocator = MIJIN_DEFAULT_ALLOCATOR>
+class ObjectPool
+{
+public:
+    using iterator = ObjectPoolIterator<TObject, OBJECTS_PER_PAGE>;
+    using const_iterator = iterator;
+private:
+    using gap_t = impl::ObjectPoolGap;
+    using page_t = impl::ObjectPoolPage<TObject, OBJECTS_PER_PAGE>;
+
+    static_assert(sizeof(gap_t) <= sizeof(TObject));
+
+    [[no_unique_address]] TAllocator<page_t> allocator_;
+    page_t* firstPage = nullptr;
+public:
+    explicit ObjectPool(TAllocator<page_t> allocator = {}) MIJIN_NOEXCEPT_IF((std::is_nothrow_constructible_v<TAllocator<page_t>, TAllocator<page_t>&&>))
+        : allocator_(std::move(allocator))
+    {
+    }
+    ObjectPool(const ObjectPool&) = delete;
+    ObjectPool(ObjectPool&&) = default;
+
+    ObjectPool& operator=(const ObjectPool&) = delete;
+    ObjectPool& operator=(ObjectPool&&) = default;
+
+    [[nodiscard]]
+    TObject* allocate(std::size_t count = 1) noexcept
+    {
+        MIJIN_ASSERT(count <= OBJECTS_PER_PAGE, "Cannot allocate that many objects at once.");
+
+        // first try to find a free spot in the existing pages
+        for (page_t* page = firstPage; page != nullptr; page = page->nextPage)
+        {
+            if (TObject* result = allocateFromPage(*page, count); result != nullptr) {
+                return result;
+            }
+        }
+
+        // nothing found in the existing pages, allocate a new one and return memory from there
+        page_t* newPage = ::new (allocator_.allocate(1)) page_t;
+
+        if (firstPage == nullptr)
+        {
+            firstPage = newPage;
+        }
+        else
+        {
+            page_t* lastPage = firstPage;
+            for (; lastPage->nextPage != nullptr; lastPage = lastPage->nextPage);
+            lastPage->nextPage = newPage;
+        }
+        return allocateFromPage(*newPage, count);
+    }
+
+    void deallocate(TObject* object, std::size_t count = 1) noexcept
+    {
+        std::byte* const objectPtr = reinterpret_cast<std::byte*>(object); // for easier comparison
+        for (page_t* page = firstPage; page != nullptr; page = page->nextPage)
+        {
+            // first find the page it's in
+            std::byte* pageStart = page->data.data();
+            std::byte* pageEnd = pageStart + (OBJECTS_PER_PAGE * sizeof(TObject));
+
+            if (objectPtr >= pageStart && objectPtr <= pageEnd)
+            {
+                // then the corresponding gap
+                if (page->firstGap == nullptr)
+                {
+                    // everything is used, create a new gap
+                    gap_t* newGap = reinterpret_cast<gap_t*>(objectPtr);
+                    newGap->remainingObjects = count;
+                    newGap->nextGapOffset = 0;
+                    page->firstGap = newGap;
+                    return;
+                }
+
+                for (gap_t* gap = page->firstGap; gap != nullptr; gap = gap->nextGap())
+                {
+                    std::byte* gapStart = reinterpret_cast<std::byte*>(gap);
+                    std::byte* gapEnd = gap->nextGapOffset == 0 ? pageEnd : gapStart + gap->nextGapOffset;
+                    if (objectPtr > gapStart && objectPtr < gapEnd)
+                    {
+                        if (objectPtr + (count * sizeof(TObject)) == gapEnd)
+                        {
+                            // deallocating right from the end -> just increase the gap remainingObjects
+                            gap->remainingObjects += count;
+                            mergeGaps(gap);
+                        }
+                        else
+                        {
+                            // deallocating from the middle -> create a new gap
+                            gap_t* newGap = reinterpret_cast<gap_t*>(objectPtr);
+                            newGap->remainingObjects = count;
+                            newGap->nextGapOffset = gap->nextGapOffset == 0 ?
+                                0 : (gapEnd - reinterpret_cast<std::byte*>(newGap));
+                            gap->nextGapOffset = objectPtr - gapStart;
+                            mergeGaps(newGap);
+                        }
+                        return;
+                    }
+                }
+            }
         }
     }
-}
+
+    template <typename... TArgs>
+    [[nodiscard]]
+    TObject* create(TArgs&&... args) noexcept
+    {
+        TObject* result = allocate();
+        if (result == nullptr) {
+            return nullptr;
+        }
+        return ::new (result) TObject(std::forward<TArgs>(args)...);
+    }
+
+    [[nodiscard]]
+    TObject* createMultiple(std::size_t count = 1) noexcept
+    {
+        TObject* result = allocate(count);
+        if (result == nullptr) {
+            return nullptr;
+        }
+        return ::new (result) TObject[count];
+    }
+
+    void destroy(TObject* ptr, std::size_t count = 1) noexcept
+    {
+        for (std::size_t idx = 0; idx < count; ++idx) {
+            ptr[idx].~TObject();
+        }
+        deallocate(ptr, count);
+    }
+
+    [[nodiscard]]
+    iterator begin() const noexcept { return firstPage != nullptr ? iterator(*firstPage) : iterator(); }
+
+    [[nodiscard]]
+    iterator end() const noexcept { return {}; }
+private:
+    TObject* allocateFromPage(page_t& page, std::size_t count)
+    {
+        gap_t* previousGap = nullptr;
+        for (gap_t* gap = page.firstGap; gap != nullptr; previousGap = gap, gap = gap->nextGap())
+        {
+            if (gap->remainingObjects == count)
+            {
+                // exactly the correct size
+                if (previousGap != nullptr) {
+                    previousGap->nextGapOffset = gap->nextGapOffset == 0 ? 0 : previousGap->nextGapOffset + gap->nextGapOffset;
+                }
+                else {
+                    page.firstGap = gap->nextGap();
+                }
+                return reinterpret_cast<TObject*>(gap);
+            }
+            else if (gap->remainingObjects > count)
+            {
+                // still enough space
+                TObject* memory = reinterpret_cast<TObject*>(gap) + gap->remainingObjects - count;
+                gap->remainingObjects -= count;
+                return memory;
+            }
+        }
+        return nullptr;
+    }
+
+    void mergeGaps(gap_t* start) noexcept
+    {
+        while (start->nextGapOffset != 0 && start->remainingObjects * sizeof(TObject) == start->nextGapOffset)
+        {
+            gap_t& next = *start->nextGap();
+            start->remainingObjects += next.remainingObjects;
+            if (next.nextGapOffset == 0) {
+                start->nextGapOffset = 0;
+            }
+            else {
+                start->nextGapOffset += next.nextGapOffset;
+            }
+        }
+    }
+};
 } // namespace mijin
 
 #endif // !defined(MIJIN_MEMORY_OBJECT_POOL_HPP_INCLUDED)
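
A minimal usage sketch of the new interface, not taken from the patch: it assumes MIJIN_DEFAULT_ALLOCATOR resolves to a std::allocator-like template and that the header is reachable under the include path shown.

// Usage sketch (assumptions: MIJIN_DEFAULT_ALLOCATOR behaves like std::allocator,
// include path is hypothetical).
#include "mijin/memory/object_pool.hpp"

struct Particle
{
    float x = 0.0f;
    float y = 0.0f;
    float dx = 0.0f;
    float dy = 0.0f;
};

int main()
{
    // Pool whose pages hold 256 Particle slots each; allocation is noexcept,
    // so a null result must be checked instead of catching bad_alloc.
    mijin::ObjectPool<Particle, 256> pool;

    Particle* p = pool.create();
    if (p == nullptr) {
        return 1;
    }
    p->dx = 1.0f;

    // The pool is iterable: the iterator walks the live objects across pages and gaps.
    for (Particle& particle : pool) {
        particle.x += particle.dx;
    }

    // destroy() runs the destructor and returns the slot to the page's gap list.
    pool.destroy(p);
    return 0;
}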