Finally implemented object pools.

Patrick 2025-07-25 00:51:32 +02:00
parent d35140d278
commit 3d507ddef7


@@ -7,56 +7,317 @@
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <new>
#include <vector>
#include "../internal/common.hpp"
#include "../debug/assert.hpp"
#include "../util/align.hpp"
namespace mijin
{
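// Implementation details: free storage on a page is tracked as an intrusive singly linked list
// of gaps. Each gap records how many free object slots it covers (remainingObjects) and the
// byte offset to the next gap (nextGapOffset, 0 meaning "last gap").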
namespace impl
{
struct ObjectPoolGap
{
std::uint16_t nextGapOffset;
std::uint16_t remainingObjects;
ObjectPoolGap* nextGap() noexcept {
return nextGapOffset == 0 ? nullptr : reinterpret_cast<ObjectPoolGap*>(reinterpret_cast<std::byte*>(this) + nextGapOffset);
}
};
template<typename TObject, std::size_t OBJECTS_PER_PAGE>
struct ObjectPoolPage
{
ObjectPoolGap* firstGap;
ObjectPoolPage* nextPage = nullptr;
std::array<std::byte, OBJECTS_PER_PAGE * MIJIN_STRIDEOF(TObject)> data;
ObjectPoolPage() noexcept
{
firstGap = reinterpret_cast<ObjectPoolGap*>(data.data());
firstGap->nextGapOffset = 0;
firstGap->remainingObjects = OBJECTS_PER_PAGE;
}
};
} // namespace impl
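// Forward iterator over every live object in a pool. It walks the page list and, on each page,
// the gap list, visiting the objects that sit between the gaps.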
template<typename TObject, std::size_t OBJECTS_PER_PAGE>
class ObjectPoolIterator
{
public:
using value_type = TObject;
using difference_type = std::ptrdiff_t;
private:
using gap_t = impl::ObjectPoolGap;
using page_t = impl::ObjectPoolPage<TObject, OBJECTS_PER_PAGE>;
page_t* currentPage_ = nullptr;
gap_t* currentGap_ = nullptr;
std::uint16_t currentObject_ = 0;
explicit ObjectPoolIterator(page_t& firstPage) noexcept
{
seekNextOnPage(firstPage);
}
public:
ObjectPoolIterator() noexcept = default;
ObjectPoolIterator(const ObjectPoolIterator&) noexcept = default;
ObjectPoolIterator& operator=(const ObjectPoolIterator&) noexcept = default;
auto operator<=>(const ObjectPoolIterator&) const noexcept = default;
TObject& operator*() const noexcept { return getObject(); }
TObject* operator->() const noexcept { return &getObject(); }
ObjectPoolIterator& operator++() noexcept
{
seekNext();
return *this;
}
ObjectPoolIterator operator++(int) noexcept
{
ObjectPoolIterator copy(*this);
++(*this);
return copy;
}
private:
TObject& getObject() const noexcept
{
MIJIN_ASSERT(currentGap_ != nullptr, "Attempting to dereference an invalid iterator.");
return *(reinterpret_cast<TObject*>(currentGap_) + currentGap_->remainingObjects + currentObject_);
}
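// Moves the iterator to the first live object on or after firstPage; returns false if none of
// those pages has one.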
bool seekNextOnPage(page_t& firstPage) noexcept
{
for (page_t* page = &firstPage; page != nullptr; page = page->nextPage)
{
if (page->firstGap != nullptr && seekNextInGap(*page->firstGap))
{
currentPage_ = page;
return true;
}
}
return false;
}
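// Moves the iterator to the first gap, starting at firstGap, that does not cover the whole page
// (and therefore has live objects after it); returns false if there is no such gap.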
bool seekNextInGap(gap_t& firstGap) noexcept
{
for (gap_t* gap = &firstGap; gap != nullptr; gap = gap->nextGap())
{
if (gap->remainingObjects < OBJECTS_PER_PAGE)
{
currentGap_ = gap;
currentObject_ = 0;
return true;
}
}
return false;
}
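// Advances to the next live object, switching to the next gap or the next page when the current
// run of objects is exhausted; resets the iterator to the end state when nothing is left.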
void seekNext() noexcept
{
if (++currentObject_ < OBJECTS_PER_PAGE - currentGap_->remainingObjects)
{
return;
}
if (currentGap_->nextGapOffset != 0 && seekNextInGap(*currentGap_->nextGap())) {
return;
}
if (currentPage_->nextPage != nullptr && seekNextOnPage(*currentPage_->nextPage)) {
return;
}
currentPage_ = nullptr;
currentGap_ = nullptr;
currentObject_ = 0;
}
template<typename TObject2, std::size_t OBJECTS_PER_PAGE2, template<typename> typename TAllocator>
friend class ObjectPool;
};
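// Paged object pool. Pages are obtained from TAllocator, linked into a list and never released;
// allocation and deallocation operate on the per-page gap lists.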
template<typename TObject, std::size_t OBJECTS_PER_PAGE = 1024, template<typename> typename TAllocator = MIJIN_DEFAULT_ALLOCATOR>
class ObjectPool
{
public:
using iterator = ObjectPoolIterator<TObject, OBJECTS_PER_PAGE>;
using const_iterator = iterator;
private:
using gap_t = impl::ObjectPoolGap;
using page_t = impl::ObjectPoolPage<TObject, OBJECTS_PER_PAGE>;
static_assert(sizeof(gap_t) <= sizeof(TObject));
[[no_unique_address]] TAllocator<page_t> allocator_;
page_t* firstPage = nullptr;
public:
explicit ObjectPool(TAllocator<void> allocator = {}) MIJIN_NOEXCEPT_IF((std::is_nothrow_constructible_v<TAllocator<page_t>, TAllocator<void>&&>))
: allocator_(std::move(allocator))
{
}
ObjectPool(const ObjectPool&) = delete;
ObjectPool(ObjectPool&&) = default;
ObjectPool& operator=(const ObjectPool&) = delete;
ObjectPool& operator=(ObjectPool&&) = default;
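// Reserves uninitialized storage for count contiguous objects. Existing pages are searched first;
// if none of them has a large enough gap, a new page is appended to the page list.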
[[nodiscard]]
TObject* allocate(std::size_t count = 1) noexcept
{
MIJIN_ASSERT(count <= OBJECTS_PER_PAGE, "Cannot allocate that many objects at once.");
// first try to find a free spot in the existing pages
for (page_t* page = firstPage; page != nullptr; page = page->nextPage)
{
if (TObject* result = allocateFromPage(*page, count); result != nullptr) {
return result;
}
}
// nothing found in the existing pages, allocate a new one and return memory from there
page_t* newPage = ::new (allocator_.allocate(1)) page_t;
if (firstPage == nullptr)
{
firstPage = newPage;
}
else
{
page_t* lastPage = firstPage;
for (; lastPage->nextPage != nullptr; lastPage = lastPage->nextPage);
lastPage->nextPage = newPage;
}
return allocateFromPage(*newPage, count);
}
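// Gives the storage of count contiguous objects back to the page that owns it, either by growing
// an existing gap or by inserting a new one; adjacent gaps are merged afterwards.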
void deallocate(TObject* object, std::size_t count = 1) noexcept
{
std::byte* const objectPtr = reinterpret_cast<std::byte*>(object); // for easier comparison
for (page_t* page = firstPage; page != nullptr; page = page->nextPage)
{
// first find the page it's in
std::byte* pageStart = page->data.data();
std::byte* pageEnd = pageStart + (OBJECTS_PER_PAGE * sizeof(TObject));
if (objectPtr >= pageStart && objectPtr <= pageEnd)
{
// then the corresponding gap
if (page->firstGap == nullptr)
{
// everything is used, create a new gap
gap_t* newGap = reinterpret_cast<gap_t*>(objectPtr);
newGap->remainingObjects = count;
newGap->nextGapOffset = 0;
page->firstGap = newGap;
return;
}
for (gap_t* gap = page->firstGap; gap != nullptr; gap = gap->nextGap())
{
std::byte* gapStart = reinterpret_cast<std::byte*>(gap);
std::byte* gapEnd = gap->nextGapOffset == 0 ? pageEnd : gapStart + gap->nextGapOffset;
if (objectPtr > gapStart && objectPtr < gapEnd)
{
if (objectPtr + (count * sizeof(TObject)) == gapEnd)
{
// deallocating right from the end -> just increase the gap remainingObjects
gap->remainingObjects += count;
mergeGaps(gap);
}
else
{
// deallocating from the middle -> create a new gap
gap_t* newGap = reinterpret_cast<gap_t*>(objectPtr);
newGap->remainingObjects = count;
newGap->nextGapOffset = gap->nextGapOffset == 0 ? 0 : (gapEnd - reinterpret_cast<std::byte*>(newGap));
gap->nextGapOffset = objectPtr - gapStart;
mergeGaps(newGap);
}
return;
}
}
}
}
}
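// Allocates a single slot and constructs a TObject in it; returns nullptr if allocation fails.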
template<typename... TArgs>
[[nodiscard]]
TObject* create(TArgs&&... args) noexcept
{
TObject* result = allocate();
if (result == nullptr) {
return nullptr;
}
return ::new (result) TObject(std::forward<TArgs>(args)...);
}
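// Allocates count contiguous slots and default-constructs a TObject in each of them.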
[[nodiscard]]
TObject* createMultiple(std::size_t count = 1) noexcept
{
TObject* result = allocate(count);
if (result == nullptr) {
return nullptr;
}
// placement array-new may add bookkeeping overhead, so construct each element individually
for (std::size_t idx = 0; idx < count; ++idx) {
::new (result + idx) TObject();
}
return result;
}
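// Destroys count objects starting at ptr and returns their storage to the pool.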
void destroy(TObject* ptr, std::size_t count = 1) noexcept
{
for (std::size_t idx = 0; idx < count; ++idx) {
ptr[idx].~TObject();
}
deallocate(ptr, count);
}
[[nodiscard]]
iterator begin() const noexcept { return firstPage != nullptr ? iterator(*firstPage) : iterator(); }
[[nodiscard]]
iterator end() const noexcept { return {}; }
private:
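// Carves count objects out of the first gap on this page that is big enough; returns nullptr if
// the page has no suitable gap.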
TObject* allocateFromPage(page_t& page, std::size_t count)
{
gap_t* previousGap = nullptr;
for (gap_t* gap = page.firstGap; gap != nullptr; previousGap = gap, gap = gap->nextGap())
{
if (gap->remainingObjects == count)
{
// exactly the correct size
if (previousGap != nullptr) {
// unlink the gap: point the previous gap at the next one, or mark it as the last gap
previousGap->nextGapOffset = gap->nextGapOffset == 0 ? 0 : previousGap->nextGapOffset + gap->nextGapOffset;
}
else {
page.firstGap = gap->nextGap();
}
return reinterpret_cast<TObject*>(gap);
}
else if (gap->remainingObjects > count)
{
// still enough space
TObject* memory = reinterpret_cast<TObject*>(gap) + gap->remainingObjects - count;
gap->remainingObjects -= count;
return reinterpret_cast<TObject*>(memory);
}
}
return nullptr;
}
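// Merges start with any directly following gaps that its free region already reaches.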
void mergeGaps(gap_t* start) noexcept
{
while (start->nextGapOffset != 0 && start->remainingObjects * sizeof(TObject) == start->nextGapOffset)
{
gap_t& next = *start->nextGap();
start->remainingObjects += start->nextGap()->remainingObjects;
if (next.nextGapOffset == 0) {
start->nextGapOffset = 0;
}
else {
start->nextGapOffset += next.nextGapOffset;
}
}
}
};
} // namespace mijin
#endif // !defined(MIJIN_MEMORY_OBJECT_POOL_HPP_INCLUDED)
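
For reference, a minimal usage sketch of the new pool API. The Particle type and the include path are placeholders for this example; the calls themselves follow the public interface in the header above.

#include "mijin/memory/object_pool.hpp"

// Hypothetical payload type used only for this sketch.
struct Particle
{
    float x = 0.0f;
    float y = 0.0f;
    float velocity = 1.0f;
};

int main()
{
    // Pool with the default page size (1024 objects per page) and the default allocator.
    mijin::ObjectPool<Particle> pool;

    // create() allocates a slot from the pool and constructs the object in place.
    Particle* p = pool.create();
    if (p == nullptr) {
        return 1;
    }
    p->x = 2.0f;

    // The pool is iterable: every live object on every page is visited.
    for (Particle& particle : pool) {
        particle.y += particle.velocity;
    }

    // destroy() runs the destructor and hands the slot back to the page's gap list.
    pool.destroy(p);
    return 0;
}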