summaryrefslogtreecommitdiff
path: root/asl
diff options
context:
space:
mode:
authorSteven Le Rouzic <steven.lerouzic@gmail.com>2025-05-26 00:47:54 +0200
committerSteven Le Rouzic <steven.lerouzic@gmail.com>2025-05-26 00:48:06 +0200
commita1db1cd9e22e77041d5f1360f1d1ccdc52b86306 (patch)
treec1cc6dc9c17885a0789028f7a55c7126f33beee7 /asl
parent54b95b16629f0cd4bc30e6899e00019b3ab94012 (diff)
Implement chunked_buffer
Diffstat (limited to 'asl')
-rw-r--r--asl/base/bit_tests.cpp4
-rw-r--r--asl/base/meta.hpp20
-rw-r--r--asl/base/numeric.hpp17
-rw-r--r--asl/base/numeric_tests.cpp52
-rw-r--r--asl/containers/BUILD.bazel17
-rw-r--r--asl/containers/buffer.hpp87
-rw-r--r--asl/containers/buffer_tests.cpp69
-rw-r--r--asl/containers/chunked_buffer.hpp399
-rw-r--r--asl/containers/chunked_buffer_tests.cpp335
-rw-r--r--asl/memory/allocator.hpp15
-rw-r--r--asl/tests/BUILD.bazel2
-rw-r--r--asl/tests/counting_allocator.hpp48
-rw-r--r--asl/types/array.hpp8
-rw-r--r--asl/types/maybe_uninit_tests.cpp4
14 files changed, 998 insertions, 79 deletions
diff --git a/asl/base/bit_tests.cpp b/asl/base/bit_tests.cpp
index fa05fab..972bc72 100644
--- a/asl/base/bit_tests.cpp
+++ b/asl/base/bit_tests.cpp
@@ -39,6 +39,10 @@ ASL_TEST(popcount) // NOLINT(*-cognitive-complexity)
ASL_TEST(countr_zero)
{
ASL_TEST_EXPECT(asl::countr_zero(uint8_t{0}) == 8);
+ ASL_TEST_EXPECT(asl::countr_zero(uint8_t{1}) == 0);
+ ASL_TEST_EXPECT(asl::countr_zero(uint8_t{2}) == 1);
+ ASL_TEST_EXPECT(asl::countr_zero(uint8_t{4}) == 2);
+ ASL_TEST_EXPECT(asl::countr_zero(uint8_t{8}) == 3);
ASL_TEST_EXPECT(asl::countr_zero(uint8_t{255}) == 0);
ASL_TEST_EXPECT(asl::countr_zero(uint8_t{0b00011100}) == 2);
ASL_TEST_EXPECT(asl::countr_zero(uint8_t{0b10101010}) == 1);
diff --git a/asl/base/meta.hpp b/asl/base/meta.hpp
index e050e69..b17d761 100644
--- a/asl/base/meta.hpp
+++ b/asl/base/meta.hpp
@@ -249,28 +249,32 @@ template<> struct _integer_traits<uint8_t>
{
static constexpr bool kSigned = false;
static constexpr bool kUnsigned = true;
- using as_signed = int8_t;
+ using as_signed = int8_t;
+ using as_unsigned = uint8_t;
};
template<> struct _integer_traits<uint16_t>
{
static constexpr bool kSigned = false;
static constexpr bool kUnsigned = true;
- using as_signed = int16_t;
+ using as_signed = int16_t;
+ using as_unsigned = uint16_t;
};
template<> struct _integer_traits<uint32_t>
{
static constexpr bool kSigned = false;
static constexpr bool kUnsigned = true;
- using as_signed = int32_t;
+ using as_signed = int32_t;
+ using as_unsigned = uint32_t;
};
template<> struct _integer_traits<uint64_t>
{
static constexpr bool kSigned = false;
static constexpr bool kUnsigned = true;
- using as_signed = int64_t;
+ using as_signed = int64_t;
+ using as_unsigned = uint64_t;
};
template<> struct _integer_traits<int8_t>
@@ -278,6 +282,7 @@ template<> struct _integer_traits<int8_t>
static constexpr bool kSigned = true;
static constexpr bool kUnsigned = false;
using as_unsigned = uint8_t;
+ using as_signed = int8_t;
};
template<> struct _integer_traits<int16_t>
@@ -285,6 +290,7 @@ template<> struct _integer_traits<int16_t>
static constexpr bool kSigned = true;
static constexpr bool kUnsigned = false;
using as_unsigned = uint16_t;
+ using as_signed = int16_t;
};
template<> struct _integer_traits<int32_t>
@@ -292,6 +298,7 @@ template<> struct _integer_traits<int32_t>
static constexpr bool kSigned = true;
static constexpr bool kUnsigned = false;
using as_unsigned = uint32_t;
+ using as_signed = int32_t;
};
template<> struct _integer_traits<int64_t>
@@ -299,6 +306,7 @@ template<> struct _integer_traits<int64_t>
static constexpr bool kSigned = true;
static constexpr bool kUnsigned = false;
using as_unsigned = uint64_t;
+ using as_signed = int64_t;
};
template<typename T> concept is_signed_integer = _integer_traits<T>::kSigned;
@@ -306,8 +314,8 @@ template<typename T> concept is_unsigned_integer = _integer_traits<T>::kUnsigned
template<typename T> concept is_integer = is_signed_integer<T> || is_unsigned_integer<T>;
-template<is_signed_integer T> using as_unsigned_integer = _integer_traits<T>::as_unsigned;
-template<is_unsigned_integer T> using as_signed_integer = _integer_traits<T>::as_signed;
+template<is_integer T> using as_unsigned_integer = _integer_traits<T>::as_unsigned;
+template<is_integer T> using as_signed_integer = _integer_traits<T>::as_signed;
template<typename T> concept is_enum = __is_enum(T);
diff --git a/asl/base/numeric.hpp b/asl/base/numeric.hpp
index bbd229d..8d3b8ef 100644
--- a/asl/base/numeric.hpp
+++ b/asl/base/numeric.hpp
@@ -7,6 +7,7 @@
#include "asl/base/integers.hpp"
#include "asl/base/bit.hpp"
#include "asl/base/meta.hpp"
+#include "asl/base/assert.hpp"
namespace asl
{
@@ -14,10 +15,24 @@ namespace asl
template<is_integer T>
constexpr bool is_pow2(T x)
{
- using unsigned_type = select_t<is_unsigned_integer<T>, T, as_unsigned_integer<T>>;
+ using unsigned_type = as_unsigned_integer<T>;
return x > 0 && has_single_bit(static_cast<unsigned_type>(x));
}
+template<is_integer T>
+constexpr T round_down_pow2(T x, T div)
+{
+ ASL_ASSERT(is_pow2(div));
+ return x & (-div);
+}
+
+template<is_integer T>
+constexpr T round_up_pow2(T x, T div)
+{
+ ASL_ASSERT(is_pow2(div));
+ return (x + (div - 1)) & (-div);
+}
+
template<typename T>
concept is_numeric = is_integer<T> || is_floating_point<T>;
diff --git a/asl/base/numeric_tests.cpp b/asl/base/numeric_tests.cpp
index cfbc1ac..afcc12a 100644
--- a/asl/base/numeric_tests.cpp
+++ b/asl/base/numeric_tests.cpp
@@ -13,4 +13,56 @@ ASL_TEST(is_pow2)
ASL_TEST_EXPECT(!asl::is_pow2(6));
ASL_TEST_EXPECT(!asl::is_pow2(1978));
ASL_TEST_EXPECT(!asl::is_pow2(0));
+ ASL_TEST_EXPECT(asl::is_pow2(4U));
+ ASL_TEST_EXPECT(asl::is_pow2(uint64_t{65536}));
+}
+
+ASL_TEST(round_down_pow2) // NOLINT
+{
+ ASL_TEST_EXPECT(asl::round_down_pow2(0, 1) == 0);
+ ASL_TEST_EXPECT(asl::round_down_pow2(1, 1) == 1);
+ ASL_TEST_EXPECT(asl::round_down_pow2(2, 1) == 2);
+ ASL_TEST_EXPECT(asl::round_down_pow2(3, 1) == 3);
+ ASL_TEST_EXPECT(asl::round_down_pow2(-1, 1) == -1);
+ ASL_TEST_EXPECT(asl::round_down_pow2(-2, 1) == -2);
+ ASL_TEST_EXPECT(asl::round_down_pow2(-3, 1) == -3);
+
+ ASL_TEST_EXPECT(asl::round_down_pow2(0U, 1U) == 0U);
+ ASL_TEST_EXPECT(asl::round_down_pow2(1U, 1U) == 1U);
+ ASL_TEST_EXPECT(asl::round_down_pow2(2U, 1U) == 2U);
+ ASL_TEST_EXPECT(asl::round_down_pow2(3U, 1U) == 3U);
+
+ ASL_TEST_EXPECT(asl::round_down_pow2(0, 16) == 0);
+ ASL_TEST_EXPECT(asl::round_down_pow2(1, 16) == 0);
+ ASL_TEST_EXPECT(asl::round_down_pow2(8, 16) == 0);
+ ASL_TEST_EXPECT(asl::round_down_pow2(15, 16) == 0);
+ ASL_TEST_EXPECT(asl::round_down_pow2(16, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_down_pow2(17, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_down_pow2(255, 16) == 240);
+ ASL_TEST_EXPECT(asl::round_down_pow2(-255, 16) == -256);
+}
+
+ASL_TEST(round_up_pow2) // NOLINT
+{
+ ASL_TEST_EXPECT(asl::round_up_pow2(0, 1) == 0);
+ ASL_TEST_EXPECT(asl::round_up_pow2(1, 1) == 1);
+ ASL_TEST_EXPECT(asl::round_up_pow2(2, 1) == 2);
+ ASL_TEST_EXPECT(asl::round_up_pow2(3, 1) == 3);
+ ASL_TEST_EXPECT(asl::round_up_pow2(-1, 1) == -1);
+ ASL_TEST_EXPECT(asl::round_up_pow2(-2, 1) == -2);
+ ASL_TEST_EXPECT(asl::round_up_pow2(-3, 1) == -3);
+
+ ASL_TEST_EXPECT(asl::round_up_pow2(0U, 1U) == 0U);
+ ASL_TEST_EXPECT(asl::round_up_pow2(1U, 1U) == 1U);
+ ASL_TEST_EXPECT(asl::round_up_pow2(2U, 1U) == 2U);
+ ASL_TEST_EXPECT(asl::round_up_pow2(3U, 1U) == 3U);
+
+ ASL_TEST_EXPECT(asl::round_up_pow2(0, 16) == 0);
+ ASL_TEST_EXPECT(asl::round_up_pow2(1, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_up_pow2(8, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_up_pow2(15, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_up_pow2(16, 16) == 16);
+ ASL_TEST_EXPECT(asl::round_up_pow2(17, 16) == 32);
+ ASL_TEST_EXPECT(asl::round_up_pow2(255, 16) == 256);
+ ASL_TEST_EXPECT(asl::round_up_pow2(-255, 16) == -240);
}
diff --git a/asl/containers/BUILD.bazel b/asl/containers/BUILD.bazel
index ba9c6f1..d27aedc 100644
--- a/asl/containers/BUILD.bazel
+++ b/asl/containers/BUILD.bazel
@@ -22,6 +22,22 @@ cc_library(
)
cc_library(
+ name = "chunked_buffer",
+ hdrs = [
+ "chunked_buffer.hpp",
+ ],
+ deps = [
+ "//asl/memory",
+ "//asl/memory:allocator",
+ "//asl/base",
+ "//asl/containers:buffer",
+ "//asl/types:array",
+ "//asl/types:maybe_uninit",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
name = "hash_set",
hdrs = [
"hash_set.hpp",
@@ -75,6 +91,7 @@ cc_library(
],
) for name in [
"buffer",
+ "chunked_buffer",
"hash_map",
"hash_set",
"intrusive_list",
diff --git a/asl/containers/buffer.hpp b/asl/containers/buffer.hpp
index f554218..43339ca 100644
--- a/asl/containers/buffer.hpp
+++ b/asl/containers/buffer.hpp
@@ -91,7 +91,7 @@ private:
return is_on_heap(load_size_encoded());
}
- constexpr T* push_uninit()
+ constexpr void* push_uninit()
{
const isize_t sz = size();
resize_uninit_inner(sz + 1);
@@ -100,10 +100,13 @@ private:
constexpr void resize_uninit_inner(isize_t new_size)
{
- const isize_t old_size = size();
- if (!trivially_destructible<T> && new_size < old_size)
+ if constexpr (!trivially_destructible<T>)
{
- destroy_n(data() + new_size, old_size - new_size);
+ const isize_t old_size = size();
+ if (new_size < old_size)
+ {
+ destroy_n(data() + new_size, old_size - new_size);
+ }
}
reserve_capacity(new_size);
set_size(new_size);
@@ -136,18 +139,26 @@ private:
{
if (other.is_on_heap())
{
+    // If the other is on heap, destroy here and adopt its
+    // data. We'll soon adopt its allocator as well.
destroy();
m_data = other.m_data;
m_capacity = other.m_capacity;
store_size_encoded(other.load_size_encoded());
}
- else if (trivially_move_constructible<T>)
- {
- destroy();
- asl::memcpy(this, &other, kInlineRegionSize);
- }
else if (!assign || m_allocator == other.m_allocator)
{
+ // If allocators are compatible, we can move other's inline
+ // data here, even if it's on heap here, because that
+ // memory can be freed by other's allocator, which we will
+ // soon adopt.
+ //
+ // @Note There is an argument to be made for not doing this and
+ // instead destroying our data here and moving into inline
+ // storage, which frees one allocation. But also this avoids
+ // freeing. So I don't know.
+            // Maybe if this storage is much larger than the inline
+            // data, it's worth freeing.
const isize_t other_n = other.size();
const isize_t this_n = size();
resize_uninit_inner(other_n);
@@ -163,11 +174,28 @@ private:
}
else
{
+            // Otherwise, the allocators are not compatible, so we
+            // must free our heap storage and move other's data into
+            // our inline storage region.
+ // There is an optimization here when the data is trivially
+ // move constructible (which implies trivially destructible),
+ // we copy the whole inline region, which includes the size.
+ // Very magic.
+
destroy();
- const isize_t n = other.size();
- ASL_ASSERT(n <= kInlineCapacity);
- relocate_uninit_n(data(), other.data(), n);
- set_size_inline(n);
+ if constexpr (trivially_move_constructible<T>)
+ {
+ ASL_ASSERT(!is_on_heap());
+ asl::memcpy(this, &other, kInlineRegionSize);
+ }
+ else
+ {
+ const isize_t n = other.size();
+ ASL_ASSERT(n <= kInlineCapacity);
+ resize_uninit_inner(n);
+ ASL_ASSERT(!is_on_heap());
+ relocate_uninit_n(data(), other.data(), n);
+ }
}
other.set_size_inline(0);
@@ -270,6 +298,14 @@ public:
destroy();
}
+ constexpr Allocator allocator_copy() const
+ requires copy_constructible<Allocator>
+ {
+ return m_allocator;
+ }
+
+ constexpr Allocator& allocator() { return m_allocator; }
+
[[nodiscard]] constexpr isize_t size() const
{
return decode_size(load_size_encoded());
@@ -315,7 +351,6 @@ public:
void reserve_capacity(isize_t new_capacity)
{
ASL_ASSERT(new_capacity >= 0);
- ASL_ASSERT_RELEASE(new_capacity <= 0x4000'0000'0000'0000);
if (new_capacity <= capacity()) { return; }
ASL_ASSERT(new_capacity > kInlineCapacity);
@@ -352,17 +387,16 @@ public:
}
constexpr void resize_uninit(isize_t new_size)
- requires trivially_default_constructible<T> && trivially_destructible<T>
+ requires trivially_default_constructible<T>
{
- reserve_capacity(new_size);
- set_size(new_size);
+ resize_uninit_inner(new_size);
}
constexpr void resize_zero(isize_t new_size)
- requires trivially_default_constructible<T> && trivially_destructible<T>
+ requires trivially_default_constructible<T>
{
const isize_t old_size = size();
- resize_uninit(new_size);
+ resize_uninit_inner(new_size);
if (new_size > old_size)
{
@@ -373,7 +407,14 @@ public:
void resize(isize_t new_size)
requires default_constructible<T>
{
- resize_inner(new_size);
+ if constexpr (trivially_default_constructible<T>)
+ {
+ resize_zero(new_size);
+ }
+ else
+ {
+ resize_inner(new_size);
+ }
}
void resize(isize_t new_size, const T& value)
@@ -384,14 +425,14 @@ public:
constexpr T& push(auto&&... args)
requires constructible_from<T, decltype(args)&&...>
{
- T* uninit = push_uninit();
+ void* uninit = push_uninit();
T* init = construct_at<T>(uninit, std::forward<decltype(args)>(args)...);
return *init;
}
auto data(this auto&& self)
{
- using return_type = copy_const_t<un_ref_t<decltype(self)>, T>*;
+ using return_type = as_ptr_t<copy_const_t<un_ref_t<decltype(self)>, T>>;
// NOLINTNEXTLINE(*-reinterpret-cast)
auto&& buffer = reinterpret_cast<copy_cref_t<decltype(self), class buffer>>(self);
if constexpr (kInlineCapacity == 0)
@@ -437,7 +478,7 @@ public:
constexpr auto&& operator[](this auto&& self, isize_t i)
{
- ASL_ASSERT(i >= 0 && i <= self.size());
+ ASL_ASSERT(i >= 0 && i < self.size());
return std::forward_like<decltype(self)>(std::forward<decltype(self)>(self).data()[i]);
}
diff --git a/asl/containers/buffer_tests.cpp b/asl/containers/buffer_tests.cpp
index eb95ffe..dbb8fa9 100644
--- a/asl/containers/buffer_tests.cpp
+++ b/asl/containers/buffer_tests.cpp
@@ -6,6 +6,7 @@
#include "asl/testing/testing.hpp"
#include "asl/tests/types.hpp"
+#include "asl/tests/counting_allocator.hpp"
struct Big
{
@@ -14,6 +15,7 @@ struct Big
static_assert(asl::buffer<int32_t>::kInlineCapacity == 5);
static_assert(asl::buffer<int64_t>::kInlineCapacity == 2);
+static_assert(asl::buffer<void*>::kInlineCapacity == 2);
static_assert(asl::buffer<char>::kInlineCapacity == 23);
static_assert(asl::buffer<Big>::kInlineCapacity == 0);
@@ -30,32 +32,6 @@ ASL_TEST(default_size)
ASL_TEST_EXPECT(b2.data() == nullptr);
}
-struct CounterAllocator
-{
- isize_t* count;
-
- [[nodiscard]]
- void* alloc(const asl::layout& layout) const
- {
- *count += 1;
- return asl::GlobalHeap::alloc(layout);
- }
-
- void* realloc(void* ptr, const asl::layout& old, const asl::layout& new_layout) const
- {
- *count += 1;
- return asl::GlobalHeap::realloc(ptr, old, new_layout);
- }
-
- static void dealloc(void* ptr, const asl::layout& layout)
- {
- asl::GlobalHeap::dealloc(ptr, layout);
- }
-
- constexpr bool operator==(const CounterAllocator&) const { return true; }
-};
-static_assert(asl::allocator<CounterAllocator>);
-
struct IncompatibleAllocator
{
static void* alloc(const asl::layout& layout)
@@ -80,31 +56,31 @@ static_assert(asl::allocator<IncompatibleAllocator>);
// NOLINTNEXTLINE(*-complexity)
ASL_TEST(reserve_capacity)
{
- isize_t count = 0;
- asl::buffer<int32_t, CounterAllocator> b(CounterAllocator{&count});
+ CountingAllocator::Stats stats;
+ asl::buffer<int32_t, CountingAllocator> b(CountingAllocator{&stats});
ASL_TEST_EXPECT(b.size() == 0);
ASL_TEST_EXPECT(b.capacity() == 5);
- ASL_TEST_EXPECT(count == 0);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 0);
b.reserve_capacity(4);
ASL_TEST_EXPECT(b.size() == 0);
ASL_TEST_EXPECT(b.capacity() == 5);
- ASL_TEST_EXPECT(count == 0);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 0);
b.reserve_capacity(12);
ASL_TEST_EXPECT(b.size() == 0);
ASL_TEST_EXPECT(b.capacity() >= 12);
- ASL_TEST_EXPECT(count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
b.reserve_capacity(13);
ASL_TEST_EXPECT(b.size() == 0);
ASL_TEST_EXPECT(b.capacity() >= 13);
- ASL_TEST_EXPECT(count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
b.reserve_capacity(130);
ASL_TEST_EXPECT(b.size() == 0);
ASL_TEST_EXPECT(b.capacity() >= 130);
- ASL_TEST_EXPECT(count == 2);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 2);
}
// NOLINTNEXTLINE(*-complexity)
@@ -399,21 +375,21 @@ ASL_TEST(move_assign_from_heap)
ASL_TEST(move_assign_trivial_heap_to_inline)
{
- isize_t alloc_count = 0;
- asl::buffer<int64_t, CounterAllocator> buf{CounterAllocator{&alloc_count}};
- asl::buffer<int64_t, CounterAllocator> buf2{CounterAllocator{&alloc_count}};
+ CountingAllocator::Stats stats;
+ asl::buffer<int64_t, CountingAllocator> buf{CountingAllocator{&stats}};
+ asl::buffer<int64_t, CountingAllocator> buf2{CountingAllocator{&stats}};
buf.push(1);
buf.push(2);
- ASL_TEST_EXPECT(alloc_count == 0);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 0);
buf2.push(3);
buf2.push(4);
buf2.push(5);
- ASL_TEST_EXPECT(alloc_count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
buf = std::move(buf2);
- ASL_TEST_EXPECT(alloc_count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
ASL_TEST_EXPECT(buf.size() == 3);
ASL_TEST_EXPECT(buf[0] == 3);
@@ -423,21 +399,21 @@ ASL_TEST(move_assign_trivial_heap_to_inline)
ASL_TEST(move_assign_trivial_inline_to_heap)
{
- isize_t alloc_count = 0;
- asl::buffer<int64_t, CounterAllocator> buf{CounterAllocator{&alloc_count}};
- asl::buffer<int64_t, CounterAllocator> buf2{CounterAllocator{&alloc_count}};
+ CountingAllocator::Stats stats;
+ asl::buffer<int64_t, CountingAllocator> buf{CountingAllocator{&stats}};
+ asl::buffer<int64_t, CountingAllocator> buf2{CountingAllocator{&stats}};
buf.push(1);
buf.push(2);
- ASL_TEST_EXPECT(alloc_count == 0);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 0);
buf2.push(3);
buf2.push(4);
buf2.push(5);
- ASL_TEST_EXPECT(alloc_count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
buf2 = std::move(buf);
- ASL_TEST_EXPECT(alloc_count == 1);
+ ASL_TEST_EXPECT(stats.any_alloc_count() == 1);
ASL_TEST_EXPECT(buf2.size() == 2);
ASL_TEST_EXPECT(buf2[0] == 1);
@@ -635,3 +611,6 @@ ASL_TEST(resize_zero)
}
}
+static_assert(asl::same_as<decltype(asl::declval<asl::buffer<int>>().data()), int*>);
+static_assert(asl::same_as<decltype(asl::declval<const asl::buffer<int>>().data()), const int*>);
+
diff --git a/asl/containers/chunked_buffer.hpp b/asl/containers/chunked_buffer.hpp
new file mode 100644
index 0000000..3af7aa2
--- /dev/null
+++ b/asl/containers/chunked_buffer.hpp
@@ -0,0 +1,399 @@
+// Copyright 2025 Steven Le Rouzic
+//
+// SPDX-License-Identifier: BSD-3-Clause
+
+#pragma once
+
+#include "asl/base/utility.hpp"
+#include "asl/base/assert.hpp"
+#include "asl/base/numeric.hpp"
+#include "asl/containers/buffer.hpp"
+#include "asl/memory/allocator.hpp"
+#include "asl/types/array.hpp"
+#include "asl/types/maybe_uninit.hpp"
+
+namespace asl
+{
+
+template<
+ is_object T,
+ isize_t kChunkSize,
+ allocator Allocator = DefaultAllocator>
+class chunked_buffer
+{
+ static_assert(kChunkSize > 0 && is_pow2(kChunkSize));
+
+ using Chunk = array<maybe_uninit<T>, kChunkSize>;
+
+ static constexpr isize_t chunk_index(isize_t i)
+ {
+ static constexpr int kChunkSizeLog2 = countr_zero(uint64_t{kChunkSize});
+ return i >> kChunkSizeLog2;
+ }
+
+ static constexpr isize_t index_in_chunk(isize_t i)
+ {
+ static constexpr isize_t kMask = kChunkSize - 1;
+ return i & kMask;
+ }
+
+ struct PerChunkIterator
+ {
+ isize_t from_chunk;
+ isize_t to_chunk;
+
+ isize_t from_index_in_chunk;
+ isize_t to_index_in_chunk;
+
+ [[nodiscard]] constexpr bool has_more() const
+ {
+ return from_chunk <= to_chunk;
+ }
+
+ void advance()
+ {
+ from_chunk += 1;
+ from_index_in_chunk = 0;
+ }
+
+ [[nodiscard]] constexpr isize_t chunk() const { return from_chunk; }
+
+ span<maybe_uninit<T>> make_span(Chunk& chunk) const
+ {
+ isize_t from = from_index_in_chunk;
+ isize_t to = (from_chunk == to_chunk) ? to_index_in_chunk : kChunkSize - 1;
+ return chunk.as_span().subspan(from, to - from + 1);
+ }
+ };
+
+ PerChunkIterator make_index_iterator(isize_t from, isize_t to)
+ {
+ return PerChunkIterator {
+ chunk_index(from), chunk_index(to),
+ index_in_chunk(from), index_in_chunk(to)
+ };
+ }
+
+ buffer<Chunk*, Allocator> m_chunks;
+ isize_t m_size{};
+
+ void resize_uninit_inner(isize_t new_size)
+ {
+ ASL_ASSERT(new_size >= 0);
+
+ if constexpr (!trivially_destructible<T>)
+ {
+ const isize_t old_size = size();
+ if (new_size < old_size)
+ {
+ for (PerChunkIterator it = make_index_iterator(new_size, old_size - 1);
+ it.has_more();
+ it.advance())
+ {
+ auto span = it.make_span(*m_chunks[it.chunk()]);
+ for (auto& el: span)
+ {
+ el.destroy_unsafe();
+ }
+ }
+ }
+ }
+
+ reserve_capacity(new_size);
+ m_size = new_size;
+ }
+
+ template<typename... Args>
+ void resize_construct(isize_t new_size, Args&&... args)
+ requires constructible_from<T, Args&&...>
+ {
+ const isize_t old_size = m_size;
+ resize_uninit_inner(new_size);
+
+ if (new_size > old_size)
+ {
+ for (PerChunkIterator it = make_index_iterator(old_size, new_size - 1);
+ it.has_more();
+ it.advance())
+ {
+ auto span = it.make_span(*m_chunks[it.chunk()]);
+ for (auto& uninit: span)
+ {
+ uninit.construct_unsafe(std::forward<Args>(args)...);
+ }
+ }
+ }
+ }
+
+ void copy_from(const chunked_buffer& other)
+ requires copyable<T>
+ {
+ const isize_t this_size = size();
+ isize_t to_copy_assign = asl::min(other.size(), this_size);
+
+ resize_uninit_inner(other.size());
+
+ for (PerChunkIterator it = make_index_iterator(0, to_copy_assign - 1);
+ it.has_more();
+ it.advance())
+ {
+ auto to_span = it.make_span(*m_chunks[it.chunk()]);
+ auto from_span = it.make_span(*other.m_chunks[it.chunk()]);
+
+ copy_assign_n(
+ reinterpret_cast<T*>(to_span.data()), // NOLINT(*-reinterpret-cast)
+ reinterpret_cast<const T*>(from_span.data()), // NOLINT(*-reinterpret-cast)
+ to_span.size());
+ }
+
+ if (other.size() > this_size)
+ {
+ for (PerChunkIterator it = make_index_iterator(to_copy_assign, other.size() - 1);
+ it.has_more();
+ it.advance())
+ {
+ auto to_span = it.make_span(*m_chunks[it.chunk()]);
+ auto from_span = it.make_span(*other.m_chunks[it.chunk()]);
+
+ copy_uninit_n(
+ reinterpret_cast<T*>(to_span.data()), // NOLINT(*-reinterpret-cast)
+ reinterpret_cast<const T*>(from_span.data()), // NOLINT(*-reinterpret-cast)
+ to_span.size());
+ }
+ }
+
+ ASL_ASSERT(size() == other.size());
+ }
+
+public:
+ constexpr chunked_buffer()
+ requires default_constructible<Allocator>
+ = default;
+
+ explicit constexpr chunked_buffer(Allocator allocator)
+ : m_chunks{std::move(allocator)}
+ {}
+
+ constexpr chunked_buffer(const chunked_buffer& other)
+ requires copyable<T> && copy_constructible<Allocator>
+ : m_chunks{other.m_chunks.allocator_copy()}
+ {
+ copy_from(other);
+ }
+
+ constexpr chunked_buffer(chunked_buffer&& other)
+ : m_chunks{std::move(other.m_chunks)}
+ , m_size{asl::exchange(other.m_size, 0)}
+ {
+ ASL_ASSERT(other.m_chunks.size() == 0);
+ }
+
+ constexpr chunked_buffer& operator=(const chunked_buffer& other)
+ requires copyable<T>
+ {
+ if (&other == this) { return *this; }
+ copy_from(other);
+ return *this;
+ }
+
+ constexpr chunked_buffer& operator=(chunked_buffer&& other)
+ {
+ if (&other == this) { return *this; }
+ destroy();
+ m_chunks = std::move(other.m_chunks);
+ m_size = asl::exchange(other.m_size, 0);
+ ASL_ASSERT(other.m_chunks.size() == 0);
+ return *this;
+ }
+
+ ~chunked_buffer()
+ {
+ destroy();
+ }
+
+ void clear()
+ {
+ if constexpr (trivially_destructible<T>)
+ {
+ m_size = 0;
+ }
+ else if (m_size > 0)
+ {
+ resize_uninit_inner(0);
+ ASL_ASSERT(m_size == 0);
+ }
+ }
+
+ void destroy()
+ {
+ clear();
+ ASL_ASSERT(size() == 0);
+
+ for (Chunk* chunk: m_chunks)
+ {
+ alloc_delete(m_chunks.allocator(), chunk);
+ }
+
+ m_chunks.destroy();
+ }
+
+ [[nodiscard]] constexpr isize_t size() const { return m_size; }
+
+ [[nodiscard]] constexpr bool is_empty() const { return size() == 0; }
+
+ [[nodiscard]] constexpr isize_t capacity() const
+ {
+ return m_chunks.size() * kChunkSize;
+ }
+
+ constexpr auto&& operator[](this auto&& self, isize_t i)
+ {
+ ASL_ASSERT(i >= 0 && i < self.m_size);
+ return std::forward_like<decltype(self)>(
+ (*std::forward<decltype(self)>(self).m_chunks[chunk_index(i)])
+ [index_in_chunk(i)].as_init_unsafe()
+ );
+ }
+
+ constexpr T& push(auto&&... args)
+ requires constructible_from<T, decltype(args)&&...>
+ {
+ const isize_t chunk = chunk_index(m_size);
+ const isize_t in_chunk = index_in_chunk(m_size);
+
+ if (m_size == capacity())
+ {
+ resize_uninit_inner(m_size + 1);
+ }
+ else
+ {
+ m_size += 1;
+ }
+
+ void* uninit = &(*m_chunks[chunk])[in_chunk];
+ return *construct_at<T>(uninit, std::forward<decltype(args)>(args)...);
+ }
+
+ void reserve_capacity(isize_t new_capacity)
+ {
+ new_capacity = round_up_pow2(new_capacity, kChunkSize);
+ if (new_capacity <= capacity()) { return; }
+
+ const isize_t required_chunks = new_capacity / kChunkSize;
+ const isize_t additional_chunks = required_chunks - m_chunks.size();
+ ASL_ASSERT(additional_chunks > 0);
+
+ m_chunks.reserve_capacity(required_chunks);
+ for (isize_t i = 0; i < additional_chunks; ++i)
+ {
+ // @Todo(C++26) _unsafe shouldn't be needed with trivial unions
+ auto* chunk = alloc_uninit_unsafe<Chunk>(m_chunks.allocator());
+ m_chunks.push(chunk);
+ }
+ }
+
+ void resize(isize_t new_size)
+ requires default_constructible<T>
+ {
+ if constexpr (trivially_default_constructible<T>)
+ {
+ resize_zero(new_size);
+ }
+ else
+ {
+ resize_construct(new_size);
+ }
+ }
+
+ void resize(isize_t new_size, const T& value)
+ requires copy_constructible<T>
+ {
+ resize_construct(new_size, value);
+ }
+
+ void resize_zero(isize_t new_size)
+ requires trivially_default_constructible<T>
+ {
+ const isize_t old_size = m_size;
+ resize_uninit_inner(new_size);
+
+ if (new_size > old_size)
+ {
+ for (PerChunkIterator it = make_index_iterator(old_size, new_size - 1);
+ it.has_more();
+ it.advance())
+ {
+ auto span = it.make_span(*m_chunks[it.chunk()]);
+ asl::memzero(span.data(), span.size_bytes());
+ }
+ }
+ }
+
+ void resize_uninit(isize_t new_size)
+ requires trivially_default_constructible<T>
+ {
+ resize_uninit_inner(new_size);
+ }
+
+ template<typename Chunk>
+ class generic_iterator
+ {
+ isize_t m_index;
+ span<Chunk> m_chunks;
+
+ public:
+ constexpr generic_iterator(isize_t index, span<Chunk> chunks)
+ : m_index{index}
+ , m_chunks{chunks}
+ {}
+
+ constexpr generic_iterator& operator++()
+ {
+ m_index += 1;
+ return *this;
+ }
+
+ constexpr generic_iterator operator++(int)
+ {
+ auto tmp = *this;
+ m_index += 1;
+ return tmp;
+ }
+
+ constexpr bool operator==(this generic_iterator self, generic_iterator other)
+ {
+ ASL_ASSERT(self.m_chunks.data() == other.m_chunks.data());
+ return self.m_index == other.m_index;
+ }
+<