This commit is contained in:
2026-03-23 12:11:07 +01:00
commit e64eb40b38
4573 changed files with 3117439 additions and 0 deletions

View File

@@ -0,0 +1,83 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "aligned_alloc.hpp"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
#include <malloc.h>
#endif
namespace Util
{
// Allocates size bytes aligned to boundary (which must be a power of two).
// Returns nullptr on failure; release the memory with memalign_free().
void *memalign_alloc(size_t boundary, size_t size)
{
#if defined(_WIN32)
	return _aligned_malloc(size, boundary);
#elif defined(_ISOC11_SOURCE)
	// C11 aligned_alloc requires size to be a multiple of the alignment,
	// so round it up.
	return aligned_alloc(boundary, (size + boundary - 1) & ~(boundary - 1));
#elif (_POSIX_C_SOURCE >= 200112L) || (_XOPEN_SOURCE >= 600)
	void *ptr = nullptr;
	// posix_memalign() returns 0 on success and a positive errno value
	// (EINVAL/ENOMEM) on failure -- it never returns a negative value, so
	// the previous "< 0" check could never detect a failed allocation.
	if (posix_memalign(&ptr, boundary, size) != 0)
		return nullptr;
	return ptr;
#else
	// Align stuff ourselves. Kinda ugly, but will work anywhere.
	// Over-allocate, align the address manually, and stash the raw malloc()
	// pointer one slot before the aligned address so memalign_free() can
	// recover it.
	void **place;
	uintptr_t addr = 0;
	void *ptr = malloc(boundary + size + sizeof(uintptr_t));
	if (ptr == nullptr)
		return nullptr;
	addr = ((uintptr_t)ptr + sizeof(uintptr_t) + boundary) & ~(boundary - 1);
	place = (void **) addr;
	place[-1] = ptr;
	return (void *) addr;
#endif
}
// Aligned allocation that additionally clears the memory to zero,
// mirroring calloc() semantics on top of memalign_alloc().
void *memalign_calloc(size_t boundary, size_t size)
{
	void *mem = memalign_alloc(boundary, size);
	if (mem != nullptr)
		memset(mem, 0, size);
	return mem;
}
// Frees memory obtained from memalign_alloc()/memalign_calloc().
// The #if chain must mirror memalign_alloc() exactly: only the manual
// fallback path stores the original malloc() pointer one slot before the
// aligned address, so only that path must unwrap it before freeing.
void memalign_free(void *ptr)
{
#if defined(_WIN32)
	_aligned_free(ptr);
#elif !defined(_ISOC11_SOURCE) && !((_POSIX_C_SOURCE >= 200112L) || (_XOPEN_SOURCE >= 600))
	// Manual-alignment fallback: recover the raw pointer stashed at place[-1].
	if (ptr != nullptr)
	{
		void **p = (void **) ptr;
		free(p[-1]);
	}
#else
	// aligned_alloc()/posix_memalign() results are freed with plain free().
	free(ptr);
#endif
}
}

View File

@@ -0,0 +1,68 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stddef.h>
#include <stdexcept>
#include <new>
namespace Util
{
void *memalign_alloc(size_t boundary, size_t size);
void *memalign_calloc(size_t boundary, size_t size);
void memalign_free(void *ptr);
// Deleter for std::unique_ptr holding memory from memalign_alloc()/memalign_calloc().
struct AlignedDeleter { void operator()(void *ptr) { memalign_free(ptr); }};
// Deriving from AlignedAllocation<T> overrides operator new/delete for T so
// instances are allocated with alignment alignof(T) via the memalign_* helpers.
template <typename T>
struct AlignedAllocation
{
	static void *operator new(size_t size)
	{
		void *ret = ::Util::memalign_alloc(alignof(T), size);
		// NOTE(review): __EXCEPTIONS is a GCC/Clang macro; under MSVC this
		// check compiles out and a failed allocation returns nullptr instead
		// of throwing std::bad_alloc -- confirm that is intended.
#ifdef __EXCEPTIONS
		if (!ret) throw std::bad_alloc();
#endif
		return ret;
	}

	static void *operator new[](size_t size)
	{
		void *ret = ::Util::memalign_alloc(alignof(T), size);
#ifdef __EXCEPTIONS
		if (!ret) throw std::bad_alloc();
#endif
		return ret;
	}

	static void operator delete(void *ptr)
	{
		return ::Util::memalign_free(ptr);
	}

	static void operator delete[](void *ptr)
	{
		return ::Util::memalign_free(ptr);
	}
};
}

View File

@@ -0,0 +1,197 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "arena_allocator.hpp"
#include "bitops.hpp"
#include <assert.h>
namespace Util
{
// Claims num_blocks contiguous sub-blocks.
// Invariant (maintained by update_longest_run()): free_blocks[n - 1] has
// bit b set iff blocks b .. b + n - 1 are all free, so the lowest set bit
// of free_blocks[num_blocks - 1] is the first run large enough.
// The caller must ensure such a run exists (check get_longest_run());
// otherwise trailing_zeroes(0) == 32 and the shift below is undefined.
void LegionAllocator::allocate(uint32_t num_blocks, uint32_t &out_mask, uint32_t &out_offset)
{
	assert(NumSubBlocks >= num_blocks);
	assert(num_blocks != 0);

	uint32_t block_mask;
	if (num_blocks == NumSubBlocks)
		block_mask = ~0u; // (1u << 32) would be undefined, handle a full claim explicitly.
	else
		block_mask = ((1u << num_blocks) - 1u);

	uint32_t mask = free_blocks[num_blocks - 1];
	uint32_t b = trailing_zeroes(mask);

	// All num_blocks blocks starting at b must currently be free.
	assert(((free_blocks[0] >> b) & block_mask) == block_mask);

	uint32_t sb = block_mask << b;
	free_blocks[0] &= ~sb; // Mark the run as taken.
	update_longest_run();

	// Hand back exactly which bits were claimed so free() can return them.
	out_mask = sb;
	out_offset = b;
}
// Returns sub-blocks previously claimed by allocate(); mask must be the
// out_mask from that allocation and all its bits must currently be taken.
void LegionAllocator::free(uint32_t mask)
{
	assert((free_blocks[0] & mask) == 0);
	free_blocks[0] |= mask;
	update_longest_run();
}
// Rebuilds the run-length table from the free bit mask in free_blocks[0].
// After k iterations of f &= f >> 1, bit b of f is set iff blocks
// b .. b + k are all free, so free_blocks[k] records the start positions of
// free runs of length >= k + 1. The loop exits once no run of the current
// length exists, leaving longest_run equal to the longest free run.
void LegionAllocator::update_longest_run()
{
	uint32_t f = free_blocks[0];
	longest_run = 0;

	while (f)
	{
		free_blocks[longest_run++] = f;
		f &= f >> 1;
	}
}
// Acquires backing storage for one full arena (NumSubBlocks sub-blocks).
// When chained, the request is delegated to the coarser parent level;
// otherwise a whole buffer is taken from the global backing allocator.
bool SliceSubAllocator::allocate_backing_heap(AllocatedSlice *allocation)
{
	uint32_t count = sub_block_size * Util::LegionAllocator::NumSubBlocks;

	if (parent)
	{
		return parent->allocate(count, allocation);
	}
	else if (global_allocator)
	{
		uint32_t index = global_allocator->allocate(count);
		if (index == UINT32_MAX)
			return false;

		*allocation = {};
		allocation->count = count;
		allocation->buffer_index = index;
		return true;
	}
	else
	{
		// Not wired to any backing source; cannot allocate.
		return false;
	}
}
// Releases backing storage obtained in allocate_backing_heap(), mirroring
// its two acquisition paths (parent level vs. global backing allocator).
void SliceSubAllocator::free_backing_heap(AllocatedSlice *allocation) const
{
	if (parent)
		parent->free(allocation->heap, allocation->mask);
	else if (global_allocator)
		global_allocator->free(allocation->buffer_index);
}
// Fills in the caller-visible slice from a suballocation carved out of the
// given mini-heap. The resulting offset is relative to the backing buffer.
void SliceSubAllocator::prepare_allocation(AllocatedSlice *allocation, Util::IntrusiveList<MiniHeap>::Iterator heap,
                                           const Util::SuballocationResult &suballoc)
{
	allocation->buffer_index = heap->allocation.buffer_index;
	allocation->offset = heap->allocation.offset + suballoc.offset;
	allocation->count = suballoc.size;
	allocation->mask = suballoc.mask;
	allocation->heap = heap;
	allocation->alloc = this;
}
// Sets up the chain of sub-allocators. Each hierarchy level covers
// log2(NumSubBlocks) == 5 bits of the total arena size, and levels are
// given geometrically increasing sub-block sizes via the shamt table.
void SliceAllocator::init(uint32_t sub_block_size, uint32_t num_sub_blocks_in_arena_log2,
                          Util::SliceBackingAllocator *alloc)
{
	global_allocator = alloc;
	assert(num_sub_blocks_in_arena_log2 < SliceAllocatorCount * 5 && num_sub_blocks_in_arena_log2 >= 5);
	unsigned num_hierarchies = (num_sub_blocks_in_arena_log2 + 4) / 5;
	assert(num_hierarchies <= SliceAllocatorCount);

	// Chain each level to the next coarser one; only the last (coarsest)
	// level talks to the global backing allocator directly.
	for (unsigned i = 0; i < num_hierarchies - 1; i++)
		allocators[i].parent = &allocators[i + 1];
	allocators[num_hierarchies - 1].global_allocator = alloc;

	unsigned shamt[SliceAllocatorCount] = {};
	shamt[num_hierarchies - 1] = num_sub_blocks_in_arena_log2 - Util::floor_log2(Util::LegionAllocator::NumSubBlocks);

	// Spread out the multiplier if possible, so that consecutive levels
	// differ by at most log2(NumSubBlocks) in their shift amounts.
	for (unsigned i = num_hierarchies - 1; i > 1; i--)
	{
		shamt[i - 1] = shamt[i] - shamt[i] / (i);
		assert(shamt[i] - shamt[i - 1] <= Util::floor_log2(Util::LegionAllocator::NumSubBlocks));
	}

	for (unsigned i = 0; i < num_hierarchies; i++)
	{
		allocators[i].set_sub_block_size(sub_block_size << shamt[i]);
		allocators[i].set_object_pool(&object_pool);
	}
}
// Returns a slice to whichever sub-allocator produced it. A slice with no
// owning sub-allocator but a valid buffer index presumably came straight
// from the global backing allocator, so it is returned there.
void SliceAllocator::free(const Util::AllocatedSlice &slice)
{
	if (slice.alloc)
		slice.alloc->free(slice.heap, slice.mask);
	else if (slice.buffer_index != UINT32_MAX)
		global_allocator->free(slice.buffer_index);
}
// Forwards priming to the single sub-allocator wired to the global backing
// allocator (the coarsest level set up in init()), sized to one full arena.
void SliceAllocator::prime(const void *opaque_meta)
{
	for (auto &alloc : allocators)
	{
		if (alloc.global_allocator)
		{
			alloc.global_allocator->prime(alloc.get_sub_block_size() * Util::LegionAllocator::NumSubBlocks, opaque_meta);
			break;
		}
	}
}
// Allocates count elements from the smallest hierarchy level able to fit
// the request. Fails (with a log message) when count exceeds even the
// largest level's maximum allocation size.
bool SliceAllocator::allocate(uint32_t count, Util::AllocatedSlice *slice)
{
	for (auto &alloc : allocators)
	{
		uint32_t max_alloc_size = alloc.get_max_allocation_size();
		if (count <= max_alloc_size)
			return alloc.allocate(count, slice);
	}

	LOGE("Allocation of %u elements is too large for SliceAllocator.\n", count);
	return false;
}
// Single-slot backing allocator: marks the one virtual buffer available again.
void SliceBackingAllocatorVA::free(uint32_t)
{
	allocated = false;
}
// Hands out the single virtual buffer (index 0) when it is still
// available; returns UINT32_MAX once it has already been claimed.
uint32_t SliceBackingAllocatorVA::allocate(uint32_t)
{
	if (allocated)
		return UINT32_MAX;

	allocated = true;
	return 0;
}
// Priming is a no-op for the single-buffer VA backing allocator.
void SliceBackingAllocatorVA::prime(uint32_t, const void *)
{
}
}

View File

@@ -0,0 +1,336 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stdint.h>
#include <assert.h>
#include "intrusive_list.hpp"
#include "logging.hpp"
#include "object_pool.hpp"
#include "bitops.hpp"
namespace Util
{
// Expands the buddy allocator to consider 32 "buddies".
// The allocator is logical and works in terms of units, not bytes.
class LegionAllocator
{
public:
	enum
	{
		NumSubBlocks = 32u, // One bit per sub-block in the masks below.
		AllFree = ~0u
	};

	LegionAllocator(const LegionAllocator &) = delete;
	void operator=(const LegionAllocator &) = delete;

	// All sub-blocks start out free, so the longest run spans the whole arena.
	LegionAllocator()
	{
		for (auto &v : free_blocks)
			v = AllFree;
		longest_run = 32;
	}

	// Destruction with outstanding allocations indicates a leak.
	~LegionAllocator()
	{
		if (free_blocks[0] != AllFree)
			LOGE("Memory leak in block detected.\n");
	}

	// True when no free sub-blocks remain.
	inline bool full() const
	{
		return free_blocks[0] == 0;
	}

	// True when every sub-block is free.
	inline bool empty() const
	{
		return free_blocks[0] == AllFree;
	}

	// Length (in sub-blocks) of the longest contiguous free run.
	inline uint32_t get_longest_run() const
	{
		return longest_run;
	}

	// Claims num_blocks contiguous sub-blocks, reporting the claimed bit
	// mask and the first block index through the out parameters.
	void allocate(uint32_t num_blocks, uint32_t &mask, uint32_t &offset);
	// Returns sub-blocks using the mask produced by allocate().
	void free(uint32_t mask);

private:
	// free_blocks[0] is the free bit mask; free_blocks[k] has bit b set when
	// blocks b .. b + k are all free (run-length acceleration table).
	uint32_t free_blocks[NumSubBlocks];
	uint32_t longest_run = 0;
	void update_longest_run();
};
// Represents that a legion heap is backed by some kind of allocation.
template <typename BackingAllocation>
struct LegionHeap : Util::IntrusiveListEnabled<LegionHeap<BackingAllocation>>
{
	// The backing resource this heap suballocates from.
	BackingAllocation allocation;
	// Tracks which sub-blocks of the backing allocation are in use.
	Util::LegionAllocator heap;
};
// Book-keeping for all mini-heaps of one allocator, bucketed by the
// longest free run each heap can still serve.
template <typename BackingAllocation>
struct AllocationArena
{
	// heaps[i] holds heaps whose longest free run is exactly i + 1 sub-blocks.
	Util::IntrusiveList<LegionHeap<BackingAllocation>> heaps[Util::LegionAllocator::NumSubBlocks];
	// Heaps with no free sub-blocks left.
	Util::IntrusiveList<LegionHeap<BackingAllocation>> full_heaps;
	// Bit i set iff heaps[i] is non-empty; lets allocation find a fitting
	// bucket with a single bit scan.
	uint32_t heap_availability_mask = 0;
};
// Result of carving a range out of a mini-heap.
struct SuballocationResult
{
	uint32_t offset; // Offset of the range, in allocator units.
	uint32_t size;   // Size of the range, in allocator units.
	uint32_t mask;   // Sub-block bit mask, needed to free the range again.
};
// CRTP allocator which serves requests out of 32-sub-block mini-heaps.
// The derived class provides backing storage via allocate_backing_heap() /
// free_backing_heap() and fills in the caller-visible allocation in
// prepare_allocation().
template <typename DerivedAllocator, typename BackingAllocation>
class ArenaAllocator
{
public:
	using MiniHeap = LegionHeap<BackingAllocation>;

	// Any mini-heap still registered at destruction means callers leaked
	// allocations out of this arena.
	~ArenaAllocator()
	{
		bool error = false;
		if (heap_arena.full_heaps.begin())
			error = true;
		for (auto &h : heap_arena.heaps)
			if (h.begin())
				error = true;

		if (error)
			LOGE("Memory leaked in class allocator!\n");
	}

	// Sub-block granularity in allocator units; must be a power of two.
	inline void set_sub_block_size(uint32_t size)
	{
		assert(Util::is_pow2(size));
		sub_block_size_log2 = Util::floor_log2(size);
		sub_block_size = size;
	}

	// Largest single allocation this arena can serve (one entire mini-heap).
	inline uint32_t get_max_allocation_size() const
	{
		return sub_block_size * Util::LegionAllocator::NumSubBlocks;
	}

	inline uint32_t get_sub_block_size() const
	{
		return sub_block_size;
	}

	// Every allocation starts on a sub-block boundary.
	inline uint32_t get_block_alignment() const
	{
		return get_sub_block_size();
	}

	// Allocates size units, rounded up to whole sub-blocks. Tries existing
	// heaps (bucketed by longest free run) first and only then asks the
	// derived class for a fresh backing heap.
	inline bool allocate(uint32_t size, BackingAllocation *alloc)
	{
		unsigned num_blocks = (size + sub_block_size - 1) >> sub_block_size_log2;
		// heaps[i] holds heaps whose longest free run is i + 1 blocks, so
		// drop the buckets which cannot fit num_blocks and bit-scan the rest.
		uint32_t size_mask = (1u << (num_blocks - 1)) - 1;

		uint32_t index = trailing_zeroes(heap_arena.heap_availability_mask & ~size_mask);

		if (index < LegionAllocator::NumSubBlocks)
		{
			auto itr = heap_arena.heaps[index].begin();
			assert(itr);
			assert(index >= (num_blocks - 1));
			auto &heap = *itr;
			static_cast<DerivedAllocator *>(this)->prepare_allocation(alloc, itr, suballocate(num_blocks, heap));

			// Re-bucket the heap to match its new longest free run.
			unsigned new_index = heap.heap.get_longest_run() - 1;

			if (heap.heap.full())
			{
				heap_arena.full_heaps.move_to_front(heap_arena.heaps[index], itr);
				if (!heap_arena.heaps[index].begin())
					heap_arena.heap_availability_mask &= ~(1u << index);
			}
			else if (new_index != index)
			{
				auto &new_heap = heap_arena.heaps[new_index];
				new_heap.move_to_front(heap_arena.heaps[index], itr);
				heap_arena.heap_availability_mask |= 1u << new_index;
				if (!heap_arena.heaps[index].begin())
					heap_arena.heap_availability_mask &= ~(1u << index);
			}

			return true;
		}

		// We didn't find a vacant heap, make a new one.
		auto *node = object_pool->allocate();
		if (!node)
			return false;

		auto &heap = *node;

		if (!static_cast<DerivedAllocator *>(this)->allocate_backing_heap(&heap.allocation))
		{
			object_pool->free(node);
			return false;
		}

		// This cannot fail.
		static_cast<DerivedAllocator *>(this)->prepare_allocation(alloc, node, suballocate(num_blocks, heap));

		if (heap.heap.full())
		{
			heap_arena.full_heaps.insert_front(node);
		}
		else
		{
			unsigned new_index = heap.heap.get_longest_run() - 1;
			heap_arena.heaps[new_index].insert_front(node);
			heap_arena.heap_availability_mask |= 1u << new_index;
		}

		return true;
	}

	// Frees the sub-blocks covered by mask from the given mini-heap,
	// re-bucketing the heap -- or returning its backing storage entirely
	// once it becomes empty.
	inline void free(typename IntrusiveList<MiniHeap>::Iterator itr, uint32_t mask)
	{
		auto *heap = itr.get();
		auto &block = heap->heap;
		bool was_full = block.full();
		unsigned index = block.get_longest_run() - 1;
		block.free(mask);
		unsigned new_index = block.get_longest_run() - 1;

		if (block.empty())
		{
			// Last allocation gone; give the whole backing heap back.
			static_cast<DerivedAllocator *>(this)->free_backing_heap(&heap->allocation);
			if (was_full)
				heap_arena.full_heaps.erase(heap);
			else
			{
				heap_arena.heaps[index].erase(heap);
				if (!heap_arena.heaps[index].begin())
					heap_arena.heap_availability_mask &= ~(1u << index);
			}
			object_pool->free(heap);
		}
		else if (was_full)
		{
			heap_arena.heaps[new_index].move_to_front(heap_arena.full_heaps, heap);
			heap_arena.heap_availability_mask |= 1u << new_index;
		}
		else if (index != new_index)
		{
			heap_arena.heaps[new_index].move_to_front(heap_arena.heaps[index], heap);
			heap_arena.heap_availability_mask |= 1u << new_index;
			if (!heap_arena.heaps[index].begin())
				heap_arena.heap_availability_mask &= ~(1u << index);
		}
	}

	// Pool all mini-heap nodes are allocated from; must outlive this allocator.
	inline void set_object_pool(ObjectPool<MiniHeap> *object_pool_)
	{
		object_pool = object_pool_;
	}

protected:
	AllocationArena<BackingAllocation> heap_arena;
	ObjectPool<LegionHeap<BackingAllocation>> *object_pool = nullptr;
	uint32_t sub_block_size = 1;
	uint32_t sub_block_size_log2 = 0;

private:
	// Carves num_blocks sub-blocks out of the heap and converts block
	// coordinates into unit offsets/sizes.
	inline SuballocationResult suballocate(uint32_t num_blocks, MiniHeap &heap)
	{
		SuballocationResult res = {};
		res.size = num_blocks << sub_block_size_log2;
		heap.heap.allocate(num_blocks, res.mask, res.offset);
		res.offset <<= sub_block_size_log2;
		return res;
	}
};
struct SliceSubAllocator;
// A range of elements allocated out of some backing buffer.
struct AllocatedSlice
{
	uint32_t buffer_index = UINT32_MAX; // Which backing buffer (UINT32_MAX = none).
	uint32_t offset = 0;                // Element offset within that buffer.
	uint32_t count = 0;                 // Number of elements.
	uint32_t mask = 0;                  // Sub-block mask inside the owning mini-heap.

	// Sub-allocator and mini-heap that produced the slice, needed to free it.
	SliceSubAllocator *alloc = nullptr;
	Util::IntrusiveList<Util::LegionHeap<AllocatedSlice>>::Iterator heap = {};
};
// Interface for the allocator that provides whole backing buffers to the
// slice allocator hierarchy.
struct SliceBackingAllocator
{
	virtual ~SliceBackingAllocator() = default;
	// Returns a buffer index able to hold count elements, or UINT32_MAX on failure.
	virtual uint32_t allocate(uint32_t count) = 0;
	// Releases a buffer index previously obtained from allocate().
	virtual void free(uint32_t index) = 0;
	// Optional pre-warming hook; opaque_meta is implementation defined.
	virtual void prime(uint32_t count, const void *opaque_meta) = 0;
};
// Trivial backing allocator exposing a single virtual buffer (index 0).
struct SliceBackingAllocatorVA : SliceBackingAllocator
{
	uint32_t allocate(uint32_t count) override;
	void free(uint32_t index) override;
	void prime(uint32_t count, const void *opaque_meta) override;
	// Whether the single buffer is currently handed out.
	bool allocated = false;
};
// One level of the slice allocator hierarchy. Either chains to a coarser
// parent level or (at the top) takes buffers from the global backing allocator.
struct SliceSubAllocator : Util::ArenaAllocator<SliceSubAllocator, AllocatedSlice>
{
	SliceSubAllocator *parent = nullptr;
	SliceBackingAllocator *global_allocator = nullptr;

	// Implements curious recurring template pattern calls.
	bool allocate_backing_heap(AllocatedSlice *allocation);
	void free_backing_heap(AllocatedSlice *allocation) const;
	void prepare_allocation(AllocatedSlice *allocation, Util::IntrusiveList<MiniHeap>::Iterator heap,
	                        const Util::SuballocationResult &suballoc);
};
// Front-end that routes allocations to a chain of SliceSubAllocators with
// geometrically increasing sub-block sizes; see init() for the setup.
class SliceAllocator
{
public:
	// Allocates count elements; false when no level can satisfy the request.
	bool allocate(uint32_t count, Util::AllocatedSlice *slice);
	// Returns a slice obtained from allocate().
	void free(const Util::AllocatedSlice &slice);
	// Pre-warms the backing allocator; opaque_meta is forwarded as-is.
	void prime(const void *opaque_meta);

protected:
	SliceAllocator() = default;
	void init(uint32_t sub_block_size, uint32_t num_sub_blocks_in_arena_log2, SliceBackingAllocator *alloc);

private:
	Util::ObjectPool<Util::LegionHeap<Util::AllocatedSlice>> object_pool;
	SliceBackingAllocator *global_allocator = nullptr;
	enum { SliceAllocatorCount = 5 };
	Util::SliceSubAllocator allocators[SliceAllocatorCount];
};
}

View File

@@ -0,0 +1,173 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#ifdef _MSC_VER
#include <intrin.h>
#endif
namespace Util
{
#ifdef __GNUC__
// GCC/Clang path: wrap the compiler builtins. The (x) == 0 guards matter
// because __builtin_clz/__builtin_ctz are undefined for zero input; the
// wrappers define the result as the full bit width instead.
// NOTE(review): trailing_ones(~0u) still reaches __builtin_ctz(0) -- confirm
// callers never pass an all-ones value.
#define leading_zeroes_(x) ((x) == 0 ? 32 : __builtin_clz(x))
#define trailing_zeroes_(x) ((x) == 0 ? 32 : __builtin_ctz(x))
#define trailing_ones_(x) __builtin_ctz(~uint32_t(x))
#define leading_zeroes64_(x) ((x) == 0 ? 64 : __builtin_clzll(x))
#define trailing_zeroes64_(x) ((x) == 0 ? 64 : __builtin_ctzll(x))
#define trailing_ones64_(x) __builtin_ctzll(~uint64_t(x))
#define popcount32_(x) __builtin_popcount(x)

// Public API; the macros above are implementation details.
static inline uint32_t leading_zeroes(uint32_t x) { return leading_zeroes_(x); }
static inline uint32_t trailing_zeroes(uint32_t x) { return trailing_zeroes_(x); }
static inline uint32_t trailing_ones(uint32_t x) { return trailing_ones_(x); }
static inline uint32_t leading_zeroes64(uint64_t x) { return leading_zeroes64_(x); }
static inline uint32_t trailing_zeroes64(uint64_t x) { return trailing_zeroes64_(x); }
static inline uint32_t trailing_ones64(uint64_t x) { return trailing_ones64_(x); }
static inline uint32_t popcount32(uint32_t x) { return popcount32_(x); }
#elif defined(_MSC_VER)
// MSVC path: _BitScanForward/Reverse report "no bit found" for zero input,
// which the wrappers translate to the full bit width for parity with the
// GCC path above.
// NOTE(review): the 64-bit intrinsics are only available when targeting
// 64-bit -- confirm 32-bit MSVC builds are not supported here.
namespace Internal
{
static inline uint32_t popcount32(uint32_t x)
{
	return __popcnt(x);
}

static inline uint32_t clz(uint32_t x)
{
	unsigned long result;
	if (_BitScanReverse(&result, x))
		return 31 - result;
	else
		return 32;
}

static inline uint32_t ctz(uint32_t x)
{
	unsigned long result;
	if (_BitScanForward(&result, x))
		return result;
	else
		return 32;
}

static inline uint32_t clz64(uint64_t x)
{
	unsigned long result;
	if (_BitScanReverse64(&result, x))
		return 63 - result;
	else
		return 64;
}

static inline uint32_t ctz64(uint64_t x)
{
	unsigned long result;
	if (_BitScanForward64(&result, x))
		return result;
	else
		return 64;
}
}

static inline uint32_t leading_zeroes(uint32_t x) { return Internal::clz(x); }
static inline uint32_t trailing_zeroes(uint32_t x) { return Internal::ctz(x); }
static inline uint32_t trailing_ones(uint32_t x) { return Internal::ctz(~x); }
static inline uint32_t leading_zeroes64(uint64_t x) { return Internal::clz64(x); }
static inline uint32_t trailing_zeroes64(uint64_t x) { return Internal::ctz64(x); }
static inline uint32_t trailing_ones64(uint64_t x) { return Internal::ctz64(~x); }
static inline uint32_t popcount32(uint32_t x) { return Internal::popcount32(x); }
#else
#error "Implement me."
#endif
// Invokes func(bit_index) once for every set bit in value, lowest bit first.
template <typename T>
inline void for_each_bit64(uint64_t value, const T &func)
{
	uint64_t remaining = value;
	while (remaining != 0)
	{
		uint32_t index = trailing_zeroes64(remaining);
		func(index);
		remaining &= remaining - 1; // Clears the lowest set bit.
	}
}
// Invokes func(bit_index) once for every set bit in value, lowest bit first.
template <typename T>
inline void for_each_bit(uint32_t value, const T &func)
{
	uint32_t remaining = value;
	while (remaining != 0)
	{
		uint32_t index = trailing_zeroes(remaining);
		func(index);
		remaining &= remaining - 1u; // Clears the lowest set bit.
	}
}
// Invokes func(offset, length) for every maximal run of consecutive set
// bits in value, lowest run first.
template <typename T>
inline void for_each_bit_range(uint32_t value, const T &func)
{
	// All 32 bits set must be special-cased: range below would be 32 and
	// (1u << 32) is undefined.
	if (value == ~0u)
	{
		func(0, 32);
		return;
	}

	uint32_t bit_offset = 0;
	while (value)
	{
		uint32_t bit = trailing_zeroes(value);
		// Shift the next run down to bit 0; bit_offset tracks the absolute
		// position since value stays in shifted-down coordinates afterwards.
		bit_offset += bit;
		value >>= bit;
		uint32_t range = trailing_ones(value);
		func(bit_offset, range);
		value &= ~((1u << range) - 1);
	}
}
// True when at most one bit of value is set. Note that value == 0 also
// reports true.
template <typename T>
inline bool is_pow2(T value)
{
	const T below = value - T(1);
	return (value & below) == T(0);
}
// Rounds v up to the nearest power of two; powers of two map to
// themselves. next_pow2(0) yields 0 and inputs above 2^31 wrap to 0.
inline uint32_t next_pow2(uint32_t v)
{
	uint32_t x = v - 1u;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1u;
}
// Rounds v down to the nearest power of two; powers of two map to
// themselves. NOTE(review): v == 0 and v == UINT32_MAX both wrap around
// inside next_pow2() and return 0 -- confirm callers avoid these inputs.
inline uint32_t prev_pow2(uint32_t v)
{
	return next_pow2(v + 1) >> 1;
}
// Index of the highest set bit. Requires v != 0: leading_zeroes(0) == 32
// makes the subtraction wrap around.
inline uint32_t floor_log2(uint32_t v)
{
	return 31 - leading_zeroes(v);
}
}

View File

@@ -0,0 +1,59 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include "aligned_alloc.hpp"
#include <memory>
#include <algorithm>
#include <type_traits>
namespace Util
{
// Minimal aligned buffer with capacity-only semantics: no size tracking,
// no element construction/destruction, no bounds checks.
template <typename T>
class DynamicArray
{
public:
	// Only POD-like types work here since we don't invoke placement new or delete.
	static_assert(std::is_trivially_default_constructible<T>::value, "T must be trivially constructible.");
	static_assert(std::is_trivially_destructible<T>::value, "T must be trivially destructible.");

	// Grows capacity to at least n elements; shrinking requests are ignored.
	// Storage is aligned to max(64, alignof(T)).
	// NOTE(review): growing replaces the buffer without copying the old
	// contents, and a failed memalign_alloc leaves data() == nullptr with N
	// already updated -- confirm callers treat reserve() as destructive.
	inline void reserve(size_t n)
	{
		if (n > N)
		{
			buffer.reset(static_cast<T *>(memalign_alloc(std::max<size_t>(64, alignof(T)), n * sizeof(T))));
			N = n;
		}
	}

	inline T &operator[](size_t index) { return buffer.get()[index]; }
	inline const T &operator[](size_t index) const { return buffer.get()[index]; }
	inline T *data() { return buffer.get(); }
	inline const T *data() const { return buffer.get(); }
	// Capacity in elements, not bytes.
	inline size_t get_capacity() const { return N; }

private:
	// Owned via AlignedDeleter so memory is returned through memalign_free().
	std::unique_ptr<T, AlignedDeleter> buffer;
	size_t N = 0;
};
}

View File

@@ -0,0 +1,34 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <type_traits>
namespace Util
{
// Casts an enum (or enum class) value to its underlying integer type.
template <typename T>
constexpr typename std::underlying_type<T>::type ecast(T x)
{
	using U = typename std::underlying_type<T>::type;
	return static_cast<U>(x);
}
}

View File

@@ -0,0 +1,96 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include "environment.hpp"
#include <string>
#include <stdlib.h>
namespace Util
{
// Reads the environment variable env into str. Returns true when the
// variable exists (str is only modified on success).
bool get_environment(const char *env, std::string &str)
{
#ifdef _WIN32
	// GetEnvironmentVariableA returns the number of characters copied on
	// success, 0 on failure, and the *required* buffer size (including the
	// terminating NUL) when the buffer is too small. The old code treated
	// any non-zero return as success, so values longer than the stack
	// buffer produced garbage.
	char buf[4096];
	DWORD count = GetEnvironmentVariableA(env, buf, sizeof(buf));
	if (count == 0)
		return false;

	if (count < sizeof(buf))
	{
		str = { buf, buf + count };
		return true;
	}

	// Value longer than the stack buffer; retry with the reported size.
	std::string large(count, '\0');
	DWORD written = GetEnvironmentVariableA(env, &large[0], count);
	if (written == 0 || written >= count)
		return false;
	large.resize(written);
	str.swap(large);
	return true;
#else
	if (const char *v = getenv(env))
	{
		str = v;
		return true;
	}
	else
		return false;
#endif
}
// Sets (and overwrites) a process environment variable on Win32 and POSIX.
void set_environment(const char *env, const char *value)
{
#ifdef _WIN32
	SetEnvironmentVariableA(env, value);
#else
	setenv(env, value, 1);
#endif
}
// Returns the value of env, or default_value when the variable is unset.
std::string get_environment_string(const char *env, const char *default_value)
{
	std::string result;
	if (get_environment(env, result))
		return result;
	return default_value;
}
// Reads env as an unsigned integer; returns default_value when the
// variable is unset or does not parse as a number.
unsigned get_environment_uint(const char *env, unsigned default_value)
{
	unsigned value = default_value;
	std::string v;
	if (get_environment(env, v))
	{
		// std::stoul throws on non-numeric or out-of-range input; fall back
		// to the default instead of letting the exception escape.
		try
		{
			value = unsigned(std::stoul(v));
		}
		catch (...)
		{
			value = default_value;
		}
	}
	return value;
}
// Reads env as a signed integer; returns default_value when the variable
// is unset or does not parse as a number.
int get_environment_int(const char *env, int default_value)
{
	int value = default_value;
	std::string v;
	if (get_environment(env, v))
	{
		// std::stol throws on non-numeric or out-of-range input; fall back
		// to the default instead of letting the exception escape.
		try
		{
			value = int(std::stol(v));
		}
		catch (...)
		{
			value = default_value;
		}
	}
	return value;
}
// Interprets env as a boolean: any value parsing to non-zero is true.
bool get_environment_bool(const char *env, bool default_value)
{
	const int fallback = default_value ? 1 : 0;
	return get_environment_int(env, fallback) != 0;
}
}

View File

@@ -0,0 +1,35 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <string>
namespace Util
{
bool get_environment(const char *env, std::string &str);
std::string get_environment_string(const char *env, const char *default_value);
unsigned get_environment_uint(const char *env, unsigned default_value);
int get_environment_int(const char *env, int default_value);
bool get_environment_bool(const char *env, bool default_value);
void set_environment(const char *env, const char *value);
}

View File

@@ -0,0 +1,105 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stdint.h>
#include <string>
namespace Util
{
using Hash = uint64_t;

// Incremental FNV-1a style 64-bit hasher. Inputs are folded in as
// state = (state * prime) ^ word, usually in 32-bit words.
class Hasher
{
public:
	// Resume hashing on top of a previously obtained hash value.
	explicit Hasher(Hash h_)
	    : state(h_)
	{
	}

	// Starts from the FNV-1a 64-bit offset basis.
	Hasher() = default;

	// Folds size bytes (truncated to whole elements) from data_ into the hash.
	template <typename T>
	inline void data(const T *data_, size_t size)
	{
		size_t count = size / sizeof(*data_);
		for (size_t i = 0; i < count; i++)
			state = (state * 0x100000001b3ull) ^ data_[i];
	}

	inline void u32(uint32_t value)
	{
		state = (state * 0x100000001b3ull) ^ value;
	}

	inline void s32(int32_t value)
	{
		u32(uint32_t(value));
	}

	// Hashes the bit pattern of the float (so +0.0f and -0.0f hash differently).
	inline void f32(float value)
	{
		union
		{
			float as_f32;
			uint32_t as_u32;
		} pun;
		pun.as_f32 = value;
		u32(pun.as_u32);
	}

	// Hashed as two 32-bit words, low word first.
	inline void u64(uint64_t value)
	{
		u32(uint32_t(value & 0xffffffffu));
		u32(uint32_t(value >> 32));
	}

	template <typename T>
	inline void pointer(T *ptr)
	{
		u64(reinterpret_cast<uintptr_t>(ptr));
	}

	// NUL-terminated variant; a 0xff marker is mixed in first so even an
	// empty string perturbs the hash.
	inline void string(const char *str)
	{
		u32(0xff);
		for (const char *p = str; *p != '\0'; ++p)
			u32(uint8_t(*p));
	}

	inline void string(const std::string &str)
	{
		u32(0xff);
		for (char c : str)
			u32(uint8_t(c));
	}

	// Current hash value; hashing may continue afterwards.
	inline Hash get() const
	{
		return state;
	}

private:
	Hash state = 0xcbf29ce484222325ull;
};
}

View File

@@ -0,0 +1,310 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stddef.h>
#include <utility>
#include <memory>
#include <atomic>
#include <type_traits>
namespace Util
{
// Non-atomic reference count for single-threaded ownership.
// A freshly constructed counter already holds one reference.
class SingleThreadCounter
{
public:
	// Acquire one additional reference.
	inline void add_ref()
	{
		++count;
	}

	// Drop one reference; returns true when the last reference is gone.
	inline bool release()
	{
		--count;
		return count == 0;
	}

private:
	uint32_t count = 1;
};
// Atomic reference count for shared ownership across threads.
// Starts at one reference, matching SingleThreadCounter.
class MultiThreadCounter
{
public:
	MultiThreadCounter()
	{
		count.store(1, std::memory_order_relaxed);
	}

	// Relaxed is sufficient for acquiring; visibility is enforced on release.
	inline void add_ref()
	{
		count.fetch_add(1, std::memory_order_relaxed);
	}

	// Returns true when the caller dropped the final reference.
	// acq_rel ordering makes prior writes visible to the destroying thread.
	inline bool release()
	{
		return count.fetch_sub(1, std::memory_order_acq_rel) == 1;
	}

private:
	std::atomic_uint32_t count;
};
template <typename T>
class IntrusivePtr;

// CRTP base which embeds the reference count directly in the object.
// T is the most-derived type, Deleter destroys the object once the count
// reaches zero, and ReferenceOps supplies the counter implementation
// (single-threaded by default; see ThreadSafeIntrusivePtrEnabled).
template <typename T, typename Deleter = std::default_delete<T>, typename ReferenceOps = SingleThreadCounter>
class IntrusivePtrEnabled
{
public:
	using IntrusivePtrType = IntrusivePtr<T>;
	using EnabledBase = T;
	using EnabledDeleter = Deleter;
	using EnabledReferenceOp = ReferenceOps;

	// Drop one reference; the Deleter destroys the object on the last release.
	void release_reference()
	{
		if (reference_count.release())
			Deleter()(static_cast<T *>(this));
	}

	void add_reference()
	{
		reference_count.add_ref();
	}

	IntrusivePtrEnabled() = default;

	// Reference-counted objects must not be copied.
	IntrusivePtrEnabled(const IntrusivePtrEnabled &) = delete;
	void operator=(const IntrusivePtrEnabled &) = delete;

protected:
	// Create a new IntrusivePtr which shares ownership of this object.
	Util::IntrusivePtr<T> reference_from_this();

private:
	ReferenceOps reference_count;
};
// Smart pointer for objects deriving from IntrusivePtrEnabled<T>.
// The reference count lives inside the pointee, so the pointer itself is
// a single raw pointer. Converting copies/moves from IntrusivePtr<U> are
// allowed when U derives from T.
template <typename T>
class IntrusivePtr
{
public:
	template <typename U>
	friend class IntrusivePtr;

	IntrusivePtr() = default;

	// Adopts an existing reference; does NOT add one.
	explicit IntrusivePtr(T *handle)
		: data(handle)
	{
	}

	T &operator*()
	{
		return *data;
	}

	const T &operator*() const
	{
		return *data;
	}

	T *operator->()
	{
		return data;
	}

	const T *operator->() const
	{
		return data;
	}

	explicit operator bool() const
	{
		return data != nullptr;
	}

	bool operator==(const IntrusivePtr &other) const
	{
		return data == other.data;
	}

	bool operator!=(const IntrusivePtr &other) const
	{
		return data != other.data;
	}

	T *get()
	{
		return data;
	}

	const T *get() const
	{
		return data;
	}

	// Drops this pointer's reference (possibly destroying the object)
	// and becomes null.
	void reset()
	{
		using ReferenceBase = IntrusivePtrEnabled<
		    typename T::EnabledBase,
		    typename T::EnabledDeleter,
		    typename T::EnabledReferenceOp>;

		// Static up-cast here to avoid potential issues with multiple intrusive inheritance.
		// Also makes sure that the pointer type actually inherits from this type.
		if (data)
			static_cast<ReferenceBase *>(data)->release_reference();
		data = nullptr;
	}

	template <typename U>
	IntrusivePtr &operator=(const IntrusivePtr<U> &other)
	{
		static_assert(std::is_base_of<T, U>::value,
		              "Cannot safely assign downcasted intrusive pointers.");

		using ReferenceBase = IntrusivePtrEnabled<
		    typename T::EnabledBase,
		    typename T::EnabledDeleter,
		    typename T::EnabledReferenceOp>;

		reset();
		data = static_cast<T *>(other.data);

		// Static up-cast here to avoid potential issues with multiple intrusive inheritance.
		// Also makes sure that the pointer type actually inherits from this type.
		if (data)
			static_cast<ReferenceBase *>(data)->add_reference();
		return *this;
	}

	IntrusivePtr &operator=(const IntrusivePtr &other)
	{
		using ReferenceBase = IntrusivePtrEnabled<
		    typename T::EnabledBase,
		    typename T::EnabledDeleter,
		    typename T::EnabledReferenceOp>;

		if (this != &other)
		{
			reset();
			data = other.data;
			if (data)
				static_cast<ReferenceBase *>(data)->add_reference();
		}
		return *this;
	}

	template <typename U>
	IntrusivePtr(const IntrusivePtr<U> &other)
	{
		*this = other;
	}

	IntrusivePtr(const IntrusivePtr &other)
	{
		*this = other;
	}

	~IntrusivePtr()
	{
		reset();
	}

	// Converting move assignment: steals the reference, no count traffic.
	// static_assert added for parity with the copy assignment above so
	// invalid conversions fail with a clear diagnostic.
	template <typename U>
	IntrusivePtr &operator=(IntrusivePtr<U> &&other) noexcept
	{
		static_assert(std::is_base_of<T, U>::value,
		              "Cannot safely assign downcasted intrusive pointers.");
		reset();
		data = other.data;
		other.data = nullptr;
		return *this;
	}

	IntrusivePtr &operator=(IntrusivePtr &&other) noexcept
	{
		if (this != &other)
		{
			reset();
			data = other.data;
			other.data = nullptr;
		}
		return *this;
	}

	template <typename U>
	IntrusivePtr(IntrusivePtr<U> &&other) noexcept
	{
		*this = std::move(other);
	}

	// BUGFIX: this overload was previously declared as
	//   template <typename U> IntrusivePtr(IntrusivePtr &&other)
	// where U could never be deduced, so it was dead code and the class
	// had no dedicated same-type move constructor. It is now the plain
	// noexcept move constructor.
	IntrusivePtr(IntrusivePtr &&other) noexcept
	{
		*this = std::move(other);
	}

	// Relinquish ownership without releasing the reference.
	T *release() &
	{
		T *ret = data;
		data = nullptr;
		return ret;
	}

	T *release() &&
	{
		T *ret = data;
		data = nullptr;
		return ret;
	}

private:
	T *data = nullptr;
};
// Hand out a new owning IntrusivePtr to this object.
// Adds a reference first so the returned pointer owns its own reference
// (IntrusivePtr's explicit constructor adopts, it does not add).
template <typename T, typename Deleter, typename ReferenceOps>
IntrusivePtr<T> IntrusivePtrEnabled<T, Deleter, ReferenceOps>::reference_from_this()
{
	add_reference();
	return IntrusivePtr<T>(static_cast<T *>(this));
}
// Alias kept for readability at call sites.
template <typename Derived>
using DerivedIntrusivePtrType = IntrusivePtr<Derived>;

// Allocate a T with operator new and wrap it in an owning IntrusivePtr,
// analogous to std::make_shared for intrusive types.
template <typename T, typename... P>
DerivedIntrusivePtrType<T> make_handle(P &&... p)
{
	return DerivedIntrusivePtrType<T>(new T(std::forward<P>(p)...));
}

// Allocate Derived but return it as Base's pointer type.
template <typename Base, typename Derived, typename... P>
typename Base::IntrusivePtrType make_derived_handle(P &&... p)
{
	return typename Base::IntrusivePtrType(new Derived(std::forward<P>(p)...));
}

// Convenience alias for intrusive objects with an atomic reference count.
template <typename T>
using ThreadSafeIntrusivePtrEnabled = IntrusivePtrEnabled<T, std::default_delete<T>, MultiThreadCounter>;
}

View File

@@ -0,0 +1,690 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include "hash.hpp"
#include "intrusive_list.hpp"
#include "object_pool.hpp"
#include "read_write_lock.hpp"
#include <assert.h>
#include <vector>
namespace Util
{
// Mixin which stores the hash key inside the element itself and, via
// IntrusiveListEnabled, the list links used by the hashmap for iteration.
template <typename T>
class IntrusiveHashMapEnabled : public IntrusiveListEnabled<T>
{
public:
	IntrusiveHashMapEnabled() = default;
	IntrusiveHashMapEnabled(Util::Hash hash)
		: intrusive_hashmap_key(hash)
	{
	}

	void set_hash(Util::Hash hash)
	{
		intrusive_hashmap_key = hash;
	}

	Util::Hash get_hash() const
	{
		return intrusive_hashmap_key;
	}

private:
	Hash intrusive_hashmap_key = 0;
};
// Adapts a plain value type so it can live in an intrusive hashmap
// (adds the embedded key and list links around a stored T).
template <typename T>
struct IntrusivePODWrapper : public IntrusiveHashMapEnabled<IntrusivePODWrapper<T>>
{
	template <typename U>
	explicit IntrusivePODWrapper(U&& value_)
		: value(std::forward<U>(value_))
	{
	}

	IntrusivePODWrapper() = default;

	T& get()
	{
		return value;
	}

	const T& get() const
	{
		return value;
	}

	T value = {};
};
// This HashMap is non-owning. It just arranges a list of pointers.
// It's kind of special purpose container used by the Vulkan backend.
// Dealing with memory ownership is done through composition by a different class.
// T must inherit from IntrusiveHashMapEnabled<T>.
// Each instance of T can only be part of one hashmap.
//
// Implementation notes:
// - Open addressing with linear probing over a power-of-two table.
// - A probe walks at most load_count slots before giving up and growing.
// - All resident elements are also linked into an intrusive list, which
//   is used for iteration and for re-inserting during rehash.
template <typename T>
class IntrusiveHashMapHolder
{
public:
	// Initial table size must remain a power of two (masking relies on it).
	enum { InitialSize = 16, InitialLoadCount = 3 };

	// Returns the element with this hash, or nullptr.
	T *find(Hash hash) const
	{
		if (values.empty())
			return nullptr;

		Hash hash_mask = values.size() - 1;
		auto masked = hash & hash_mask;
		for (unsigned i = 0; i < load_count; i++)
		{
			if (values[masked] && get_hash(values[masked]) == hash)
				return values[masked];
			masked = (masked + 1) & hash_mask;
		}
		return nullptr;
	}

	// Copies the wrapped value (T::get()) into p if the key exists.
	template <typename P>
	bool find_and_consume_pod(Hash hash, P &p) const
	{
		T *t = find(hash);
		if (t)
		{
			p = t->get();
			return true;
		}
		else
			return false;
	}

	// Inserts, if value already exists, insertion does not happen.
	// Return value is the data which is not part of the hashmap.
	// It should be deleted or similar.
	// Returns nullptr if nothing was in the hashmap for this key.
	// NOTE: on a yield, value is rewritten to point at the resident element.
	T *insert_yield(T *&value)
	{
		if (values.empty())
			grow();

		Hash hash_mask = values.size() - 1;
		auto hash = get_hash(value);
		auto masked = hash & hash_mask;

		for (unsigned i = 0; i < load_count; i++)
		{
			if (values[masked] && get_hash(values[masked]) == hash)
			{
				// Key already present: hand the new element back to the caller.
				T *ret = value;
				value = values[masked];
				return ret;
			}
			else if (!values[masked])
			{
				values[masked] = value;
				list.insert_front(value);
				return nullptr;
			}
			masked = (masked + 1) & hash_mask;
		}

		// Probe window exhausted; rehash into a larger table and retry.
		grow();
		return insert_yield(value);
	}

	// Insert value, replacing any existing element with the same hash.
	// Returns the displaced element (caller now owns it), or nullptr.
	T *insert_replace(T *value)
	{
		if (values.empty())
			grow();

		Hash hash_mask = values.size() - 1;
		auto hash = get_hash(value);
		auto masked = hash & hash_mask;

		for (unsigned i = 0; i < load_count; i++)
		{
			if (values[masked] && get_hash(values[masked]) == hash)
			{
				// Swap in the new element, unlink the old one from the list.
				std::swap(values[masked], value);
				list.erase(value);
				list.insert_front(values[masked]);
				return value;
			}
			else if (!values[masked])
			{
				assert(!values[masked]);
				values[masked] = value;
				list.insert_front(value);
				return nullptr;
			}
			masked = (masked + 1) & hash_mask;
		}

		grow();
		return insert_replace(value);
	}

	// Unlink and return the element with this hash (nullptr if absent).
	// Safe on an empty table: load_count is 0 until the first grow().
	T *erase(Hash hash)
	{
		Hash hash_mask = values.size() - 1;
		auto masked = hash & hash_mask;

		for (unsigned i = 0; i < load_count; i++)
		{
			if (values[masked] && get_hash(values[masked]) == hash)
			{
				auto *value = values[masked];
				list.erase(value);
				values[masked] = nullptr;
				return value;
			}
			masked = (masked + 1) & hash_mask;
		}

		return nullptr;
	}

	void erase(T *value)
	{
		erase(get_hash(value));
	}

	// Drops all elements; does not free them (container is non-owning).
	void clear()
	{
		list.clear();
		values.clear();
		load_count = 0;
	}

	typename IntrusiveList<T>::Iterator begin() const
	{
		return list.begin();
	}

	typename IntrusiveList<T>::Iterator end() const
	{
		return list.end();
	}

	IntrusiveList<T> &inner_list()
	{
		return list;
	}

	const IntrusiveList<T> &inner_list() const
	{
		return list;
	}

private:
	inline bool compare_key(Hash masked, Hash hash) const
	{
		return get_key_for_index(masked) == hash;
	}

	// Hash is stored inside the element via IntrusiveHashMapEnabled.
	inline Hash get_hash(const T *value) const
	{
		return static_cast<const IntrusiveHashMapEnabled<T> *>(value)->get_hash();
	}

	inline Hash get_key_for_index(Hash masked) const
	{
		return get_hash(values[masked]);
	}

	// Try to place value without growing; fails if the probe window is full.
	bool insert_inner(T *value)
	{
		Hash hash_mask = values.size() - 1;
		auto hash = get_hash(value);
		auto masked = hash & hash_mask;

		for (unsigned i = 0; i < load_count; i++)
		{
			if (!values[masked])
			{
				values[masked] = value;
				return true;
			}
			masked = (masked + 1) & hash_mask;
		}
		return false;
	}

	// Doubles the table (and widens the probe window by one) until every
	// element in the intrusive list re-inserts successfully.
	void grow()
	{
		bool success;
		do
		{
			for (auto &v : values)
				v = nullptr;

			if (values.empty())
			{
				values.resize(InitialSize);
				load_count = InitialLoadCount;
				//LOGI("Growing hashmap to %u elements.\n", InitialSize);
			}
			else
			{
				values.resize(values.size() * 2);
				//LOGI("Growing hashmap to %u elements.\n", unsigned(values.size()));
				load_count++;
			}

			// Re-insert.
			success = true;
			for (auto &t : list)
			{
				if (!insert_inner(&t))
				{
					success = false;
					break;
				}
			}
		} while (!success);
	}

	std::vector<T *> values;
	IntrusiveList<T> list;
	unsigned load_count = 0;
};
// Owning variant: combines IntrusiveHashMapHolder with an ObjectPool so
// the map allocates, recycles and frees its own elements.
template <typename T>
class IntrusiveHashMap
{
public:
	~IntrusiveHashMap()
	{
		clear();
	}

	IntrusiveHashMap() = default;
	IntrusiveHashMap(const IntrusiveHashMap &) = delete;
	void operator=(const IntrusiveHashMap &) = delete;

	// Frees every element back to the pool, then resets the table.
	void clear()
	{
		auto &list = hashmap.inner_list();
		auto itr = list.begin();
		while (itr != list.end())
		{
			auto *to_free = itr.get();
			itr = list.erase(itr);
			pool.free(to_free);
		}

		hashmap.clear();
	}

	T *find(Hash hash) const
	{
		return hashmap.find(hash);
	}

	// Default-constructs an element on first access, like std::map.
	T &operator[](Hash hash)
	{
		auto *t = find(hash);
		if (!t)
			t = emplace_yield(hash);
		return *t;
	}

	template <typename P>
	bool find_and_consume_pod(Hash hash, P &p) const
	{
		return hashmap.find_and_consume_pod(hash, p);
	}

	void erase(T *value)
	{
		hashmap.erase(value);
		pool.free(value);
	}

	void erase(Hash hash)
	{
		auto *value = hashmap.erase(hash);
		if (value)
			pool.free(value);
	}

	// Construct and insert, replacing any existing element for hash.
	template <typename... P>
	T *emplace_replace(Hash hash, P&&... p)
	{
		T *t = allocate(std::forward<P>(p)...);
		return insert_replace(hash, t);
	}

	// Construct and insert; if the key exists, the new element is freed
	// and the existing one is kept.
	template <typename... P>
	T *emplace_yield(Hash hash, P&&... p)
	{
		T *t = allocate(std::forward<P>(p)...);
		return insert_yield(hash, t);
	}

	template <typename... P>
	T *allocate(P&&... p)
	{
		return pool.allocate(std::forward<P>(p)...);
	}

	void free(T *value)
	{
		pool.free(value);
	}

	// Takes ownership of value; any displaced element is freed.
	// Returns the element now resident in the map.
	T *insert_replace(Hash hash, T *value)
	{
		static_cast<IntrusiveHashMapEnabled<T> *>(value)->set_hash(hash);
		T *to_delete = hashmap.insert_replace(value);
		if (to_delete)
			pool.free(to_delete);
		return value;
	}

	// Takes ownership of value; on a yield the new element is freed and
	// the resident one is returned (holder rewrites value in that case).
	T *insert_yield(Hash hash, T *value)
	{
		static_cast<IntrusiveHashMapEnabled<T> *>(value)->set_hash(hash);
		T *to_delete = hashmap.insert_yield(value);
		if (to_delete)
			pool.free(to_delete);
		return value;
	}

	typename IntrusiveList<T>::Iterator begin() const
	{
		return hashmap.begin();
	}

	typename IntrusiveList<T>::Iterator end() const
	{
		return hashmap.end();
	}

	// Interface parity with ThreadSafeIntrusiveHashMap.
	IntrusiveHashMap &get_thread_unsafe()
	{
		return *this;
	}

	const IntrusiveHashMap &get_thread_unsafe() const
	{
		return *this;
	}

private:
	IntrusiveHashMapHolder<T> hashmap;
	ObjectPool<T> pool;
};
// Convenience alias: hashmap of plain values wrapped in IntrusivePODWrapper.
template <typename T>
using IntrusiveHashMapWrapper = IntrusiveHashMap<IntrusivePODWrapper<T>>;

// IntrusiveHashMap protected by an RWSpinLock.
// Lookups take the lock shared; any mutation takes it exclusive.
template <typename T>
class ThreadSafeIntrusiveHashMap
{
public:
	T *find(Hash hash) const
	{
		lock.lock_read();
		T *t = hashmap.find(hash);
		lock.unlock_read();

		// We can race with the intrusive list internal pointers,
		// but that's an internal detail which should never be touched outside the hashmap.
		return t;
	}

	template <typename P>
	bool find_and_consume_pod(Hash hash, P &p) const
	{
		lock.lock_read();
		bool ret = hashmap.find_and_consume_pod(hash, p);
		lock.unlock_read();
		return ret;
	}

	void clear()
	{
		lock.lock_write();
		hashmap.clear();
		lock.unlock_write();
	}

	// Assumption is that readers will not be erased while in use by any other thread.
	void erase(T *value)
	{
		lock.lock_write();
		hashmap.erase(value);
		lock.unlock_write();
	}

	void erase(Hash hash)
	{
		lock.lock_write();
		hashmap.erase(hash);
		lock.unlock_write();
	}

	template <typename... P>
	T *allocate(P&&... p)
	{
		lock.lock_write();
		T *t = hashmap.allocate(std::forward<P>(p)...);
		lock.unlock_write();
		return t;
	}

	void free(T *value)
	{
		lock.lock_write();
		hashmap.free(value);
		lock.unlock_write();
	}

	T *insert_replace(Hash hash, T *value)
	{
		lock.lock_write();
		value = hashmap.insert_replace(hash, value);
		lock.unlock_write();
		return value;
	}

	T *insert_yield(Hash hash, T *value)
	{
		lock.lock_write();
		value = hashmap.insert_yield(hash, value);
		lock.unlock_write();
		return value;
	}

	// This one is very sketchy, since callers need to make sure there are no readers of this hash.
	template <typename... P>
	T *emplace_replace(Hash hash, P&&... p)
	{
		lock.lock_write();
		T *t = hashmap.emplace_replace(hash, std::forward<P>(p)...);
		lock.unlock_write();
		return t;
	}

	template <typename... P>
	T *emplace_yield(Hash hash, P&&... p)
	{
		lock.lock_write();
		T *t = hashmap.emplace_yield(hash, std::forward<P>(p)...);
		lock.unlock_write();
		return t;
	}

	// Not supposed to be called in racy conditions,
	// we could have a global read lock and unlock while iterating if necessary.
	typename IntrusiveList<T>::Iterator begin()
	{
		return hashmap.begin();
	}

	typename IntrusiveList<T>::Iterator end()
	{
		return hashmap.end();
	}

	// Escape hatch for bulk work once external synchronization is guaranteed.
	IntrusiveHashMap<T> &get_thread_unsafe()
	{
		return hashmap;
	}

	const IntrusiveHashMap<T> &get_thread_unsafe() const
	{
		return hashmap;
	}

private:
	IntrusiveHashMap<T> hashmap;
	mutable RWSpinLock lock;
};
// A special purpose hashmap which is split into a read-only, immutable portion and a plain thread-safe one.
// User can move read-write thread-safe portion to read-only portion when user knows it's safe to do so.
template <typename T>
class ThreadSafeIntrusiveHashMapReadCached
{
public:
	~ThreadSafeIntrusiveHashMapReadCached()
	{
		clear();
	}

	// Checks the lock-free read-only table first, then the locked one.
	T *find(Hash hash) const
	{
		T *t = read_only.find(hash);
		if (t)
			return t;

		lock.lock_read();
		t = read_write.find(hash);
		lock.unlock_read();
		return t;
	}

	// Migrate all elements from read-write into read-only.
	// NOTE(review): takes no lock — caller must guarantee no concurrent
	// access while this runs.
	void move_to_read_only()
	{
		auto &list = read_write.inner_list();
		auto itr = list.begin();
		while (itr != list.end())
		{
			auto *to_move = itr.get();
			read_write.erase(to_move);
			T *to_delete = read_only.insert_yield(to_move);
			if (to_delete)
				object_pool.free(to_delete);
			itr = list.begin();
		}
	}

	template <typename P>
	bool find_and_consume_pod(Hash hash, P &p) const
	{
		if (read_only.find_and_consume_pod(hash, p))
			return true;

		lock.lock_read();
		bool ret = read_write.find_and_consume_pod(hash, p);
		lock.unlock_read();
		return ret;
	}

	// Frees every element from both tables back to the pool.
	void clear()
	{
		lock.lock_write();
		clear_list(read_only.inner_list());
		clear_list(read_write.inner_list());
		read_only.clear();
		read_write.clear();
		lock.unlock_write();
	}

	template <typename... P>
	T *allocate(P&&... p)
	{
		lock.lock_write();
		T *t = object_pool.allocate(std::forward<P>(p)...);
		lock.unlock_write();
		return t;
	}

	void free(T *ptr)
	{
		lock.lock_write();
		object_pool.free(ptr);
		lock.unlock_write();
	}

	// Inserts into the read-write table; on a yield the new element is
	// freed and the resident one is returned (value is rewritten).
	T *insert_yield(Hash hash, T *value)
	{
		static_cast<IntrusiveHashMapEnabled<T> *>(value)->set_hash(hash);
		lock.lock_write();
		T *to_delete = read_write.insert_yield(value);
		if (to_delete)
			object_pool.free(to_delete);
		lock.unlock_write();
		return value;
	}

	template <typename... P>
	T *emplace_yield(Hash hash, P&&... p)
	{
		T *t = allocate(std::forward<P>(p)...);
		return insert_yield(hash, t);
	}

	IntrusiveHashMapHolder<T> &get_read_only()
	{
		return read_only;
	}

	IntrusiveHashMapHolder<T> &get_read_write()
	{
		return read_write;
	}

private:
	IntrusiveHashMapHolder<T> read_only;
	IntrusiveHashMapHolder<T> read_write;
	ObjectPool<T> object_pool;
	mutable RWSpinLock lock;

	// Free every element linked in the given list back to the pool.
	void clear_list(IntrusiveList<T> &list)
	{
		auto itr = list.begin();
		while (itr != list.end())
		{
			auto *to_free = itr.get();
			itr = list.erase(itr);
			object_pool.free(to_free);
		}
	}
};
}

View File

@@ -0,0 +1,197 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
namespace Util
{
// Embedded doubly-linked-list links. Inherit from this to make T usable
// in an IntrusiveList without any per-node allocation.
template <typename T>
struct IntrusiveListEnabled
{
	IntrusiveListEnabled<T> *prev = nullptr;
	IntrusiveListEnabled<T> *next = nullptr;
};
// Non-owning doubly-linked list over nodes embedding IntrusiveListEnabled<T>.
// The list never allocates or frees; it only links nodes the caller owns.
template <typename T>
class IntrusiveList
{
public:
	// Forgets all nodes. Does not touch the nodes' link fields or free them.
	void clear()
	{
		head = nullptr;
		tail = nullptr;
	}

	class Iterator
	{
	public:
		friend class IntrusiveList<T>;

		Iterator(IntrusiveListEnabled<T> *node_)
			: node(node_)
		{
		}

		Iterator() = default;

		explicit operator bool() const
		{
			return node != nullptr;
		}

		bool operator==(const Iterator &other) const
		{
			return node == other.node;
		}

		bool operator!=(const Iterator &other) const
		{
			return node != other.node;
		}

		T &operator*()
		{
			return *static_cast<T *>(node);
		}

		const T &operator*() const
		{
			return *static_cast<T *>(node);
		}

		T *get()
		{
			return static_cast<T *>(node);
		}

		const T *get() const
		{
			return static_cast<const T *>(node);
		}

		T *operator->()
		{
			return static_cast<T *>(node);
		}

		const T *operator->() const
		{
			return static_cast<T *>(node);
		}

		Iterator &operator++()
		{
			node = node->next;
			return *this;
		}

		Iterator &operator--()
		{
			node = node->prev;
			return *this;
		}

	private:
		IntrusiveListEnabled<T> *node = nullptr;
	};

	Iterator begin() const
	{
		return Iterator(head);
	}

	// Iterator to the last node, for backwards traversal with operator--.
	Iterator rbegin() const
	{
		return Iterator(tail);
	}

	// End is represented by a null iterator.
	Iterator end() const
	{
		return Iterator();
	}

	// Unlink the node at itr; returns an iterator to the following node.
	// The node's own link fields are left as-is.
	Iterator erase(Iterator itr)
	{
		auto *node = itr.get();
		auto *next = node->next;
		auto *prev = node->prev;

		if (prev)
			prev->next = next;
		else
			head = next;

		if (next)
			next->prev = prev;
		else
			tail = prev;

		return next;
	}

	// Link the node at itr as the new head. The node's links are
	// overwritten, so it must not currently be linked in a list.
	void insert_front(Iterator itr)
	{
		auto *node = itr.get();
		if (head)
			head->prev = node;
		else
			tail = node;

		node->next = head;
		node->prev = nullptr;
		head = node;
	}

	// Link the node at itr as the new tail (same caveat as insert_front).
	void insert_back(Iterator itr)
	{
		auto *node = itr.get();
		if (tail)
			tail->next = node;
		else
			head = node;

		node->prev = tail;
		node->next = nullptr;
		tail = node;
	}

	// Transfer a node from another list to the front of this one.
	void move_to_front(IntrusiveList<T> &other, Iterator itr)
	{
		other.erase(itr);
		insert_front(itr);
	}

	void move_to_back(IntrusiveList<T> &other, Iterator itr)
	{
		other.erase(itr);
		insert_back(itr);
	}

	bool empty() const
	{
		return head == nullptr;
	}

private:
	IntrusiveListEnabled<T> *head = nullptr;
	IntrusiveListEnabled<T> *tail = nullptr;
};
}

View File

@@ -0,0 +1,72 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "logging.hpp"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
namespace Util
{
// Per-thread logging override. Zero-initialized, i.e. no interface installed.
static thread_local LoggingInterface *logging_iface;

// Forward a log message to the calling thread's installed interface.
// Returns false when no interface is set (or left unhandled is impossible
// here — the interface's own return value is propagated) so the LOG*
// macros can fall back to stderr.
bool interface_log(const char *tag, const char *fmt, ...)
{
	if (logging_iface == nullptr)
		return false;

	va_list args;
	va_start(args, fmt);
	const bool handled = logging_iface->log(tag, fmt, args);
	va_end(args);
	return handled;
}
// Install (or clear, by passing nullptr) the logging interface for the
// calling thread only.
void set_thread_logging_interface(LoggingInterface *iface)
{
	logging_iface = iface;
}
#ifdef _WIN32
// Mirrors a formatted log message to an attached Win32 debugger via
// OutputDebugStringA. No-op when no debugger is present.
// tag is prepended verbatim to the formatted message.
void debug_output_log(const char *tag, const char *fmt, ...)
{
	if (!IsDebuggerPresent())
		return;

	va_list va;
	va_start(va, fmt);

	// BUGFIX: the sizing vsnprintf below consumes va, so reusing it for the
	// second formatting pass is undefined behavior per the C standard.
	// Take a va_copy up front and format from the copy.
	va_list va2;
	va_copy(va2, va);

	auto len = vsnprintf(nullptr, 0, fmt, va);
	if (len > 0)
	{
		size_t tag_len = strlen(tag);
		char *buf = new char[size_t(len) + tag_len + 1];
		memcpy(buf, tag, tag_len);
		vsnprintf(buf + tag_len, size_t(len) + 1, fmt, va2);
		OutputDebugStringA(buf);
		delete[] buf;
	}

	va_end(va2);
	va_end(va);
}
#endif
}

View File

@@ -0,0 +1,96 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
namespace Util
{
// Callback interface which can take over log output for a thread.
class LoggingInterface
{
public:
	virtual ~LoggingInterface() = default;

	// Return true if the message was consumed; false makes the LOG*
	// macros fall back to their default output path.
	virtual bool log(const char *tag, const char *fmt, va_list va) = 0;
};

// Routes a message to the current thread's interface; returns false if
// no interface is installed (or the interface declined the message).
bool interface_log(const char *tag, const char *fmt, ...);

// Install (or clear with nullptr) the logging interface for this thread.
void set_thread_logging_interface(LoggingInterface *iface);
}
#if defined(_WIN32)
namespace Util
{
// Mirrors the message to an attached Win32 debugger (OutputDebugStringA).
void debug_output_log(const char *tag, const char *fmt, ...);
}
// Fallback paths used when no per-thread LoggingInterface handles the
// message. On Windows, log to stderr and also to the debugger.
#define LOGE_FALLBACK(...) do { \
	fprintf(stderr, "[ERROR]: " __VA_ARGS__); \
	fflush(stderr); \
	::Util::debug_output_log("[ERROR]: ", __VA_ARGS__); \
} while(false)
#define LOGW_FALLBACK(...) do { \
	fprintf(stderr, "[WARN]: " __VA_ARGS__); \
	fflush(stderr); \
	::Util::debug_output_log("[WARN]: ", __VA_ARGS__); \
} while(false)
#define LOGI_FALLBACK(...) do { \
	fprintf(stderr, "[INFO]: " __VA_ARGS__); \
	fflush(stderr); \
	::Util::debug_output_log("[INFO]: ", __VA_ARGS__); \
} while(false)
#elif defined(ANDROID)
#include <android/log.h>
// On Android, route fallback logging through logcat.
#define LOGE_FALLBACK(...) do { __android_log_print(ANDROID_LOG_ERROR, "Granite", __VA_ARGS__); } while(0)
#define LOGW_FALLBACK(...) do { __android_log_print(ANDROID_LOG_WARN, "Granite", __VA_ARGS__); } while(0)
#define LOGI_FALLBACK(...) do { __android_log_print(ANDROID_LOG_INFO, "Granite", __VA_ARGS__); } while(0)
#else
// Generic fallback: plain stderr with an explicit flush per message.
#define LOGE_FALLBACK(...) \
	do \
	{ \
		fprintf(stderr, "[ERROR]: " __VA_ARGS__); \
		fflush(stderr); \
	} while (false)
#define LOGW_FALLBACK(...) \
	do \
	{ \
		fprintf(stderr, "[WARN]: " __VA_ARGS__); \
		fflush(stderr); \
	} while (false)
#define LOGI_FALLBACK(...) \
	do \
	{ \
		fprintf(stderr, "[INFO]: " __VA_ARGS__); \
		fflush(stderr); \
	} while (false)
#endif
// Public logging macros: try the per-thread LoggingInterface first and
// only use the platform fallback when it is absent or declines.
#define LOGE(...) do { if (!::Util::interface_log("[ERROR]: ", __VA_ARGS__)) { LOGE_FALLBACK(__VA_ARGS__); }} while(0)
#define LOGW(...) do { if (!::Util::interface_log("[WARN]: ", __VA_ARGS__)) { LOGW_FALLBACK(__VA_ARGS__); }} while(0)
#define LOGI(...) do { if (!::Util::interface_log("[INFO]: ", __VA_ARGS__)) { LOGI_FALLBACK(__VA_ARGS__); }} while(0)

View File

@@ -0,0 +1,132 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include "aligned_alloc.hpp"
//#define OBJECT_POOL_DEBUG
namespace Util
{
// Pool allocator which recycles fixed-size slots of T.
// Slabs double in object count for each new allocation (64, 128, 256, ...).
// Defining OBJECT_POOL_DEBUG degrades the pool to plain new/delete so
// per-object tooling (e.g. sanitizers) can track each allocation.
template<typename T>
class ObjectPool
{
public:
	// Construct a T in a free slot, growing the pool with a new slab if
	// necessary. Returns nullptr only if the slab allocation fails.
	template<typename... P>
	T *allocate(P &&... p)
	{
#ifndef OBJECT_POOL_DEBUG
		if (vacants.empty())
		{
			// Each new slab doubles the object count: 64 << slab_index.
			unsigned num_objects = 64u << memory.size();
			// Align slabs to at least 64 bytes (or stricter if T demands it).
			T *ptr = static_cast<T *>(memalign_alloc(std::max<size_t>(64, alignof(T)),
			                                         num_objects * sizeof(T)));
			if (!ptr)
				return nullptr;

			for (unsigned i = 0; i < num_objects; i++)
				vacants.push_back(&ptr[i]);

			memory.emplace_back(ptr);
		}

		T *ptr = vacants.back();
		vacants.pop_back();
		// Placement-construct into the recycled slot.
		new(ptr) T(std::forward<P>(p)...);
		return ptr;
#else
		return new T(std::forward<P>(p)...);
#endif
	}

	// Destroy the object and return its slot to the free list.
	void free(T *ptr)
	{
#ifndef OBJECT_POOL_DEBUG
		ptr->~T();
		vacants.push_back(ptr);
#else
		delete ptr;
#endif
	}

	// Release all slabs. Live objects are not destructed here — callers
	// must have freed everything first.
	void clear()
	{
#ifndef OBJECT_POOL_DEBUG
		vacants.clear();
		memory.clear();
#endif
	}

protected:
#ifndef OBJECT_POOL_DEBUG
	// Free slots available for reuse.
	std::vector<T *> vacants;

	// Slabs came from memalign_alloc, so they must go back through memalign_free.
	struct MallocDeleter
	{
		void operator()(T *ptr)
		{
			memalign_free(ptr);
		}
	};

	std::vector<std::unique_ptr<T, MallocDeleter>> memory;
#endif
};
// Mutex-protected wrapper over ObjectPool.
template<typename T>
class ThreadSafeObjectPool : private ObjectPool<T>
{
public:
	template<typename... P>
	T *allocate(P &&... p)
	{
		std::lock_guard<std::mutex> holder{lock};
		return ObjectPool<T>::allocate(std::forward<P>(p)...);
	}

	// The destructor runs outside the lock; only the free-list push is guarded.
	void free(T *ptr)
	{
#ifndef OBJECT_POOL_DEBUG
		ptr->~T();
		std::lock_guard<std::mutex> holder{lock};
		this->vacants.push_back(ptr);
#else
		delete ptr;
#endif
	}

	void clear()
	{
		std::lock_guard<std::mutex> holder{lock};
		ObjectPool<T>::clear();
	}

private:
	std::mutex lock;
};
}

View File

@@ -0,0 +1,149 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <atomic>
#ifdef __SSE2__
#include <emmintrin.h>
#endif
namespace Util
{
// Lightweight reader-writer spinlock.
// Bit 0 (Writer) marks an exclusive holder; each reader adds Reader (2),
// so a single atomic packs (reader-count << 1) | writer-flag.
class RWSpinLock
{
public:
	enum { Reader = 2, Writer = 1 };

	RWSpinLock()
	{
		state.store(0);
	}

	// Shared lock: optimistically register as a reader, then spin until
	// no writer holds the lock.
	inline void lock_read()
	{
		uint32_t observed = state.fetch_add(Reader, std::memory_order_acquire);
		for (;;)
		{
			if ((observed & Writer) == 0)
				break;
#ifdef __SSE2__
			_mm_pause();
#endif
			observed = state.load(std::memory_order_acquire);
		}
	}

	// Non-blocking shared lock. On failure the optimistic reader
	// registration is rolled back.
	inline bool try_lock_read()
	{
		uint32_t observed = state.fetch_add(Reader, std::memory_order_acquire);
		if ((observed & Writer) == 0)
			return true;
		unlock_read();
		return false;
	}

	inline void unlock_read()
	{
		state.fetch_sub(Reader, std::memory_order_release);
	}

	// Exclusive lock: spin until the lock is completely free (no readers,
	// no writer), then claim the writer bit.
	inline void lock_write()
	{
		for (;;)
		{
			uint32_t expected = 0;
			if (state.compare_exchange_weak(expected, Writer,
			                                std::memory_order_acquire,
			                                std::memory_order_relaxed))
				return;
#ifdef __SSE2__
			_mm_pause();
#endif
		}
	}

	inline bool try_lock_write()
	{
		uint32_t expected = 0;
		return state.compare_exchange_strong(expected, Writer,
		                                     std::memory_order_acquire,
		                                     std::memory_order_relaxed);
	}

	inline void unlock_write()
	{
		// Clear only the writer bit; pending optimistic readers keep their counts.
		state.fetch_and(~Writer, std::memory_order_release);
	}

	// Upgrade a read lock to a write lock. Atomic only when this thread is
	// the sole reader; otherwise falls back to unlock + relock, during
	// which other writers may slip in.
	inline void promote_reader_to_writer()
	{
		uint32_t expected = Reader;
		if (!state.compare_exchange_strong(expected, Writer,
		                                   std::memory_order_acquire,
		                                   std::memory_order_relaxed))
		{
			unlock_read();
			lock_write();
		}
	}

private:
	std::atomic_uint32_t state;
};
class RWSpinLockReadHolder
{
public:
explicit RWSpinLockReadHolder(RWSpinLock &lock_)
: lock(lock_)
{
lock.lock_read();
}
~RWSpinLockReadHolder()
{
lock.unlock_read();
}
private:
RWSpinLock &lock;
};
class RWSpinLockWriteHolder
{
public:
explicit RWSpinLockWriteHolder(RWSpinLock &lock_)
: lock(lock_)
{
lock.lock_write();
}
~RWSpinLockWriteHolder()
{
lock.unlock_write();
}
private:
RWSpinLock &lock;
};
}

View File

@@ -0,0 +1,456 @@
/* Copyright (c) 2019-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stddef.h>
#include <stdlib.h>
#include <utility>
#include <exception>
#include <algorithm>
#include <initializer_list>
namespace Util
{
// std::aligned_storage does not support size == 0, so roll our own.
template <typename T, size_t N>
class AlignedBuffer
{
public:
	// Returns a pointer to raw, correctly aligned storage for N elements.
	// No constructors have been run on the storage.
	T *data()
	{
		return reinterpret_cast<T *>(raw);
	}

private:
	// alignas(T) guarantees placement-new into this buffer is valid.
	alignas(T) char raw[N * sizeof(T)];
};
// Zero-size specialization: no storage at all, data() is nullptr.
// This is the case std::aligned_storage cannot express.
template <typename T>
class AlignedBuffer<T, 0>
{
public:
	T *data()
	{
		return nullptr;
	}
};
// An immutable version of SmallVector which erases type information about storage.
// Non-owning, capacity-erased view over a contiguous buffer.
// SmallVector<T, N> derives from this so APIs can accept any inline
// capacity by reference. The view itself never allocates or frees.
template <typename T>
class VectorView
{
public:
	// Unchecked element access, mirroring std::vector::operator[].
	T &operator[](size_t index)
	{
		return ptr[index];
	}

	const T &operator[](size_t index) const
	{
		return ptr[index];
	}

	size_t size() const
	{
		return buffer_size;
	}

	bool empty() const
	{
		return size() == 0;
	}

	T *data()
	{
		return ptr;
	}

	const T *data() const
	{
		return ptr;
	}

	// Iteration is plain pointer based; the range is [ptr, ptr + size).
	T *begin()
	{
		return ptr;
	}

	const T *begin() const
	{
		return ptr;
	}

	T *end()
	{
		return ptr + buffer_size;
	}

	const T *end() const
	{
		return ptr + buffer_size;
	}

	// front()/back() are unchecked; calling them on an empty view is UB.
	T &front()
	{
		return ptr[0];
	}

	const T &front() const
	{
		return ptr[0];
	}

	T &back()
	{
		return ptr[buffer_size - 1];
	}

	const T &back() const
	{
		return ptr[buffer_size - 1];
	}

	// Avoid sliced copies. Base class should only be read as a reference.
	VectorView(const VectorView &) = delete;
	void operator=(const VectorView &) = delete;

protected:
	VectorView() = default;
	T *ptr = nullptr;
	size_t buffer_size = 0;
};
// Simple vector which supports up to N elements inline, without malloc/free.
// We use a lot of throwaway vectors all over the place which triggers allocations.
// This class only implements the subset of std::vector we need in SPIRV-Cross.
// It is *NOT* a drop-in replacement in general projects.
template <typename T, size_t N = 8>
class SmallVector : public VectorView<T>
{
public:
	SmallVector()
	{
		// Start out pointing at the inline stack storage; no allocation.
		this->ptr = stack_storage.data();
		buffer_capacity = N;
	}

	// Copy-constructs from a [begin, end) range of T.
	SmallVector(const T *arg_list_begin, const T *arg_list_end)
	    : SmallVector()
	{
		auto count = size_t(arg_list_end - arg_list_begin);
		reserve(count);
		for (size_t i = 0; i < count; i++, arg_list_begin++)
			new (&this->ptr[i]) T(*arg_list_begin);
		this->buffer_size = count;
	}

	SmallVector(SmallVector &&other) noexcept : SmallVector()
	{
		*this = std::move(other);
	}

	SmallVector(const std::initializer_list<T> &init_list) : SmallVector()
	{
		insert(this->end(), init_list.begin(), init_list.end());
	}

	// Move assignment. If the source spilled to the heap, steal its
	// pointer; otherwise move elements one by one out of its inline
	// storage. NOTE(review): self-move (v = std::move(v)) with a heap
	// buffer would free the buffer then keep using it — callers must
	// avoid self-move.
	SmallVector &operator=(SmallVector &&other) noexcept
	{
		clear();
		if (other.ptr != other.stack_storage.data())
		{
			// Pilfer allocated pointer.
			if (this->ptr != stack_storage.data())
				free(this->ptr);
			this->ptr = other.ptr;
			this->buffer_size = other.buffer_size;
			buffer_capacity = other.buffer_capacity;
			// Moved-from state: null ptr, zero capacity. The next
			// reserve() will re-point it at stack storage or the heap.
			other.ptr = nullptr;
			other.buffer_size = 0;
			other.buffer_capacity = 0;
		}
		else
		{
			// Need to move the stack contents individually.
			reserve(other.buffer_size);
			for (size_t i = 0; i < other.buffer_size; i++)
			{
				new (&this->ptr[i]) T(std::move(other.ptr[i]));
				other.ptr[i].~T();
			}
			this->buffer_size = other.buffer_size;
			other.buffer_size = 0;
		}
		return *this;
	}

	SmallVector(const SmallVector &other)
	    : SmallVector()
	{
		*this = other;
	}

	// Copy assignment. NOTE(review): no self-assignment guard — v = v
	// would clear() first and then copy destroyed elements; avoid.
	SmallVector &operator=(const SmallVector &other)
	{
		clear();
		reserve(other.buffer_size);
		for (size_t i = 0; i < other.buffer_size; i++)
			new (&this->ptr[i]) T(other.ptr[i]);
		this->buffer_size = other.buffer_size;
		return *this;
	}

	// Constructs count default-initialized elements.
	explicit SmallVector(size_t count)
	    : SmallVector()
	{
		resize(count);
	}

	~SmallVector()
	{
		clear();
		// Only free if we ever spilled to the heap.
		if (this->ptr != stack_storage.data())
			free(this->ptr);
	}

	// Destroys all elements but keeps the current capacity.
	void clear()
	{
		for (size_t i = 0; i < this->buffer_size; i++)
			this->ptr[i].~T();
		this->buffer_size = 0;
	}

	void push_back(const T &t)
	{
		reserve(this->buffer_size + 1);
		new (&this->ptr[this->buffer_size]) T(t);
		this->buffer_size++;
	}

	void push_back(T &&t)
	{
		reserve(this->buffer_size + 1);
		new (&this->ptr[this->buffer_size]) T(std::move(t));
		this->buffer_size++;
	}

	void pop_back()
	{
		// Work around false positive warning on GCC 8.3.
		// Calling pop_back on empty vector is undefined.
		if (!this->empty())
			resize(this->buffer_size - 1);
	}

	template <typename... Ts>
	void emplace_back(Ts &&... ts)
	{
		reserve(this->buffer_size + 1);
		new (&this->ptr[this->buffer_size]) T(std::forward<Ts>(ts)...);
		this->buffer_size++;
	}

	// Grows capacity to at least count (roughly doubling), moving the
	// existing elements into the new buffer. Aborts via std::terminate on
	// allocation failure; assumes T's move constructor does not throw.
	void reserve(size_t count)
	{
		if (count > buffer_capacity)
		{
			size_t target_capacity = buffer_capacity;
			if (target_capacity == 0)
				target_capacity = 1;
			if (target_capacity < N)
				target_capacity = N;

			while (target_capacity < count)
				target_capacity <<= 1u;

			T *new_buffer =
			    target_capacity > N ? static_cast<T *>(malloc(target_capacity * sizeof(T))) : stack_storage.data();

			if (!new_buffer)
				std::terminate();

			// In case for some reason two allocations both come from same stack.
			if (new_buffer != this->ptr)
			{
				// We don't deal with types which can throw in move constructor.
				for (size_t i = 0; i < this->buffer_size; i++)
				{
					new (&new_buffer[i]) T(std::move(this->ptr[i]));
					this->ptr[i].~T();
				}
			}

			if (this->ptr != stack_storage.data())
				free(this->ptr);
			this->ptr = new_buffer;
			buffer_capacity = target_capacity;
		}
	}

	// Copy-inserts [insert_begin, insert_end) before itr. itr must point
	// into this vector (or be end()).
	void insert(T *itr, const T *insert_begin, const T *insert_end)
	{
		auto count = size_t(insert_end - insert_begin);
		if (itr == this->end())
		{
			// Appending is the simple case: grow and copy-construct.
			reserve(this->buffer_size + count);
			for (size_t i = 0; i < count; i++, insert_begin++)
				new (&this->ptr[this->buffer_size + i]) T(*insert_begin);
			this->buffer_size += count;
		}
		else
		{
			if (this->buffer_size + count > buffer_capacity)
			{
				auto target_capacity = this->buffer_size + count;
				if (target_capacity == 0)
					target_capacity = 1;
				if (target_capacity < N)
					target_capacity = N;

				// Note: this loop compares against count only, so the
				// result is not necessarily a power of two, but it is
				// always >= buffer_size + count.
				while (target_capacity < count)
					target_capacity <<= 1u;

				// Need to allocate new buffer. Move everything to a new buffer.
				T *new_buffer =
				    target_capacity > N ? static_cast<T *>(malloc(target_capacity * sizeof(T))) : stack_storage.data();

				if (!new_buffer)
					std::terminate();

				// First, move elements from source buffer to new buffer.
				// We don't deal with types which can throw in move constructor.
				auto *target_itr = new_buffer;
				auto *original_source_itr = this->begin();

				if (new_buffer != this->ptr)
				{
					while (original_source_itr != itr)
					{
						new (target_itr) T(std::move(*original_source_itr));
						original_source_itr->~T();
						++original_source_itr;
						++target_itr;
					}
				}

				// Copy-construct new elements.
				for (auto *source_itr = insert_begin; source_itr != insert_end; ++source_itr, ++target_itr)
					new (target_itr) T(*source_itr);

				// Move over the other half.
				if (new_buffer != this->ptr || insert_begin != insert_end)
				{
					while (original_source_itr != this->end())
					{
						new (target_itr) T(std::move(*original_source_itr));
						original_source_itr->~T();
						++original_source_itr;
						++target_itr;
					}
				}

				if (this->ptr != stack_storage.data())
					free(this->ptr);
				this->ptr = new_buffer;
				buffer_capacity = target_capacity;
			}
			else
			{
				// Move in place, need to be a bit careful about which elements are constructed and which are not.
				// Move the end and construct the new elements.
				auto *target_itr = this->end() + count;
				auto *source_itr = this->end();
				while (target_itr != this->end() && source_itr != itr)
				{
					--target_itr;
					--source_itr;
					new (target_itr) T(std::move(*source_itr));
				}

				// For already constructed elements we can move-assign.
				std::move_backward(itr, source_itr, target_itr);

				// For the inserts which go to already constructed elements, we can do a plain copy.
				while (itr != this->end() && insert_begin != insert_end)
					*itr++ = *insert_begin++;

				// For inserts into newly allocated memory, we must copy-construct instead.
				while (insert_begin != insert_end)
				{
					new (itr) T(*insert_begin);
					++itr;
					++insert_begin;
				}
			}
			this->buffer_size += count;
		}
	}

	void insert(T *itr, const T &value)
	{
		insert(itr, &value, &value + 1);
	}

	// Erases the element at itr by shifting the tail down one slot.
	// Returns itr, which now refers to the following element.
	T *erase(T *itr)
	{
		std::move(itr + 1, this->end(), itr);
		this->ptr[--this->buffer_size].~T();
		return itr;
	}

	void erase(T *start_erase, T *end_erase)
	{
		if (end_erase == this->end())
		{
			resize(size_t(start_erase - this->begin()));
		}
		else
		{
			auto new_size = this->buffer_size - (end_erase - start_erase);
			std::move(end_erase, this->end(), start_erase);
			resize(new_size);
		}
	}

	// Shrinks by destroying tail elements, or grows by default-
	// constructing new ones.
	void resize(size_t new_size)
	{
		if (new_size < this->buffer_size)
		{
			for (size_t i = new_size; i < this->buffer_size; i++)
				this->ptr[i].~T();
		}
		else if (new_size > this->buffer_size)
		{
			reserve(new_size);
			for (size_t i = this->buffer_size; i < new_size; i++)
				new (&this->ptr[i]) T();
		}
		this->buffer_size = new_size;
	}

private:
	size_t buffer_capacity = 0;
	AlignedBuffer<T, N> stack_storage;
};
}

View File

@@ -0,0 +1,62 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <algorithm>
namespace Util
{
// Fixed-capacity bump allocator backed by an inline array of N elements.
// Returned pointers stay valid until reset() or destruction; there is no
// per-allocation free. Not thread-safe.
template <typename T, size_t N>
class StackAllocator
{
public:
	// Hands out `count` consecutive elements, or nullptr when the request
	// is empty or would exceed the remaining capacity.
	T *allocate(size_t count)
	{
		if (count == 0 || used + count > N)
			return nullptr;

		T *result = slab + used;
		used += count;
		return result;
	}

	// Same as allocate(), but value-initializes the returned elements.
	T *allocate_cleared(size_t count)
	{
		T *result = allocate(count);
		if (result)
			std::fill(result, result + count, T());
		return result;
	}

	// Reclaims the whole arena; previously returned pointers become stale.
	void reset()
	{
		used = 0;
	}

private:
	T slab[N];
	size_t used = 0;
};
}

View File

@@ -0,0 +1,177 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include "hash.hpp"
#include "object_pool.hpp"
#include "intrusive_list.hpp"
#include "intrusive_hash_map.hpp"
#include <vector>
namespace Util
{
// Mixin which gives pool objects the bookkeeping state (hash key and
// ring index) that TemporaryHashmap needs. The template parameter T is
// the derived object type (CRTP-style); it is not referenced here.
template <typename T>
class TemporaryHashmapEnabled
{
public:
	// Records the hash key this object is registered under.
	void set_hash(Hash hash_)
	{
		hash = hash_;
	}

	// Records which frame ring the object currently lives on.
	void set_index(unsigned index_)
	{
		index = index_;
	}

	// Const-qualified for consistency with get_index(); callers holding a
	// const reference can now query the key as well.
	Hash get_hash() const
	{
		return hash;
	}

	unsigned get_index() const
	{
		return index;
	}

private:
	Hash hash = 0;
	unsigned index = 0;
};
// Hash map whose entries expire after RingSize calls to begin_frame().
// Objects are pooled; with ReuseObjects = true, expired objects are
// parked on a vacant list and recycled via request_vacant() instead of
// being returned to the pool.
// Note: index advances as (index + 1) & (RingSize - 1), so RingSize must
// be a power of two.
template <typename T, unsigned RingSize = 4, bool ReuseObjects = false>
class TemporaryHashmap
{
public:
	~TemporaryHashmap()
	{
		clear();
	}

	// Frees every live and vacant object and resets all state.
	void clear()
	{
		for (auto &ring : rings)
		{
			while (!ring.empty())
			{
				auto itr = ring.begin();
				ring.erase(itr);
				// The iterator still refers to the unlinked node after
				// erase() — relies on the intrusive list not destroying
				// the node on erase.
				auto &node = *itr;
				object_pool.free(static_cast<T *>(&node));
			}
		}
		hashmap.clear();

		for (auto &vacant : vacants)
			object_pool.free(static_cast<T *>(&*vacant));
		vacants.clear();
		object_pool.clear();
	}

	// Advances the frame ring and expires every object that was last
	// touched RingSize frames ago.
	void begin_frame()
	{
		index = (index + 1) & (RingSize - 1);
		auto &ring = rings[index];
		while (!ring.empty())
		{
			auto itr = ring.begin();
			ring.erase(itr);
			auto &node = *itr;
			hashmap.erase(node.get_hash());
			// Tag dispatch picks pool-free vs. vacant-list recycling.
			free_object(&node, ReuseTag<ReuseObjects>());
		}
	}

	// Looks up an object by hash. A hit is moved to the current frame's
	// ring, refreshing its lifetime. Returns nullptr on miss.
	T *request(Hash hash)
	{
		auto *v = hashmap.find(hash);
		if (v)
		{
			auto node = v->get();
			if (node->get_index() != index)
			{
				rings[index].move_to_front(rings[node->get_index()], node);
				node->set_index(index);
			}
			return &*node;
		}
		else
			return nullptr;
	}

	// Pre-constructs an object and parks it on the vacant list.
	template <typename... P>
	void make_vacant(P &&... p)
	{
		vacants.push_back(object_pool.allocate(std::forward<P>(p)...));
	}

	// Takes one object off the vacant list and registers it under hash.
	// Returns nullptr if no vacant object is available.
	T *request_vacant(Hash hash)
	{
		if (vacants.empty())
			return nullptr;

		auto top = vacants.back();
		vacants.pop_back();
		top->set_index(index);
		top->set_hash(hash);
		hashmap.emplace_replace(hash, top);
		rings[index].insert_front(top);
		return &*top;
	}

	// Constructs a fresh object and registers it under hash, replacing
	// any existing mapping for that hash.
	template <typename... P>
	T *emplace(Hash hash, P &&... p)
	{
		auto *node = object_pool.allocate(std::forward<P>(p)...);
		node->set_index(index);
		node->set_hash(hash);
		hashmap.emplace_replace(hash, node);
		rings[index].insert_front(node);
		return node;
	}

private:
	IntrusiveList<T> rings[RingSize];
	ObjectPool<T> object_pool;
	unsigned index = 0;
	// Maps hash -> list iterator (wrapped so it can live in the hash map).
	IntrusiveHashMap<IntrusivePODWrapper<typename IntrusiveList<T>::Iterator>> hashmap;
	std::vector<typename IntrusiveList<T>::Iterator> vacants;

	template <bool reuse>
	struct ReuseTag
	{
	};

	// ReuseObjects = false: expired objects go back to the pool.
	void free_object(T *object, const ReuseTag<false> &)
	{
		object_pool.free(object);
	}

	// ReuseObjects = true: expired objects are kept for request_vacant().
	void free_object(T *object, const ReuseTag<true> &)
	{
		vacants.push_back(object);
	}
};
}

View File

@@ -0,0 +1,45 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "thread_id.hpp"
#include "logging.hpp"
namespace Util
{
// Per-thread index assigned by the thread manager; ~0u marks a thread
// that was never registered.
static thread_local unsigned thread_id_to_index = ~0u;

// Returns the index registered for the calling thread. Unregistered
// threads log an error and fall back to index 0 (the main thread) as a
// best-effort default.
unsigned get_current_thread_index()
{
	auto ret = thread_id_to_index;
	if (ret == ~0u)
	{
		LOGE("Thread does not exist in thread manager or is not the main thread.\n");
		return 0;
	}
	return ret;
}

// Associates the calling thread with the given index. Expected to be
// called once per thread by whatever owns the thread pool.
void register_thread_index(unsigned index)
{
	thread_id_to_index = index;
}
}

View File

@@ -0,0 +1,29 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
namespace Util
{
// Returns the index previously registered for the calling thread;
// unregistered threads are treated as index 0 (with an error log).
unsigned get_current_thread_index();
// Registers the calling thread under the given index.
void register_thread_index(unsigned thread_index);
}

View File

@@ -0,0 +1,59 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "thread_name.hpp"
#if !defined(_WIN32)
#include <pthread.h>
#else
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <string>
#endif
namespace Util
{
// Sets the calling thread's name as seen by debuggers and profilers.
// On platforms with none of the supported APIs this is a no-op.
void set_current_thread_name(const char *name)
{
#if defined(__linux__)
	// NOTE(review): on Linux pthread_setname_np fails (ERANGE) for names
	// longer than 15 characters and the error is ignored here — confirm
	// callers keep names short.
	pthread_setname_np(pthread_self(), name);
#elif defined(__APPLE__)
	// The Apple variant only takes the name; it always targets the
	// calling thread.
	pthread_setname_np(name);
#elif defined(_WIN32)
	// SetThreadDescription is only available on newer Windows, so resolve
	// it dynamically; if unavailable the call degrades to a no-op.
	using PFN_SetThreadDescription = HRESULT (WINAPI *)(HANDLE, PCWSTR);
	auto module = GetModuleHandleA("kernel32.dll");
	PFN_SetThreadDescription SetThreadDescription = module ? reinterpret_cast<PFN_SetThreadDescription>(
			(void *)GetProcAddress(module, "SetThreadDescription")) : nullptr;

	if (SetThreadDescription)
	{
		// Naive char -> wchar_t widening; only correct for ASCII names.
		std::wstring wname;
		while (*name != '\0')
		{
			wname.push_back(*name);
			name++;
		}
		SetThreadDescription(GetCurrentThread(), wname.c_str());
	}
#endif
}
}

View File

@@ -0,0 +1,28 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
namespace Util
{
// Sets the OS-visible name of the calling thread (shown by debuggers
// and profilers). Implementation is platform-specific; see the .cpp.
void set_current_thread_name(const char *name);
}

View File

@@ -0,0 +1,185 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "logging.hpp"
#include "timeline_trace_file.hpp"
#include "thread_name.hpp"
#include "timer.hpp"
#include <string.h>
#include <stdio.h>
namespace Util
{
// Per-thread tid label stamped onto events begun from this thread.
static thread_local char trace_tid[32];
// Per-thread default trace file used by the convenience macros.
static thread_local TimelineTraceFile *trace_file;

// Sets the calling thread's tid label (truncated to 31 characters).
void TimelineTraceFile::set_tid(const char *tid)
{
	snprintf(trace_tid, sizeof(trace_tid), "%s", tid);
}

void TimelineTraceFile::set_per_thread(TimelineTraceFile *file)
{
	trace_file = file;
}

TimelineTraceFile *TimelineTraceFile::get_per_thread()
{
	return trace_file;
}

// Both setters truncate to the fixed-size buffers in Event.
void TimelineTraceFile::Event::set_desc(const char *desc_)
{
	snprintf(desc, sizeof(desc), "%s", desc_);
}

void TimelineTraceFile::Event::set_tid(const char *tid_)
{
	snprintf(tid, sizeof(tid), "%s", tid_);
}
// Allocates an event stamped with the calling thread's tid label and the
// current timestamp. Pair with end_event() to complete and submit it.
TimelineTraceFile::Event *TimelineTraceFile::begin_event(const char *desc, uint32_t pid)
{
	auto *e = event_pool.allocate();
	e->pid = pid;
	e->set_tid(trace_tid);
	e->set_desc(desc);
	e->start_ns = get_current_time_nsecs();
	return e;
}
// Allocates a blank event for callers which want to fill in all fields
// manually before submit_event(). Strings become empty, numbers zero.
TimelineTraceFile::Event *TimelineTraceFile::allocate_event()
{
	auto *ev = event_pool.allocate();
	ev->start_ns = 0;
	ev->end_ns = 0;
	ev->pid = 0;
	ev->desc[0] = '\0';
	ev->tid[0] = '\0';
	return ev;
}
// Queues a completed event for the I/O thread. Thread-safe; ownership of
// the event passes to the looper, which returns it to the pool.
void TimelineTraceFile::submit_event(Event *e)
{
	std::lock_guard<std::mutex> holder{lock};
	queued_events.push(e);
	cond.notify_one();
}

// Stamps the end timestamp and hands the event to the I/O thread.
void TimelineTraceFile::end_event(Event *e)
{
	e->end_ns = get_current_time_nsecs();
	submit_event(e);
}
// Spawns the I/O thread which owns the output file for its lifetime.
TimelineTraceFile::TimelineTraceFile(const std::string &path)
{
	thr = std::thread(&TimelineTraceFile::looper, this, path);
}
// I/O thread entry point. Drains queued events and appends them to a
// JSON trace (Chrome "Trace Event Format": paired "B"/"E" records).
// A nullptr event is the shutdown sentinel pushed by the destructor.
void TimelineTraceFile::looper(std::string path)
{
	set_current_thread_name("json-trace-io");
	FILE *file = fopen(path.c_str(), "w");
	if (!file)
		LOGE("Failed to open file: %s.\n", path.c_str());

	if (file)
		fputs("[\n", file);

	// Timestamps are rebased against this so the trace starts near t = 0.
	uint64_t base_ts = get_current_time_nsecs();

	for (;;)
	{
		Event *e;
		{
			std::unique_lock<std::mutex> holder{lock};
			cond.wait(holder, [this]() {
				return !queued_events.empty();
			});
			e = queued_events.front();
			queued_events.pop();
		}

		// nullptr sentinel -> shut down.
		if (!e)
			break;

		// Convert to microseconds as required by the trace format. The
		// unsigned subtraction is cast through int64_t so timestamps
		// taken before base_ts come out negative rather than huge.
		auto start_us = int64_t(e->start_ns - base_ts) * 1e-3;
		auto end_us = int64_t(e->end_ns - base_ts) * 1e-3;

		// Skip events with inverted ranges instead of emitting them.
		if (file && start_us <= end_us)
		{
			fprintf(file, "{ \"name\": \"%s\", \"ph\": \"B\", \"tid\": \"%s\", \"pid\": \"%u\", \"ts\": %f },\n",
			        e->desc, e->tid, e->pid, start_us);
			fprintf(file, "{ \"name\": \"%s\", \"ph\": \"E\", \"tid\": \"%s\", \"pid\": \"%u\", \"ts\": %f },\n",
			        e->desc, e->tid, e->pid, end_us);
		}
		event_pool.free(e);
	}

	// Intentionally truncate the JSON so that we can emit "," after the last element.
	if (file)
		fclose(file);
}
// Pushes the nullptr sentinel to wake and stop the I/O thread, then
// waits for it to finish flushing.
TimelineTraceFile::~TimelineTraceFile()
{
	submit_event(nullptr);
	if (thr.joinable())
		thr.join();
}
// Begins an event if a file is attached and the tag is non-empty;
// otherwise the scope is a no-op (event stays nullptr).
TimelineTraceFile::ScopedEvent::ScopedEvent(TimelineTraceFile *file_, const char *tag, uint32_t pid)
    : file(file_)
{
	if (file && tag && *tag != '\0')
		event = file->begin_event(tag, pid);
}

TimelineTraceFile::ScopedEvent::~ScopedEvent()
{
	if (event)
		file->end_event(event);
}
// Move assignment: finishes any event this scope currently tracks, then
// takes over the other scope's event so it ends exactly once.
TimelineTraceFile::ScopedEvent &
TimelineTraceFile::ScopedEvent::operator=(TimelineTraceFile::ScopedEvent &&other) noexcept
{
	if (this != &other)
	{
		if (event)
			file->end_event(event);
		event = other.event;
		file = other.file;
		other.event = nullptr;
		other.file = nullptr;
	}
	return *this;
}

TimelineTraceFile::ScopedEvent::ScopedEvent(TimelineTraceFile::ScopedEvent &&other) noexcept
{
	*this = std::move(other);
}
}

View File

@@ -0,0 +1,96 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <string>
#include <thread>
#include <condition_variable>
#include <mutex>
#include <memory>
#include <queue>
#include "object_pool.hpp"
namespace Util
{
// Asynchronous writer for Chrome "Trace Event Format" JSON traces.
// Events may be produced on any thread; a dedicated I/O thread owned by
// this class serializes them to disk.
class TimelineTraceFile
{
public:
	explicit TimelineTraceFile(const std::string &path);
	~TimelineTraceFile();

	// Sets the tid label used for events begun on the calling thread.
	static void set_tid(const char *tid);
	// Per-thread default trace file, used by the macros below.
	static TimelineTraceFile *get_per_thread();
	static void set_per_thread(TimelineTraceFile *file);

	struct Event
	{
		char desc[256];
		char tid[32];
		uint32_t pid;
		// Timestamps in nanoseconds (see get_current_time_nsecs()).
		uint64_t start_ns, end_ns;
		// Both setters truncate to the fixed-size buffers above.
		void set_desc(const char *desc);
		void set_tid(const char *tid);
	};

	// begin_event()/end_event() stamp timestamps automatically;
	// allocate_event()/submit_event() let callers fill everything in.
	Event *begin_event(const char *desc, uint32_t pid = 0);
	void end_event(Event *e);
	Event *allocate_event();
	void submit_event(Event *e);

	// RAII wrapper: begins an event on construction, ends it on
	// destruction. Movable, not copyable.
	struct ScopedEvent
	{
		ScopedEvent(TimelineTraceFile *file, const char *tag, uint32_t pid = 0);
		ScopedEvent() = default;
		~ScopedEvent();
		void operator=(const ScopedEvent &) = delete;
		ScopedEvent(const ScopedEvent &) = delete;
		ScopedEvent(ScopedEvent &&other) noexcept;
		ScopedEvent &operator=(ScopedEvent &&other) noexcept;
		TimelineTraceFile *file = nullptr;
		Event *event = nullptr;
	};

private:
	// Entry point of the I/O thread draining queued_events.
	void looper(std::string path);
	std::thread thr;
	std::mutex lock;
	std::condition_variable cond;
	ThreadSafeObjectPool<Event> event_pool;
	std::queue<Event *> queued_events;
};
#ifndef GRANITE_SHIPPING
// Token-pasting helpers; two levels are needed so __COUNTER__ expands
// before concatenation, giving each scoped event a unique variable name.
#define GRANITE_MACRO_CONCAT_IMPL(a, b) a##b
#define GRANITE_MACRO_CONCAT(a, b) GRANITE_MACRO_CONCAT_IMPL(a, b)
// Emits a ScopedEvent covering the rest of the enclosing scope.
// NOTE(review): relies on GRANITE_THREAD_GROUP() being defined at the
// point of use — confirm against callers.
#define GRANITE_SCOPED_TIMELINE_EVENT(str) \
	::Util::TimelineTraceFile::ScopedEvent GRANITE_MACRO_CONCAT(_timeline_scoped_count_, __COUNTER__){GRANITE_THREAD_GROUP() ? GRANITE_THREAD_GROUP()->get_timeline_trace_file() : nullptr, str}
// Same, but with an explicitly provided TimelineTraceFile pointer.
#define GRANITE_SCOPED_TIMELINE_EVENT_FILE(file, str) \
	::Util::TimelineTraceFile::ScopedEvent GRANITE_MACRO_CONCAT(_timeline_scoped_count_, __COUNTER__){file, str}
#else
// Shipping builds compile the profiling macros away entirely.
#define GRANITE_SCOPED_TIMELINE_EVENT(...) ((void)0)
#define GRANITE_SCOPED_TIMELINE_EVENT_FILE(...) ((void)0)
#endif
}

View File

@@ -0,0 +1,131 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "timer.hpp"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <time.h>
#endif
namespace Util
{
// Starts timing immediately on construction.
FrameTimer::FrameTimer()
{
	reset();
}
void FrameTimer::reset()
{
start = get_time();
last = start;
last_period = 0;
}
// Marks the start of a period that should not count towards frame time.
void FrameTimer::enter_idle()
{
	idle_start = get_time();
}

// Ends the idle period and accumulates its duration. Calling this
// without a matching enter_idle() reads an uninitialized idle_start.
void FrameTimer::leave_idle()
{
	auto idle_end = get_time();
	idle_time += idle_end - idle_start;
}

// Duration of the most recent frame in seconds.
double FrameTimer::get_frame_time() const
{
	return double(last_period) * 1e-9;
}
// Marks the end of a frame. Returns the frame period in seconds,
// measured against an idle-adjusted clock (accumulated idle time is
// subtracted before the delta is taken).
double FrameTimer::frame()
{
	auto new_time = get_time() - idle_time;
	last_period = new_time - last;
	last = new_time;
	return double(last_period) * 1e-9;
}

// Fixed-timestep variant: advances the timer by an externally supplied
// frame time (in seconds) instead of measuring it.
double FrameTimer::frame(double frame_time)
{
	last_period = int64_t(frame_time * 1e9);
	last += last_period;
	return frame_time;
}
// Seconds between reset() and the end of the last frame.
double FrameTimer::get_elapsed() const
{
	return double(last - start) * 1e-9;
}

// Raw monotonic timestamp in nanoseconds.
int64_t FrameTimer::get_time()
{
	return get_current_time_nsecs();
}
#ifdef _WIN32
// Caches the QueryPerformanceCounter frequency once at startup, as the
// reciprocal scale factor from QPC ticks to nanoseconds.
struct QPCFreq
{
	QPCFreq()
	{
		LARGE_INTEGER freq;
		QueryPerformanceFrequency(&freq);
		inv_freq = 1e9 / double(freq.QuadPart);
	}
	double inv_freq;
} static static_qpc_freq;
#endif
// Monotonic timestamp in nanoseconds. Returns 0 on failure.
// Windows: QPC ticks scaled by the cached inverse frequency.
// POSIX: clock_gettime with CLOCK_MONOTONIC_RAW where available
// (immune to NTP slewing), plain CLOCK_MONOTONIC on Android/FreeBSD.
int64_t get_current_time_nsecs()
{
#ifdef _WIN32
	LARGE_INTEGER li;
	if (!QueryPerformanceCounter(&li))
		return 0;
	return int64_t(double(li.QuadPart) * static_qpc_freq.inv_freq);
#else
	struct timespec ts = {};
#if defined(ANDROID) || defined(__FreeBSD__)
	constexpr auto timebase = CLOCK_MONOTONIC;
#else
	constexpr auto timebase = CLOCK_MONOTONIC_RAW;
#endif
	if (clock_gettime(timebase, &ts) < 0)
		return 0;
	return ts.tv_sec * 1000000000ll + ts.tv_nsec;
#endif
}
// Records the starting timestamp for a measurement.
void Timer::start()
{
	t = get_current_time_nsecs();
}

// Returns seconds elapsed since the last start(). Does not stop or
// reset the timer; may be called repeatedly.
double Timer::end()
{
	auto nt = get_current_time_nsecs();
	return double(nt - t) * 1e-9;
}
}

View File

@@ -0,0 +1,63 @@
/* Copyright (c) 2017-2023 Hans-Kristian Arntzen
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <stdint.h>
namespace Util
{
// Tracks per-frame timing with support for excluding idle periods
// (e.g. blocking on vsync or being paused).
class FrameTimer
{
public:
	// Starts timing immediately.
	FrameTimer();

	// Restarts timing from "now".
	void reset();
	// Ends a frame; returns the measured frame period in seconds.
	double frame();
	// Fixed-timestep variant: advances by the given frame time (seconds).
	double frame(double frame_time);
	// Seconds between reset() and the end of the last frame.
	double get_elapsed() const;
	// Duration of the most recent frame in seconds.
	double get_frame_time() const;

	// Bracket a period which should not count towards frame time.
	void enter_idle();
	void leave_idle();

private:
	// All bookkeeping is kept in integer nanoseconds.
	int64_t start;
	int64_t last;
	int64_t last_period;
	int64_t idle_start;
	int64_t idle_time = 0;

	int64_t get_time();
};

// Minimal start/stop stopwatch; end() returns elapsed seconds.
class Timer
{
public:
	void start();
	double end();

private:
	int64_t t = 0;
};

// Monotonic timestamp in nanoseconds (QPC on Windows,
// CLOCK_MONOTONIC[_RAW] elsewhere). Returns 0 on failure.
int64_t get_current_time_nsecs();
}