Blender V3.3
BLI_linear_allocator.hh
/* SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

#include "BLI_string_ref.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"

namespace blender {

template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopyable, NonMovable {
 private:
  BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
  Vector<void *> owned_buffers_;
  Vector<Span<char>> unused_borrowed_buffers_;

  uintptr_t current_begin_;
  uintptr_t current_end_;

#ifdef DEBUG
  int64_t debug_allocated_amount_ = 0;
#endif

  /* Buffers larger than this threshold are not packed together with smaller allocations, to
   * avoid wasting memory. */
  constexpr static inline int64_t large_buffer_threshold = 4096;

 public:
  LinearAllocator()
  {
    current_begin_ = 0;
    current_end_ = 0;
  }

  ~LinearAllocator()
  {
    for (void *ptr : owned_buffers_) {
      allocator_.deallocate(ptr);
    }
  }

  /**
   * Get a pointer to an uninitialized memory buffer with the given size and alignment. The
   * alignment has to be a power of 2. Individual allocations cannot be freed; all owned buffers
   * are released when the allocator is destructed.
   */
  void *allocate(const int64_t size, const int64_t alignment)
  {
    BLI_assert(size >= 0);
    BLI_assert(alignment >= 1);
    BLI_assert(is_power_of_2_i(alignment));

    const uintptr_t alignment_mask = alignment - 1;
    const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) &
                                                 ~alignment_mask;
    const uintptr_t potential_allocation_end = potential_allocation_begin + size;

    if (potential_allocation_end <= current_end_) {
#ifdef DEBUG
      debug_allocated_amount_ += size;
#endif
      current_begin_ = potential_allocation_end;
      return reinterpret_cast<void *>(potential_allocation_begin);
    }
    if (size <= large_buffer_threshold) {
      this->allocate_new_buffer(size + alignment, alignment);
      return this->allocate(size, alignment);
    }
    return this->allocator_large_buffer(size, alignment);
  }
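
  /* Usage sketch (added for illustration; not from the original header). The allocator hands out
   * memory with a simple pointer bump; nothing is freed individually, all owned buffers are
   * released at once in the destructor. Assuming the default #GuardedAllocator:
   *
   *   LinearAllocator<> allocator;
   *   void *mem = allocator.allocate(256, 16);  // 256 bytes, 16-byte aligned.
   *   // ... use `mem`; it stays valid until `allocator` is destructed.
   */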

  /** Allocate uninitialized memory for a single value of type #T. */
  template<typename T> T *allocate()
  {
    return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
  }

  /** Allocate an uninitialized array of the given size. */
  template<typename T> MutableSpan<T> allocate_array(int64_t size)
  {
    T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
    return MutableSpan<T>(array, size);
  }

  /**
   * Construct an object in memory owned by this allocator. The returned #destruct_ptr runs the
   * destructor when it goes out of scope, but intentionally does not free the memory.
   */
  template<typename T, typename... Args> destruct_ptr<T> construct(Args &&...args)
  {
    void *buffer = this->allocate(sizeof(T), alignof(T));
    T *value = new (buffer) T(std::forward<Args>(args)...);
    return destruct_ptr<T>(value);
  }
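
  /* Illustrative sketch (not from the original header): #construct places an object in allocator
   * memory; the #destruct_ptr destroys it on scope exit while the allocator reclaims the bytes
   * later.
   *
   *   LinearAllocator<> allocator;
   *   destruct_ptr<std::string> name = allocator.construct<std::string>("Cube");
   *   // *name is usable here; its destructor runs when `name` goes out of scope.
   */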

  /** Construct an array where every element is initialized from the same arguments. */
  template<typename T, typename... Args>
  MutableSpan<T> construct_array(int64_t size, Args &&...args)
  {
    MutableSpan<T> array = this->allocate_array<T>(size);
    for (const int64_t i : IndexRange(size)) {
      new (&array[i]) T(std::forward<Args>(args)...);
    }
    return array;
  }

  /** Copy the given array into memory owned by this allocator. */
  template<typename T> MutableSpan<T> construct_array_copy(Span<T> src)
  {
    if (src.is_empty()) {
      return {};
    }
    MutableSpan<T> dst = this->allocate_array<T>(src.size());
    uninitialized_copy_n(src.data(), src.size(), dst.data());
    return dst;
  }
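
  /* Illustrative sketch (not from the original header): building and copying arrays.
   *
   *   LinearAllocator<> allocator;
   *   MutableSpan<int> zeros = allocator.construct_array<int>(8, 0);  // Eight ints, all zero.
   *   const int values[3] = {1, 2, 3};
   *   MutableSpan<int> copy = allocator.construct_array_copy(Span<int>(values, 3));
   */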

  /** Copy the given string into memory owned by this allocator; the copy is null terminated. */
  StringRefNull copy_string(StringRef str)
  {
    const int64_t alloc_size = str.size() + 1;
    char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
    str.copy(buffer, alloc_size);
    return StringRefNull(static_cast<const char *>(buffer));
  }
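
  /* Illustrative sketch (not from the original header): #copy_string stores a null-terminated
   * copy whose lifetime is tied to the allocator, which is handy when building many small names.
   *
   *   LinearAllocator<> allocator;
   *   StringRefNull name = allocator.copy_string("position");
   */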

  MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
                                                          int64_t element_size,
                                                          int64_t element_alignment)
  {
    void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
    void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);

    MutableSpan<void *> pointers((void **)pointer_buffer, element_amount);
    void *next_element_buffer = elements_buffer;
    for (int64_t i : IndexRange(element_amount)) {
      pointers[i] = next_element_buffer;
      next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
    }

    return pointers;
  }

  template<typename T, typename... Args>
  Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&...args)
  {
    MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
        n, sizeof(T), alignof(T));
    MutableSpan<T *> pointers = void_pointers.cast<T *>();

    for (int64_t i : IndexRange(n)) {
      new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
    }

    return pointers;
  }
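
  /* Illustrative sketch (not from the original header): the two methods above allocate one
   * pointer array plus one contiguous element buffer and point each pointer at its element; the
   * typed overload additionally constructs every element from the given arguments.
   *
   *   LinearAllocator<> allocator;
   *   Span<int *> values = allocator.construct_elements_and_pointer_array<int>(100, 0);
   */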

  /**
   * Tell the allocator to use up the given memory buffer before allocating new memory from the
   * system. The buffer is borrowed, not owned, so it is not freed in the destructor.
   */
  void provide_buffer(void *buffer, uint size)
  {
    unused_borrowed_buffers_.append(Span<char>(static_cast<char *>(buffer), size));
  }

  template<size_t Size, size_t Alignment>
  void provide_buffer(AlignedBuffer<Size, Alignment> &aligned_buffer)
  {
    this->provide_buffer(aligned_buffer.ptr(), Size);
  }
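
  /* Illustrative sketch (not from the original header): a caller can donate scratch memory,
   * e.g. from the stack, which is consumed before the system allocator is asked for more.
   *
   *   AlignedBuffer<1024, 8> scratch;
   *   LinearAllocator<> allocator;
   *   allocator.provide_buffer(scratch);  // The first ~1 KiB of allocations use the stack.
   */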

 private:
  void allocate_new_buffer(int64_t min_allocation_size, int64_t min_alignment)
  {
    /* Reuse a provided (borrowed) buffer if a large enough one is available. */
    for (int64_t i : unused_borrowed_buffers_.index_range()) {
      Span<char> buffer = unused_borrowed_buffers_[i];
      if (buffer.size() >= min_allocation_size) {
        unused_borrowed_buffers_.remove_and_reorder(i);
        current_begin_ = (uintptr_t)buffer.begin();
        current_end_ = (uintptr_t)buffer.end();
        return;
      }
    }

    /* Possibly allocate more bytes than necessary for the current allocation, so that more small
     * allocations can be packed together. Large buffers are allocated exactly, to avoid wasting
     * too much memory. */
    int64_t size_in_bytes = min_allocation_size;
    if (size_in_bytes <= large_buffer_threshold) {
      /* Gradually grow the buffer size with each allocation, up to a maximum. */
      const int grow_size = 1 << std::min<int>(owned_buffers_.size() + 6, 20);
      size_in_bytes = std::min(large_buffer_threshold,
                               std::max<int64_t>(size_in_bytes, grow_size));
    }

    void *buffer = allocator_.allocate(size_in_bytes, min_alignment, __func__);
    owned_buffers_.append(buffer);
    current_begin_ = (uintptr_t)buffer;
    current_end_ = current_begin_ + size_in_bytes;
  }
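
  /* Note on the growth schedule above (added for clarity): owned buffers are sized
   * 1 << (owned_buffers_.size() + 6) bytes, i.e. 64, 128, 256, ..., capped at
   * large_buffer_threshold (4096), unless the requested allocation is already larger. */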

  /* Allocate a dedicated buffer for a single large allocation. */
  void *allocator_large_buffer(const int64_t size, const int64_t alignment)
  {
    void *buffer = allocator_.allocate(size, alignment, __func__);
    owned_buffers_.append(buffer);
    return buffer;
  }
};

}  // namespace blender
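
/* End-to-end usage sketch (added for illustration; not part of the original file). Assumes the
 * usual BLI headers and <string> are available:
 *
 *   void build_names()
 *   {
 *     blender::LinearAllocator<> allocator;
 *     blender::Vector<blender::StringRefNull> names;
 *     for (const int64_t i : blender::IndexRange(1000)) {
 *       names.append(allocator.copy_string("object_" + std::to_string(i)));
 *     }
 *     // All thousand string buffers are released together when `allocator` is destructed.
 *   }
 */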