Blender  V3.3
guarded_allocator.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: Apache-2.0
2  * Copyright 2011-2022 Blender Foundation */
3 
4 #ifndef __UTIL_GUARDED_ALLOCATOR_H__
5 #define __UTIL_GUARDED_ALLOCATOR_H__
6 
7 #include <cstddef>
8 #include <cstdlib>
9 #include <memory>
10 
11 #ifdef WITH_BLENDER_GUARDEDALLOC
12 # include "../../guardedalloc/MEM_guardedalloc.h"
13 #endif
14 
16 
/* Internal use only: byte-accounting hooks used by GuardedAllocator.
 * `n` is a size in bytes added to / removed from the running usage counter.
 * Definitions live in the corresponding translation unit — TODO confirm. */
void util_guarded_mem_alloc(size_t n);
void util_guarded_mem_free(size_t n);
21 /* Guarded allocator for the use with STL. */
22 template<typename T> class GuardedAllocator {
23  public:
24  typedef size_t size_type;
25  typedef ptrdiff_t difference_type;
26  typedef T *pointer;
27  typedef const T *const_pointer;
28  typedef T &reference;
29  typedef const T &const_reference;
30  typedef T value_type;
31 
33  {
34  }
36  {
37  }
38 
39  T *allocate(size_t n, const void *hint = 0)
40  {
41  (void)hint;
42  size_t size = n * sizeof(T);
44  if (n == 0) {
45  return NULL;
46  }
47  T *mem;
48 #ifdef WITH_BLENDER_GUARDEDALLOC
49  /* C++ standard requires allocation functions to allocate memory suitably
50  * aligned for any standard type. This is 16 bytes for 64 bit platform as
51  * far as i concerned. We might over-align on 32bit here, but that should
52  * be all safe actually.
53  */
54  mem = (T *)MEM_mallocN_aligned(size, 16, "Cycles Alloc");
55 #else
56  mem = (T *)malloc(size);
57 #endif
58  if (mem == NULL) {
59  throw std::bad_alloc();
60  }
61  return mem;
62  }
63 
64  void deallocate(T *p, size_t n)
65  {
66  util_guarded_mem_free(n * sizeof(T));
67  if (p != NULL) {
68 #ifdef WITH_BLENDER_GUARDEDALLOC
69  MEM_freeN(p);
70 #else
71  free(p);
72 #endif
73  }
74  }
75 
76  T *address(T &x) const
77  {
78  return &x;
79  }
80 
81  const T *address(const T &x) const
82  {
83  return &x;
84  }
85 
87  {
88  return *this;
89  }
90 
91  size_t max_size() const
92  {
93  return size_t(-1);
94  }
95 
96  template<class U> struct rebind {
98  };
99 
100  template<class U> GuardedAllocator(const GuardedAllocator<U> &)
101  {
102  }
103 
104  template<class U> GuardedAllocator &operator=(const GuardedAllocator<U> &)
105  {
106  return *this;
107  }
108 
109  inline bool operator==(GuardedAllocator const & /*other*/) const
110  {
111  return true;
112  }
113  inline bool operator!=(GuardedAllocator const &other) const
114  {
115  return !operator==(other);
116  }
117 
118 #ifdef _MSC_VER
119  /* Welcome to the black magic here.
120  *
121  * The issue is that MSVC C++ allocates container proxy on any
122  * vector initialization, including static vectors which don't
123  * have any data yet. This leads to several issues:
124  *
125  * - Static objects initialization fiasco (global_stats from
126  * util_stats.h might not be initialized yet).
127  * - If main() function changes allocator type (for example,
128  * this might happen with `blender --debug-memory`) nobody
129  * will know how to convert already allocated memory to a new
130  * guarded allocator.
131  *
132  * Here we work this around by making it so container proxy does
133  * not use guarded allocation. A bit fragile, unfortunately.
134  */
135  template<> struct rebind<std::_Container_proxy> {
136  typedef std::allocator<std::_Container_proxy> other;
137  };
138 
139  operator std::allocator<std::_Container_proxy>() const
140  {
141  return std::allocator<std::_Container_proxy>();
142  }
143 #endif
144 };
145 
/* Get memory usage and peak from the guarded STL allocator. */
size_t util_guarded_get_mem_used();
size_t util_guarded_get_mem_peak();
/* Call given function and keep track if it runs out of memory.
 *
 * If it does run out of memory, stop execution and set progress
 * to do a global cancel.
 *
 * It's not fully robust, but good enough to catch obvious issues
 * when running out of memory. */
#define MEM_GUARDED_CALL(progress, func, ...) \
  do { \
    try { \
      (func)(__VA_ARGS__); \
    } \
    catch (std::bad_alloc &) { \
      fprintf(stderr, "Error: run out of memory!\n"); \
      fflush(stderr); \
      (progress)->set_error("Out of memory"); \
    } \
  } while (false)
169 
171 
172 #endif /* __UTIL_GUARDED_ALLOCATOR_H__ */
void BLI_kdtree_nd_() free(KDTree *tree)
Definition: kdtree_impl.h:102
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
T * allocate(size_t n, const void *hint=0)
const T * address(const T &x) const
GuardedAllocator(const GuardedAllocator &)
bool operator==(GuardedAllocator const &) const
const T & const_reference
bool operator!=(GuardedAllocator const &other) const
GuardedAllocator & operator=(const GuardedAllocator< U > &)
GuardedAllocator(const GuardedAllocator< U > &)
GuardedAllocator< T > & operator=(const GuardedAllocator &)
void deallocate(T *p, size_t n)
T * address(T &x) const
size_t max_size() const
ptrdiff_t difference_type
#define CCL_NAMESPACE_END
Definition: cuda/compat.h:9
SyclQueue void void size_t num_bytes void
size_t util_guarded_get_mem_used()
size_t util_guarded_get_mem_peak()
void util_guarded_mem_free(size_t n)
CCL_NAMESPACE_BEGIN void util_guarded_mem_alloc(size_t n)
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:27
void *(* MEM_mallocN_aligned)(size_t len, size_t alignment, const char *str)
Definition: mallocn.c:35
#define T
GuardedAllocator< U > other