/* Blender 3.3 — `BLI_task_test.cc` source listing (extracted from generated
 * documentation; navigation chrome converted to this comment). */
1 /* SPDX-License-Identifier: Apache-2.0 */
2 
3 #include "testing/testing.h"
4 #include <atomic>
5 #include <cstring>
6 
7 #include "atomic_ops.h"
8 
9 #include "MEM_guardedalloc.h"
10 
11 #include "BLI_utildefines.h"
12 
13 #include "BLI_listbase.h"
14 #include "BLI_mempool.h"
15 #include "BLI_task.h"
16 #include "BLI_task.hh"
17 
18 #define ITEMS_NUM 10000
19 
20 /* *** Parallel iterations over range of integer values. *** */
21 
22 static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
23 {
24  int *data = (int *)userdata;
25  data[index] = index;
26  *((int *)tls->userdata_chunk) += index;
27  // printf("%d, %d, %d\n", index, data[index], *((int *)tls->userdata_chunk));
28 }
29 
30 static void task_range_iter_reduce_func(const void *__restrict UNUSED(userdata),
31  void *__restrict join_v,
32  void *__restrict userdata_chunk)
33 {
34  int *join = (int *)join_v;
35  int *chunk = (int *)userdata_chunk;
36  *join += *chunk;
37  // printf("%d, %d\n", data[ITEMS_NUM], *((int *)userdata_chunk));
38 }
39 
40 TEST(task, RangeIter)
41 {
42  int data[ITEMS_NUM] = {0};
43  int sum = 0;
44 
46 
47  TaskParallelSettings settings;
49  settings.min_iter_per_thread = 1;
50 
51  settings.userdata_chunk = &sum;
52  settings.userdata_chunk_size = sizeof(sum);
54 
56 
57  /* Those checks should ensure us all items of the listbase were processed once, and only once
58  * as expected. */
59 
60  int expected_sum = 0;
61  for (int i = 0; i < ITEMS_NUM; i++) {
62  EXPECT_EQ(data[i], i);
63  expected_sum += i;
64  }
65  EXPECT_EQ(sum, expected_sum);
66 
68 }
69 
70 /* *** Parallel iterations over mempool items. *** */
71 
72 static void task_mempool_iter_func(void *userdata,
73  MempoolIterData *item,
74  const TaskParallelTLS *__restrict UNUSED(tls))
75 {
76  int *data = (int *)item;
77  int *count = (int *)userdata;
78 
79  EXPECT_TRUE(data != nullptr);
80 
81  *data += 1;
83 }
84 
85 TEST(task, MempoolIter)
86 {
87  int *data[ITEMS_NUM];
90  sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);
91 
92  int i;
93 
94  /* 'Randomly' add and remove some items from mempool, to create a non-homogeneous one. */
95  int items_num = 0;
96  for (i = 0; i < ITEMS_NUM; i++) {
97  data[i] = (int *)BLI_mempool_alloc(mempool);
98  *data[i] = i - 1;
99  items_num++;
100  }
101 
102  for (i = 0; i < ITEMS_NUM; i += 3) {
103  BLI_mempool_free(mempool, data[i]);
104  data[i] = nullptr;
105  items_num--;
106  }
107 
108  for (i = 0; i < ITEMS_NUM; i += 7) {
109  if (data[i] == nullptr) {
110  data[i] = (int *)BLI_mempool_alloc(mempool);
111  *data[i] = i - 1;
112  items_num++;
113  }
114  }
115 
116  for (i = 0; i < ITEMS_NUM - 5; i += 23) {
117  for (int j = 0; j < 5; j++) {
118  if (data[i + j] != nullptr) {
119  BLI_mempool_free(mempool, data[i + j]);
120  data[i + j] = nullptr;
121  items_num--;
122  }
123  }
124  }
125 
126  TaskParallelSettings settings;
128 
129  BLI_task_parallel_mempool(mempool, &items_num, task_mempool_iter_func, &settings);
130 
131  /* Those checks should ensure us all items of the mempool were processed once, and only once - as
132  * expected. */
133  EXPECT_EQ(items_num, 0);
134  for (i = 0; i < ITEMS_NUM; i++) {
135  if (data[i] != nullptr) {
136  EXPECT_EQ(*data[i], i);
137  }
138  }
139 
140  BLI_mempool_destroy(mempool);
142 }
143 
144 /* *** Parallel iterations over mempool items with TLS. *** */
145 
147  ListBase *accumulate_items;
148 };
149 
150 static void task_mempool_iter_tls_func(void *UNUSED(userdata),
151  MempoolIterData *item,
152  const TaskParallelTLS *__restrict tls)
153 {
154  TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)tls->userdata_chunk;
155  int *data = (int *)item;
156 
157  EXPECT_TRUE(data != nullptr);
158  if (task_data->accumulate_items == nullptr) {
159  task_data->accumulate_items = MEM_cnew<ListBase>(__func__);
160  }
161 
162  /* Flip to prove this has been touched. */
163  *data = -*data;
164 
165  BLI_addtail(task_data->accumulate_items, BLI_genericNodeN(data));
166 }
167 
168 static void task_mempool_iter_tls_reduce(const void *__restrict UNUSED(userdata),
169  void *__restrict chunk_join,
170  void *__restrict chunk)
171 {
172  TaskMemPool_Chunk *join_chunk = (TaskMemPool_Chunk *)chunk_join;
173  TaskMemPool_Chunk *data_chunk = (TaskMemPool_Chunk *)chunk;
174 
175  if (data_chunk->accumulate_items != nullptr) {
176  if (join_chunk->accumulate_items == nullptr) {
177  join_chunk->accumulate_items = MEM_cnew<ListBase>(__func__);
178  }
179  BLI_movelisttolist(join_chunk->accumulate_items, data_chunk->accumulate_items);
180  }
181 }
182 
183 static void task_mempool_iter_tls_free(const void *UNUSED(userdata),
184  void *__restrict userdata_chunk)
185 {
186  TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)userdata_chunk;
187  MEM_freeN(task_data->accumulate_items);
188 }
189 
190 TEST(task, MempoolIterTLS)
191 {
192  int *data[ITEMS_NUM];
194  BLI_mempool *mempool = BLI_mempool_create(
195  sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);
196 
197  int i;
198 
199  /* Add numbers negative `1..ITEMS_NUM` inclusive. */
200  int items_num = 0;
201  for (i = 0; i < ITEMS_NUM; i++) {
202  data[i] = (int *)BLI_mempool_alloc(mempool);
203  *data[i] = -(i + 1);
204  items_num++;
205  }
206 
207  TaskParallelSettings settings;
209 
210  TaskMemPool_Chunk tls_data;
211  tls_data.accumulate_items = nullptr;
212 
213  settings.userdata_chunk = &tls_data;
214  settings.userdata_chunk_size = sizeof(tls_data);
215 
218 
219  BLI_task_parallel_mempool(mempool, nullptr, task_mempool_iter_tls_func, &settings);
220 
221  EXPECT_EQ(BLI_listbase_count(tls_data.accumulate_items), ITEMS_NUM);
222 
223  /* Check that all elements are added into the list once. */
224  int number_accum = 0;
225  for (LinkData *link = (LinkData *)tls_data.accumulate_items->first; link; link = link->next) {
226  int *data = (int *)link->data;
227  number_accum += *data;
228  }
229  EXPECT_EQ(number_accum, (ITEMS_NUM * (ITEMS_NUM + 1)) / 2);
230 
231  BLI_freelistN(tls_data.accumulate_items);
232  MEM_freeN(tls_data.accumulate_items);
233 
234  BLI_mempool_destroy(mempool);
236 }
237 
238 /* *** Parallel iterations over double-linked list items. *** */
239 
240 static void task_listbase_iter_func(void *userdata,
241  void *item,
242  int index,
243  const TaskParallelTLS *__restrict UNUSED(tls))
244 {
245  LinkData *data = (LinkData *)item;
246  int *count = (int *)userdata;
247 
248  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
250 }
251 
252 TEST(task, ListBaseIter)
253 {
254  ListBase list = {nullptr, nullptr};
255  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
256  ITEMS_NUM, sizeof(*items_buffer), __func__);
258 
259  int i;
260 
261  int items_num = 0;
262  for (i = 0; i < ITEMS_NUM; i++) {
263  BLI_addtail(&list, &items_buffer[i]);
264  items_num++;
265  }
266 
267  TaskParallelSettings settings;
269 
270  BLI_task_parallel_listbase(&list, &items_num, task_listbase_iter_func, &settings);
271 
272  /* Those checks should ensure us all items of the listbase were processed once, and only once -
273  * as expected. */
274  EXPECT_EQ(items_num, 0);
275  LinkData *item;
276  for (i = 0, item = (LinkData *)list.first; i < ITEMS_NUM && item != nullptr;
277  i++, item = item->next) {
278  EXPECT_EQ(POINTER_AS_INT(item->data), i);
279  }
280  EXPECT_EQ(ITEMS_NUM, i);
281 
282  MEM_freeN(items_buffer);
284 }
285 
286 TEST(task, ParallelInvoke)
287 {
288  std::atomic<int> counter = 0;
290  [&]() { counter++; },
291  [&]() { counter++; },
292  [&]() { counter++; },
293  [&]() { counter++; },
294  [&]() { counter++; });
295  EXPECT_EQ(counter, 6);
296 }
EXPECT_EQ(BLI_expr_pylike_eval(expr, nullptr, 0, &result), EXPR_PYLIKE_INVALID)
struct LinkData * BLI_genericNodeN(void *data)
Definition: listbase.c:842
void void void BLI_movelisttolist(struct ListBase *dst, struct ListBase *src) ATTR_NONNULL(1
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:466
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:80
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
@ BLI_MEMPOOL_ALLOW_ITER
Definition: BLI_mempool.h:107
void BLI_mempool_free(BLI_mempool *pool, void *addr) ATTR_NONNULL(1
BLI_mempool * BLI_mempool_create(unsigned int esize, unsigned int elem_num, unsigned int pchunk, unsigned int flag) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL
Definition: BLI_mempool.c:253
void * BLI_mempool_alloc(BLI_mempool *pool) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(1)
Definition: BLI_mempool.c:319
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
Definition: BLI_mempool.c:707
struct MempoolIterData MempoolIterData
Definition: BLI_task.h:272
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition: task_range.cc:94
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition: BLI_task.h:293
void BLI_task_parallel_mempool(struct BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func, const TaskParallelSettings *settings)
BLI_INLINE void BLI_parallel_mempool_settings_defaults(TaskParallelSettings *settings)
Definition: BLI_task.h:301
void BLI_task_parallel_listbase(struct ListBase *listbase, void *userdata, TaskParallelIteratorFunc func, const TaskParallelSettings *settings)
struct TaskMemPool_Chunk { ListBase *accumulate_items TaskMemPool_Chunk
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
static void task_range_iter_reduce_func(const void *__restrict UNUSED(userdata), void *__restrict join_v, void *__restrict userdata_chunk)
static void task_mempool_iter_tls_reduce(const void *__restrict UNUSED(userdata), void *__restrict chunk_join, void *__restrict chunk)
static void task_listbase_iter_func(void *userdata, void *item, int index, const TaskParallelTLS *__restrict UNUSED(tls))
static void task_mempool_iter_tls_func(void *UNUSED(userdata), MempoolIterData *item, const TaskParallelTLS *__restrict tls)
static void task_mempool_iter_func(void *userdata, MempoolIterData *item, const TaskParallelTLS *__restrict UNUSED(tls))
static void task_mempool_iter_tls_free(const void *UNUSED(userdata), void *__restrict userdata_chunk)
#define ITEMS_NUM
TEST(task, RangeIter)
void BLI_threadapi_init(void)
Definition: threads.cc:125
void BLI_threadapi_exit(void)
Definition: threads.cc:130
#define POINTER_FROM_INT(i)
#define UNUSED(x)
#define POINTER_AS_INT(i)
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
static T sum(const btAlignedObjectArray< T > &items)
int count
ccl_gpu_kernel_postfix ccl_global int * counter
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:27
void *(* MEM_calloc_arrayN)(size_t len, size_t size, const char *str)
Definition: mallocn.c:32
struct blender::compositor::@179::@181 task
void parallel_invoke(Functions &&...functions)
Definition: BLI_task.hh:99
unsigned int uint32_t
Definition: stdint.h:80
void * data
Definition: DNA_listBase.h:26
struct LinkData * next
Definition: DNA_listBase.h:25
void * first
Definition: DNA_listBase.h:31
TaskParallelReduceFunc func_reduce
Definition: BLI_task.h:181
TaskParallelFreeFunc func_free
Definition: BLI_task.h:183
size_t userdata_chunk_size
Definition: BLI_task.h:169