#include "testing/testing.h"

#define ITEMS_NUM 10000
/* Per-index callback of the parallel range iteration: the index is accumulated
 * into the calling thread's TLS chunk. */
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
{
  int *data = (int *)userdata;
  /* ... */
  *((int *)tls->userdata_chunk) += index;
}
/* Reduce callback: fold one thread's TLS chunk into the final join value. */
static void task_range_iter_reduce_func(const void *__restrict UNUSED(userdata),
                                        void *__restrict join_v,
                                        void *__restrict userdata_chunk)
{
  int *join = (int *)join_v;
  int *chunk = (int *)userdata_chunk;
  *join += *chunk;
}
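The two callbacks above are only half of the picture: the call site has to hand a TLS chunk and a reduce function to BLI_task_parallel_range() through TaskParallelSettings. A minimal sketch of that wiring, assuming the settings fields listed near the end of this page (a userdata_chunk field is assumed to exist alongside userdata_chunk_size) and this file's own includes; the array and its size are illustrative, not taken from the test:

/* Illustrative wiring (not the test's own code): run the two callbacks above
 * over a plain int array and collect the per-thread sums. */
static int run_range_example(int *data, const int size)
{
  int sum = 0; /* Template for each thread's TLS chunk and the final join target. */

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &sum;             /* Copied once per worker thread. */
  settings.userdata_chunk_size = sizeof(sum);
  settings.func_reduce = task_range_iter_reduce_func; /* Folds each copy back into `sum`. */

  BLI_task_parallel_range(0, size, data, task_range_iter_func, &settings);
  return sum; /* 0 + 1 + ... + (size - 1). */
}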
/* Per-item callback of the plain mempool iteration: the shared counter passed
 * as userdata is updated for every visited item. */
static void task_mempool_iter_func(void *userdata,
                                   MempoolIterData *item,
                                   const TaskParallelTLS *__restrict UNUSED(tls))
{
  int *data = (int *)item;
  int *count = (int *)userdata;

  EXPECT_TRUE(data != nullptr);
  /* ... */
}
/* Fragments of the mempool-iteration test body: a number of items are freed
 * again (in groups of five, every 23 items) so that the parallel iteration also
 * runs over a partially filled pool, and the surviving entries are checked. */
if (data[i] == nullptr) {
  /* ... */
}

for (i = 0; i < ITEMS_NUM - 5; i += 23) {
  for (int j = 0; j < 5; j++) {
    if (data[i + j] != nullptr) {
      BLI_mempool_free(mempool, data[i + j]);
      data[i + j] = nullptr;
    }
  }
}

if (data[i] != nullptr) {
  /* ... */
}
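Before the TLS variant, a hedged sketch of the plain BLI_task_parallel_mempool() pattern the fragments above exercise; visit_cb, the pool sizes and the BLI_MEMPOOL_ALLOW_ITER flag (from BLI_mempool.h) are assumptions for illustration, while the mempool, atomic and task calls match the declarations listed further down:

#include <cstdint>

#include "atomic_ops.h"
#include "BLI_mempool.h"
#include "BLI_task.h"

/* Illustrative per-item callback: each element is an int, and a shared counter
 * passed through userdata is decremented atomically for every visited item. */
static void visit_cb(void *userdata, MempoolIterData *item, const TaskParallelTLS *__restrict /*tls*/)
{
  *(int *)item += 1;
  atomic_sub_and_fetch_uint32((uint32_t *)userdata, 1);
}

static void mempool_example()
{
  /* BLI_MEMPOOL_ALLOW_ITER is required for iterable pools. */
  BLI_mempool *pool = BLI_mempool_create(sizeof(int), 1024, 512, BLI_MEMPOOL_ALLOW_ITER);
  for (int i = 0; i < 1024; i++) {
    *(int *)BLI_mempool_alloc(pool) = i;
  }

  uint32_t remaining = 1024;
  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);
  BLI_task_parallel_mempool(pool, &remaining, visit_cb, &settings);
  /* `remaining` is now 0: every live element was visited exactly once. */

  BLI_mempool_destroy(pool);
}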
/* Per-item callback of the TLS variant: lazily create this thread's
 * accumulation list inside its TaskMemPool_Chunk. */
static void task_mempool_iter_tls_func(void *UNUSED(userdata),
                                       MempoolIterData *item,
                                       const TaskParallelTLS *__restrict tls)
{
  TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)tls->userdata_chunk;
  int *data = (int *)item;

  EXPECT_TRUE(data != nullptr);
  if (task_data->accumulate_items == nullptr) {
    task_data->accumulate_items = MEM_cnew<ListBase>(__func__);
  }
  /* ... */
}
/* Reduce callback: merge one worker's accumulated list into the join chunk. */
static void task_mempool_iter_tls_reduce(const void *__restrict UNUSED(userdata),
                                         void *__restrict chunk_join,
                                         void *__restrict chunk)
{
  TaskMemPool_Chunk *join_chunk = (TaskMemPool_Chunk *)chunk_join;
  TaskMemPool_Chunk *data_chunk = (TaskMemPool_Chunk *)chunk;
  if (data_chunk->accumulate_items != nullptr) {
    if (join_chunk->accumulate_items == nullptr) {
      join_chunk->accumulate_items = MEM_cnew<ListBase>(__func__);
    }
    BLI_movelisttolist(join_chunk->accumulate_items, data_chunk->accumulate_items);
  }
}
static void task_mempool_iter_tls_free(const void *UNUSED(userdata), void *__restrict userdata_chunk)
/* From the TLS test body: the template chunk starts with an empty list, and after
 * the parallel run the accumulated links are summed up. */
tls_data.accumulate_items = nullptr;
/* ... */
int number_accum = 0;
/* For each LinkData link in the joined accumulate_items list: */
int *data = (int *)link->data;
number_accum += *data;
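The TLS variant gives each worker its own TaskMemPool_Chunk and merges the per-worker lists in the reduce step. A sketch of the settings wiring for that pattern, assuming a settings.userdata_chunk field next to userdata_chunk_size, an already populated mempool argument, the struct and callbacks referenced further down, and this file's includes; the cleanup at the end is illustrative:

/* Sketch: run the TLS callbacks above over an existing mempool and collect
 * every visited item into one joined list. */
static void accumulate_items_example(BLI_mempool *mempool)
{
  TaskMemPool_Chunk tls_data;
  tls_data.accumulate_items = nullptr;

  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);
  settings.userdata_chunk = &tls_data;                  /* Copied once per worker thread. */
  settings.userdata_chunk_size = sizeof(tls_data);
  settings.func_reduce = task_mempool_iter_tls_reduce;  /* Merge worker lists into tls_data. */
  settings.func_free = task_mempool_iter_tls_free;      /* Release each worker's own copy. */

  BLI_task_parallel_mempool(mempool, nullptr, task_mempool_iter_tls_func, &settings);

  /* tls_data.accumulate_items now holds one LinkData node per visited item. */
  BLI_freelistN(tls_data.accumulate_items);
  MEM_freeN(tls_data.accumulate_items);
}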
/* Fragments from the listbase test: the per-item callback reads the shared counter
 * from userdata, the items live in one MEM_calloc_arrayN() buffer, and the list is
 * verified afterwards by walking it link by link. */
int *count = (int *)userdata;
/* ... */
MEM_calloc_arrayN(ITEMS_NUM, sizeof(*items_buffer), __func__);
/* ... */
for (/* ... */; /* ... */; i++, item = item->next) {
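Finally, a hedged sketch of BLI_task_parallel_listbase() along the lines of the fragments above, assuming this file's includes and the ITEMS_NUM define; tag_cb is a hypothetical callback, and BLI_parallel_range_settings_defaults() is assumed to be the applicable way to initialize the settings here, while the list, allocation and macro calls match the declarations listed below:

/* Hypothetical per-item callback (not the test's): stamp every link with its index. */
static void tag_cb(void *UNUSED(userdata), void *item, int index, const TaskParallelTLS *__restrict UNUSED(tls))
{
  LinkData *link = (LinkData *)item;
  link->data = POINTER_FROM_INT(index);
}

static void listbase_example()
{
  ListBase list = {nullptr, nullptr};
  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(ITEMS_NUM, sizeof(*items_buffer), __func__);
  for (int i = 0; i < ITEMS_NUM; i++) {
    BLI_addtail(&list, &items_buffer[i]);
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings); /* Assumed default initializer here. */
  BLI_task_parallel_listbase(&list, nullptr, tag_cb, &settings);

  /* Walk the list link by link, mirroring the fragment above. */
  int i = 0;
  for (LinkData *item = (LinkData *)list.first; item != nullptr; i++, item = item->next) {
    EXPECT_EQ(POINTER_AS_INT(item->data), i);
  }
  EXPECT_EQ(BLI_listbase_count(&list), ITEMS_NUM);

  MEM_freeN(items_buffer);
}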
struct LinkData * BLI_genericNodeN(void *data)
void BLI_movelisttolist(struct ListBase *dst, struct ListBase *src) ATTR_NONNULL(1, 2)
void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void BLI_mempool_free(BLI_mempool *pool, void *addr) ATTR_NONNULL(1, 2)
BLI_mempool * BLI_mempool_create(unsigned int esize, unsigned int elem_num, unsigned int pchunk, unsigned int flag) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL
void * BLI_mempool_alloc(BLI_mempool *pool) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(1)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
typedef struct MempoolIterData MempoolIterData
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
void BLI_task_parallel_mempool(struct BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func, const TaskParallelSettings *settings)
BLI_INLINE void BLI_parallel_mempool_settings_defaults(TaskParallelSettings *settings)
void BLI_task_parallel_listbase(struct ListBase *listbase, void *userdata, TaskParallelIteratorFunc func, const TaskParallelSettings *settings)
struct TaskMemPool_Chunk { ListBase *accumulate_items; }
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
static void task_range_iter_reduce_func(const void *__restrict UNUSED(userdata), void *__restrict join_v, void *__restrict userdata_chunk)
static void task_mempool_iter_tls_reduce(const void *__restrict UNUSED(userdata), void *__restrict chunk_join, void *__restrict chunk)
static void task_listbase_iter_func(void *userdata, void *item, int index, const TaskParallelTLS *__restrict UNUSED(tls))
static void task_mempool_iter_tls_func(void *UNUSED(userdata), MempoolIterData *item, const TaskParallelTLS *__restrict tls)
static void task_mempool_iter_func(void *userdata, MempoolIterData *item, const TaskParallelTLS *__restrict UNUSED(tls))
static void task_mempool_iter_tls_free(const void *UNUSED(userdata), void *__restrict userdata_chunk)
void BLI_threadapi_init(void)
void BLI_threadapi_exit(void)
#define POINTER_FROM_INT(i)
#define POINTER_AS_INT(i)
MEM_guardedalloc.h: guarded memory (de)allocation.
atomic_ops.h: provides wrappers around system-specific atomic primitives, and some extensions (faked-atomic operations over float numbers).
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
void(* MEM_freeN)(void *vmemh)
void *(* MEM_calloc_arrayN)(size_t len, size_t size, const char *str)
void parallel_invoke(Functions &&...functions)
TaskParallelReduceFunc func_reduce
TaskParallelFreeFunc func_free
size_t userdata_chunk_size