/* Avoids a heap allocation for user-data chunks small enough to live on the stack.
 * Every MALLOCA() must be paired with a MALLOCA_FREE() using the same size. */
#define MALLOCA(_size) (((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__))
#define MALLOCA_FREE(_mem, _size) \
  if (((_mem) != NULL) && ((_size) > 8192)) { \
    MEM_freeN((_mem)); \
  } \
  ((void)0)
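/* Illustrative usage sketch (not part of the original file): the helper below is
 * hypothetical and only demonstrates the MALLOCA()/MALLOCA_FREE() pairing. Requests
 * at or below the 8192-byte threshold come from the stack via alloca() and the free
 * is a no-op; larger requests go through the guarded allocator and are freed. */
static void malloca_usage_example(size_t num_items)
{
  const size_t size = sizeof(int) * num_items;
  int *buffer = MALLOCA(size); /* Stack for <= 8192 bytes, guarded heap otherwise. */
  for (size_t i = 0; i < num_items; i++) {
    buffer[i] = (int)i;
  }
  MALLOCA_FREE(buffer, size); /* Only frees when the heap path was taken. */
}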
/* ... */

BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
                                              const int items_num,
                                              int tasks_num,
                                              int *r_chunk_size)
{
  /* ... */

  /* Multiplier for the heuristic chunk size: grows with the number of tasks, to
   * compensate for the per-task scheduling overhead on many-threaded machines. */
  const int tasks_num_factor = max_ii(1, tasks_num >> 3);

  /* ... */
}
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
                                      void *userdata_chunk)
{
  TaskParallelTLS tls = {
      .userdata_chunk = userdata_chunk,
  };
  void **current_chunk_items;
  int *current_chunk_indices;
  int current_chunk_size;
  const size_t items_size = sizeof(*current_chunk_items) * (size_t)state->iter_shared.chunk_size;
  const size_t indices_size = sizeof(*current_chunk_indices) *
                              (size_t)state->iter_shared.chunk_size;

  current_chunk_items = MALLOCA(items_size);
  current_chunk_indices = MALLOCA(indices_size);
  current_chunk_size = 0;
  for (bool do_abort = false; !do_abort;) {
    if (state->iter_shared.spin_lock != NULL) {
      BLI_spin_lock(state->iter_shared.spin_lock);
    }

    /* Get current status. */
    int index = state->iter_shared.next_index;
    void *item = state->iter_shared.next_item;

    /* 'Acquire' a chunk of items from the iterator function. */
    int i;
    for (i = 0; i < state->iter_shared.chunk_size && !state->iter_shared.is_finished; i++) {
      current_chunk_indices[i] = index;
      current_chunk_items[i] = item;
      state->iter_func(state->userdata, &tls, &item, &index, &state->iter_shared.is_finished);
    }

    /* Update current status. */
    state->iter_shared.next_index = index;
    state->iter_shared.next_item = item;
    current_chunk_size = i;

    do_abort = state->iter_shared.is_finished;

    if (state->iter_shared.spin_lock != NULL) {
      BLI_spin_unlock(state->iter_shared.spin_lock);
    }

    /* Process the acquired chunk, outside of the critical section. */
    for (i = 0; i < current_chunk_size; ++i) {
      state->func(state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
    }
  }

  MALLOCA_FREE(current_chunk_items, items_size);
  MALLOCA_FREE(current_chunk_indices, indices_size);
}
/* ... */

static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
                                              TaskParallelIteratorState *state)
{
  /* Prepare the caller's TLS data for single-threaded use. */
  void *userdata_chunk = settings->userdata_chunk;
  /* ... */
  if (userdata_chunk) {
    /* ... */
  }

  parallel_iterator_func_do(state, userdata_chunk);

  if (userdata_chunk) {
    /* ... */
  }
}
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
                                      TaskParallelIteratorState *state)
{
  const int threads_num = BLI_task_scheduler_num_threads();

  task_parallel_calc_chunk_size(
      settings, state->items_num, threads_num, &state->iter_shared.chunk_size);
  /* ... */

  const int items_num = state->items_num;
  /* A negative `items_num` means the total amount of items is unknown in advance. */
  const size_t tasks_num = items_num >= 0 ? (size_t)min_ii(threads_num, items_num) :
                                            (size_t)threads_num;
  if (tasks_num == 1) {
    task_parallel_iterator_no_threads(settings, state);
    return;
  }

  SpinLock spin_lock;
  BLI_spin_init(&spin_lock);
  state->iter_shared.spin_lock = &spin_lock;
  void *userdata_chunk = settings->userdata_chunk;
  const size_t userdata_chunk_size = settings->userdata_chunk_size;
  void *userdata_chunk_local = NULL;
  void *userdata_chunk_array = NULL;
  const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
  TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);

  if (use_userdata_chunk) {
    userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
  }

  for (size_t i = 0; i < tasks_num; i++) {
    if (use_userdata_chunk) {
      userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
      memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
      if (settings->func_init != NULL) {
        settings->func_init(state->userdata, userdata_chunk_local);
      }
    }
    /* Each pushed task pulls chunks of items from the shared iterator state. */
    BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
  }

  BLI_task_pool_work_and_wait(task_pool);
  BLI_task_pool_free(task_pool);
  if (use_userdata_chunk) {
    for (size_t i = 0; i < tasks_num; i++) {
      userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
      if (settings->func_reduce != NULL) {
        settings->func_reduce(state->userdata, userdata_chunk, userdata_chunk_local);
      }
      if (settings->func_free != NULL) {
        settings->func_free(state->userdata, userdata_chunk_local);
      }
    }
    MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
  }

  BLI_spin_end(&spin_lock);
  state->iter_shared.spin_lock = NULL;
}
void BLI_task_parallel_iterator(void *userdata,
                                TaskParallelIteratorIterFunc iter_func,
                                void *init_item,
                                const int init_index,
                                const int items_num,
                                TaskParallelIteratorFunc func,
                                const TaskParallelSettings *settings)
{
  TaskParallelIteratorState state = {0};
  state.items_num = items_num;
  state.iter_shared.next_index = init_index;
  state.iter_shared.next_item = init_item;
  state.iter_shared.is_finished = false;
  state.userdata = userdata;
  state.iter_func = iter_func;
  state.func = func;

  task_parallel_iterator_do(settings, &state);
}
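/* Illustrative usage sketch (not part of the original file): iterates a NULL-terminated
 * array of strings in parallel. The `my_` names are invented for the example;
 * BLI_parallel_range_settings_defaults() and <stdio.h> are assumed to be available.
 * The iterator callback receives the current item/index and must advance both,
 * setting `*r_do_abort` once the next item is invalid. */
static void my_iter_fn(void *__restrict userdata,
                       const TaskParallelTLS *__restrict UNUSED(tls),
                       void **r_next_item,
                       int *r_next_index,
                       bool *r_do_abort)
{
  const char **strings = userdata;
  (*r_next_index)++;
  *r_next_item = (void *)strings[*r_next_index];
  if (*r_next_item == NULL) {
    *r_do_abort = true;
  }
}

static void my_process_fn(void *__restrict UNUSED(userdata),
                          void *item,
                          int index,
                          const TaskParallelTLS *__restrict UNUSED(tls))
{
  printf("item %d: %s\n", index, (const char *)item);
}

static void my_run(const char **strings) /* NULL-terminated, at least one item. */
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  /* A negative items_num: the total count is not known up front. */
  BLI_task_parallel_iterator(
      (void *)strings, my_iter_fn, (void *)strings[0], 0, -1, my_process_fn, &settings);
}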
static void task_parallel_listbase_get(void *__restrict UNUSED(userdata),
                                       const TaskParallelTLS *__restrict UNUSED(tls),
                                       void **r_next_item,
                                       int *r_next_index,
                                       bool *r_do_abort)
{
  /* Simple advance to the next item in the list. */
  Link *link = *r_next_item;
  (*r_next_index)++;

  if ((*r_next_item = link->next) == NULL) {
    *r_do_abort = true;
  }
}
void BLI_task_parallel_listbase(ListBase *listbase,
                                void *userdata,
                                TaskParallelIteratorFunc func,
                                const TaskParallelSettings *settings)
{
  if (BLI_listbase_is_empty(listbase)) {
    return;
  }

  TaskParallelIteratorState state = {0};

  state.items_num = BLI_listbase_count(listbase);
  state.iter_shared.next_index = 0;
  state.iter_shared.next_item = listbase->first;
  state.iter_shared.is_finished = false;
  state.userdata = userdata;
  state.iter_func = task_parallel_listbase_get;
  state.func = func;

  task_parallel_iterator_do(settings, &state);
}
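/* Illustrative usage sketch (not part of the original file): applies a callback to every
 * link of a ListBase in parallel. The `my_` names are invented, and initializing the
 * settings via BLI_parallel_range_settings_defaults() is an assumption of the sketch. */
typedef struct MyNode {
  struct MyNode *next, *prev; /* Matches `Link` layout, as required for ListBase use. */
  int value;
} MyNode;

static void my_listbase_fn(void *__restrict UNUSED(userdata),
                           void *item,
                           int UNUSED(index),
                           const TaskParallelTLS *__restrict UNUSED(tls))
{
  MyNode *node = item;
  node->value *= 2; /* Each link is visited exactly once, so this is race-free. */
}

static void my_double_all(ListBase *listbase)
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  BLI_task_parallel_listbase(listbase, NULL, my_listbase_fn, &settings);
}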
/* ... */

void BLI_task_parallel_mempool(BLI_mempool *mempool,
                               void *userdata,
                               TaskParallelMempoolFunc func,
                               const TaskParallelSettings *settings)
{
  if (BLI_mempool_len(mempool) == 0) {
    return;
  }

  void *userdata_chunk = settings->userdata_chunk;
  const size_t userdata_chunk_size = settings->userdata_chunk_size;
  void *userdata_chunk_array = NULL;
  const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
  /* Single-threaded fallback. */
  if (!settings->use_threading) {
    TaskParallelTLS tls = {NULL};
    if (use_userdata_chunk) {
      if (settings->func_init != NULL) {
        settings->func_init(userdata, userdata_chunk);
      }
      tls.userdata_chunk = userdata_chunk;
    }

    BLI_mempool_iter iter;
    BLI_mempool_iternew(mempool, &iter);

    void *item;
    while ((item = BLI_mempool_iterstep(&iter)) != NULL) {
      func(userdata, item, &tls);
    }

    if (use_userdata_chunk && (settings->func_free != NULL)) {
      /* `func_free` should only free data created during execution of `func`. */
      settings->func_free(userdata, userdata_chunk);
    }
    return;
  }
  ParallelMempoolState state;
  TaskPool *task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
  const int threads_num = BLI_task_scheduler_num_threads();

  /* Use a few more tasks than threads, so workers that finish early can help
   * drain the remaining mempool items. */
  const int tasks_num = threads_num + 2;

  state.userdata = userdata;
  state.func = func;
  if (use_userdata_chunk) {
    userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
  }

  ParallelMempoolTaskData *mempool_iterator_data = mempool_iter_threadsafe_create(
      mempool, (size_t)tasks_num);
  for (int i = 0; i < tasks_num; i++) {
    void *userdata_chunk_local = NULL;
    if (use_userdata_chunk) {
      userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
      memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
      if (settings->func_init != NULL) {
        settings->func_init(userdata, userdata_chunk_local);
      }
    }
    mempool_iterator_data[i].tls.userdata_chunk = userdata_chunk_local;

    BLI_task_pool_push(task_pool, parallel_mempool_func, &mempool_iterator_data[i], false, NULL);
  }

  BLI_task_pool_work_and_wait(task_pool);
  BLI_task_pool_free(task_pool);
  if (use_userdata_chunk) {
    for (int i = 0; i < tasks_num; i++) {
      if (settings->func_reduce != NULL) {
        settings->func_reduce(
            userdata, userdata_chunk, mempool_iterator_data[i].tls.userdata_chunk);
      }
      if (settings->func_free != NULL) {
        settings->func_free(userdata, mempool_iterator_data[i].tls.userdata_chunk);
      }
    }
    MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
  }

  mempool_iter_threadsafe_destroy(mempool_iterator_data);
}
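/* Illustrative usage sketch (not part of the original file): counts mempool elements in
 * parallel with a per-task TLS chunk merged by `func_reduce`. The `my_` names are
 * invented, and BLI_parallel_mempool_settings_defaults() is assumed per BLI_task.h. */
typedef struct MyCount {
  int total;
} MyCount;

static void my_count_fn(void *UNUSED(userdata),
                        MempoolIterData *UNUSED(iter),
                        const TaskParallelTLS *__restrict tls)
{
  MyCount *count = tls->userdata_chunk; /* Private to this task: no locking needed. */
  count->total++;
}

static void my_count_reduce(const void *UNUSED(userdata), void *chunk_join, void *chunk)
{
  /* Merge one task's result into the accumulated result. */
  ((MyCount *)chunk_join)->total += ((MyCount *)chunk)->total;
}

static int my_count_mempool(BLI_mempool *mempool)
{
  MyCount count = {0};
  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);
  settings.userdata_chunk = &count;
  settings.userdata_chunk_size = sizeof(count);
  settings.func_reduce = my_count_reduce;
  BLI_task_parallel_mempool(mempool, NULL, my_count_fn, &settings);
  return count.total;
}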