BLI_mempool.c

/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2008 Blender Foundation. All rights reserved. */

/** \file
 * \ingroup bli
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 *
 * Supports:
 * - Freeing chunks.
 * - Iterating over elements.
 *   (optionally when using the #BLI_MEMPOOL_ALLOW_ITER flag).
 */

#include <stdlib.h>
#include <string.h>

#include "atomic_ops.h"

#include "BLI_utildefines.h"

#include "BLI_mempool.h"         /* own include */
#include "BLI_mempool_private.h" /* own include */

#include "MEM_guardedalloc.h"

#include "BLI_strict_flags.h" /* keep last */

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

/* NOTE: copied from BLO_blend_defs.h, don't use here because we're in BLI. */
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
     (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
     (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
#endif

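/**
 * Magic word written into a freed element's `freeword` to mark it as free.
 * It spells "free" and is byte-order symmetric, so the check behaves the same
 * on big- and little-endian builds.
 */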
#define FREEWORD \
  ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
                                        MAKE_ID('e', 'f', 'f', 'e'))

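/**
 * The 'used' word just needs to be set to _anything_ besides #FREEWORD.
 */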
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')

/* Currently totalloc isn't used. */
// #define USE_TOTALLOC

/* optimize pool size */
#define USE_CHUNK_POW2

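/* When true, freed elements are overwritten with 0xFF to help catch
 * use-after-free errors (enabled via #BLI_mempool_set_memory_debug). */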
#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

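/**
 * A free element from #BLI_mempool.chunks. Data is cast to this type and stored in
 * #BLI_mempool.free as a single linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which BLI_mempool_alloc may return.
 */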
typedef struct BLI_freenode {
  struct BLI_freenode *next;
  /** Used to identify this as a freed node. */
  intptr_t freeword;
} BLI_freenode;

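/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a single linked list.
 */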
typedef struct BLI_mempool_chunk {
  struct BLI_mempool_chunk *next;
} BLI_mempool_chunk;

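/**
 * The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
 */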
struct BLI_mempool {
  /** Single linked list of allocated chunks. */
  BLI_mempool_chunk *chunks;
  /** Keep a pointer to the last chunk, so we can append new chunks there,
   * this is needed for iteration so we can loop over chunks in the order added. */
  BLI_mempool_chunk *chunk_tail;

  /** Element size in bytes. */
  uint esize;
  /** Chunk size in bytes. */
  uint csize;
  /** Number of elements per chunk. */
  uint pchunk;
  uint flag;
  /* keeps aligned to 16 bits */

  /** Free element list. Interleaved into chunk data. */
  BLI_freenode *free;
  /** Use to know how many chunks to keep for #BLI_mempool_clear. */
  uint maxchunks;
  /** Number of elements currently in use. */
  uint totused;
#ifdef USE_TOTALLOC
  /** Number of elements allocated in total. */
  uint totalloc;
#endif
};

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

#define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))

#define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
#define NODE_STEP_PREV(node) ((void *)((char *)(node)-esize))

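/** Extra bytes implicitly used for every chunk alloc. */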
#define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))

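/* Round `x` up to the smallest power of two >= x (bit-smearing trick, assumes x > 0). */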
#ifdef USE_CHUNK_POW2
static uint power_of_2_max_u(uint x)
{
  x -= 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return x + 1;
}
#endif

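/* Step `index` chunks along the list, returning NULL when out of range. */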
BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
  while (index-- && head) {
    head = head->next;
  }
  return head;
}

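/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note for small pools 1 is a good default, the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */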
BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
{
  return (elem_num <= pchunk) ? 1 : ((elem_num / pchunk) + 1);
}

static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
  return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
}

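/**
 * Initialize a chunk and add into \a pool->chunks.
 *
 * \param pool: The pool to add the chunk into.
 * \param mpchunk: The new uninitialized chunk (can be malloc'd).
 * \param last_tail: The last element of the previous chunk
 * (used when building free chunks initially).
 * \return The last chunk.
 */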
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
                                       BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *last_tail)
{
  const uint esize = pool->esize;
  BLI_freenode *curnode = CHUNK_DATA(mpchunk);
  uint j;

  /* append */
  if (pool->chunk_tail) {
    pool->chunk_tail->next = mpchunk;
  }
  else {
    BLI_assert(pool->chunks == NULL);
    pool->chunks = mpchunk;
  }

  mpchunk->next = NULL;
  pool->chunk_tail = mpchunk;

  if (UNLIKELY(pool->free == NULL)) {
    pool->free = curnode;
  }

  /* loop through the allocated data, building the pointer structures */
  j = pool->pchunk;
  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode->freeword = FREEWORD;
      curnode = curnode->next;
    }
  }
  else {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
  }

  /* terminate the list (rewind one)
   * will be overwritten if 'curnode' gets passed in again as 'last_tail' */
  curnode = NODE_STEP_PREV(curnode);
  curnode->next = NULL;

#ifdef USE_TOTALLOC
  pool->totalloc += pool->pchunk;
#endif

  /* final pointer in the previously allocated chunk is wrong */
  if (last_tail) {
    last_tail->next = CHUNK_DATA(mpchunk);
  }

  return curnode;
}

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
{
  MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
  BLI_mempool_chunk *mpchunk_next;

  for (; mpchunk; mpchunk = mpchunk_next) {
    mpchunk_next = mpchunk->next;
    mempool_chunk_free(mpchunk);
  }
}

BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
{
  BLI_mempool *pool;
  BLI_freenode *last_tail = NULL;
  uint i, maxchunks;

  /* allocate the pool structure */
  pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");

  /* set the elem size */
  if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
    esize = (int)MEMPOOL_ELEM_SIZE_MIN;
  }

  if (flag & BLI_MEMPOOL_ALLOW_ITER) {
    esize = MAX2(esize, (uint)sizeof(BLI_freenode));
  }

  maxchunks = mempool_maxchunks(elem_num, pchunk);

  pool->chunks = NULL;
  pool->chunk_tail = NULL;
  pool->esize = esize;

  /* Optimize chunk size to powers of 2, accounting for slop-space. */
#ifdef USE_CHUNK_POW2
  {
    BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
    pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
  }
#endif

  pool->csize = esize * pchunk;

  /* Ensure this is a power of 2, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
  {
    uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
    BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
  }
#endif

  pool->pchunk = pchunk;
  pool->flag = flag;
  pool->free = NULL; /* mempool_chunk_add assigns */
  pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
  pool->totalloc = 0;
#endif
  pool->totused = 0;

  if (elem_num) {
    /* Allocate the actual chunks. */
    for (i = 0; i < maxchunks; i++) {
      BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
      last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
    }
  }

#ifdef WITH_MEM_VALGRIND
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  return pool;
}
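
/* Illustrative usage sketch (not part of the original file; `MyElem` is a
 * hypothetical element type). A pool hands out fixed-size elements and reuses
 * freed slots:
 *
 *   BLI_mempool *pool = BLI_mempool_create(sizeof(MyElem), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
 *   MyElem *elem = BLI_mempool_alloc(pool);
 *   ... use `elem` ...
 *   BLI_mempool_free(pool, elem);
 *   BLI_mempool_destroy(pool);
 */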

void *BLI_mempool_alloc(BLI_mempool *pool)
{
  BLI_freenode *free_pop;

  if (UNLIKELY(pool->free == NULL)) {
    /* Need to allocate a new chunk. */
    BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
    mempool_chunk_add(pool, mpchunk, NULL);
  }

  free_pop = pool->free;

  BLI_assert(pool->chunk_tail->next == NULL);

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    free_pop->freeword = USEDWORD;
  }

  pool->free = free_pop->next;
  pool->totused++;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
#endif

  return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);
  memset(retval, 0, (size_t)pool->esize);
  return retval;
}

void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
  BLI_freenode *newhead = addr;

#ifndef NDEBUG
  {
    BLI_mempool_chunk *chunk;
    bool found = false;
    for (chunk = pool->chunks; chunk; chunk = chunk->next) {
      if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
        found = true;
        break;
      }
    }
    if (!found) {
      BLI_assert_msg(0, "Attempt to free data which is not in pool.\n");
    }
  }

  /* Enable for debugging. */
  if (UNLIKELY(mempool_debug_memset)) {
    memset(addr, 255, pool->esize);
  }
#endif

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
    /* This will detect double free's. */
    BLI_assert(newhead->freeword != FREEWORD);
#endif
    newhead->freeword = FREEWORD;
  }

  newhead->next = pool->free;
  pool->free = newhead;

  pool->totused--;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

  /* Nothing is in use; free all the chunks except the first. */
  if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
    const uint esize = pool->esize;
    BLI_freenode *curnode;
    uint j;
    BLI_mempool_chunk *first;

    first = pool->chunks;
    mempool_chunk_free_all(first->next);
    first->next = NULL;
    pool->chunk_tail = first;

#ifdef USE_TOTALLOC
    pool->totalloc = pool->pchunk;
#endif

    /* Temp alloc so valgrind doesn't complain when setting free'd blocks 'next'. */
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

    curnode = CHUNK_DATA(first);
    pool->free = curnode;

    j = pool->pchunk;
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
    curnode = NODE_STEP_PREV(curnode);
    curnode->next = NULL; /* terminate the list */

#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
  }
}

int BLI_mempool_len(const BLI_mempool *pool)
{
  return (int)pool->totused;
}

void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  if (index < pool->totused) {
    /* We could have some faster mem chunk stepping code inline. */
    BLI_mempool_iter iter;
    void *elem;
    BLI_mempool_iternew(pool, &iter);
    for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
      /* pass */
    }
    return elem;
  }

  return NULL;
}

void BLI_mempool_as_table(BLI_mempool *pool, void **data)
{
  BLI_mempool_iter iter;
  void *elem;
  void **p = data;
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = BLI_mempool_iterstep(&iter))) {
    *p++ = elem;
  }
  BLI_assert((uint)(p - data) == pool->totused);
}

void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
{
  void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
  BLI_mempool_as_table(pool, data);
  return data;
}

void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
  const uint esize = pool->esize;
  BLI_mempool_iter iter;
  char *elem, *p = data;
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = BLI_mempool_iterstep(&iter))) {
    memcpy(p, elem, (size_t)esize);
    p = NODE_STEP_NEXT(p);
  }
  BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
}

void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
  char *data = MEM_malloc_arrayN(pool->totused, pool->esize, allocstr);
  BLI_mempool_as_array(pool, data);
  return data;
}

void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  iter->pool = pool;
  iter->curchunk = pool->chunks;
  iter->curindex = 0;
}

static void mempool_threadsafe_iternew(BLI_mempool *pool, BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iternew(pool, &ts_iter->iter);
  ts_iter->curchunk_threaded_shared = NULL;
}

ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t iter_num)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  ParallelMempoolTaskData *iter_arr = MEM_mallocN(sizeof(*iter_arr) * iter_num, __func__);
  BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);

  mempool_threadsafe_iternew(pool, &iter_arr->ts_iter);

  *curchunk_threaded_shared = iter_arr->ts_iter.iter.curchunk;
  iter_arr->ts_iter.curchunk_threaded_shared = curchunk_threaded_shared;
  for (size_t i = 1; i < iter_num; i++) {
    iter_arr[i].ts_iter = iter_arr[0].ts_iter;
    *curchunk_threaded_shared = iter_arr[i].ts_iter.iter.curchunk =
        ((*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL);
  }

  return iter_arr;
}

void mempool_iter_threadsafe_destroy(ParallelMempoolTaskData *iter_arr)
{
  BLI_assert(iter_arr->ts_iter.curchunk_threaded_shared != NULL);

  MEM_freeN(iter_arr->ts_iter.curchunk_threaded_shared);
  MEM_freeN(iter_arr);
}

#if 0
/* unoptimized, more readable */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
  void *ret = NULL;

  if (iter->curchunk == NULL || !iter->pool->totused) {
    return ret;
  }

  ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

  iter->curindex++;

  if (iter->curindex == iter->pool->pchunk) {
    iter->curindex = 0;
    iter->curchunk = iter->curchunk->next;
  }

  return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  BLI_freenode *ret;

  do {
    ret = bli_mempool_iternext(iter);
  } while (ret && ret->freeword == FREEWORD);

  return ret;
}

#else /* Optimized version of code above. */

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  if (UNLIKELY(iter->curchunk == NULL)) {
    return NULL;
  }

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;
      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == NULL)) {
        return (ret->freeword == FREEWORD) ? NULL : ret;
      }
      curnode = CHUNK_DATA(iter->curchunk);
    }
  } while (ret->freeword == FREEWORD);

  return ret;
}
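
/* Illustrative iteration sketch (not part of the original file; assumes `pool`
 * was created with the BLI_MEMPOOL_ALLOW_ITER flag):
 *
 *   BLI_mempool_iter iter;
 *   BLI_mempool_iternew(pool, &iter);
 *   for (void *elem = BLI_mempool_iterstep(&iter); elem; elem = BLI_mempool_iterstep(&iter)) {
 *     ... use `elem` ...
 *   }
 */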

void *mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iter *iter = &ts_iter->iter;
  if (UNLIKELY(iter->curchunk == NULL)) {
    return NULL;
  }

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;

      /* Begin unique to the `threadsafe` version of this function. */
      for (iter->curchunk = *ts_iter->curchunk_threaded_shared;
           (iter->curchunk != NULL) && (atomic_cas_ptr((void **)ts_iter->curchunk_threaded_shared,
                                                       iter->curchunk,
                                                       iter->curchunk->next) != iter->curchunk);
           iter->curchunk = *ts_iter->curchunk_threaded_shared) {
        /* pass. */
      }
      if (UNLIKELY(iter->curchunk == NULL)) {
        return (ret->freeword == FREEWORD) ? NULL : ret;
      }
      /* End `threadsafe` exception. */

      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == NULL)) {
        return (ret->freeword == FREEWORD) ? NULL : ret;
      }
      curnode = CHUNK_DATA(iter->curchunk);
    }
  } while (ret->freeword == FREEWORD);

  return ret;
}

#endif

void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
  BLI_mempool_chunk *mpchunk;
  BLI_mempool_chunk *mpchunk_next;
  uint maxchunks;

  BLI_mempool_chunk *chunks_temp;
  BLI_freenode *last_tail = NULL;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  if (totelem_reserve == -1) {
    maxchunks = pool->maxchunks;
  }
  else {
    maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
  }

  /* Free all after 'pool->maxchunks'. */
  mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
  if (mpchunk && mpchunk->next) {
    /* terminate */
    mpchunk_next = mpchunk->next;
    mpchunk->next = NULL;
    mpchunk = mpchunk_next;

    do {
      mpchunk_next = mpchunk->next;
      mempool_chunk_free(mpchunk);
    } while ((mpchunk = mpchunk_next));
  }

  /* re-initialize */
  pool->free = NULL;
  pool->totused = 0;
#ifdef USE_TOTALLOC
  pool->totalloc = 0;
#endif

  chunks_temp = pool->chunks;
  pool->chunks = NULL;
  pool->chunk_tail = NULL;

  while ((mpchunk = chunks_temp)) {
    chunks_temp = mpchunk->next;
    last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
  }
}

void BLI_mempool_clear(BLI_mempool *pool)
{
  BLI_mempool_clear_ex(pool, -1);
}

void BLI_mempool_destroy(BLI_mempool *pool)
{
  mempool_chunk_free_all(pool->chunks);

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
#endif

  MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug(void)
{
  mempool_debug_memset = true;
}
#endif