#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
     (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
     (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
#endif
/* Sentinel stored in the first word of free elements. It reads the same in
 * either byte order, and matches the platform pointer width. */
#define FREEWORD \
  ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
                                        MAKE_ID('e', 'f', 'f', 'e'))

/* The 'used' word just needs to be something besides FREEWORD. */
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')
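
/* Sketch of how the sentinels are used (illustrative, not part of the
 * original file): while an element sits on the free list its first word
 * holds FREEWORD; BLI_mempool_alloc() stamps USEDWORD over it when the pool
 * was created with BLI_MEMPOOL_ALLOW_ITER. Debug builds can then catch a
 * double-free before relinking the node:
 *
 *   BLI_freenode *node = addr;
 *   BLI_assert(node->freeword != FREEWORD);  // Already freed?
 *   node->freeword = FREEWORD;
 *
 * The iterator relies on the same word to skip free slots inside chunks. */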
/* Round chunk allocations up to power-of-two sizes (see BLI_mempool_create). */
#define USE_CHUNK_POW2
/* Elements must be able to hold a free-list node (pointer plus freeword). */
#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

/* The element data lives directly after the chunk header. */
#define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))

/* Step between elements; both expect a local `esize` to be in scope. */
#define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
#define NODE_STEP_PREV(node) ((void *)((char *)(node) - esize))

/* Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
#ifdef USE_CHUNK_POW2
/* Round `x` up to the next power of two. */
static uint power_of_2_max_u(uint x)
{
  x -= 1;
  x |= (x >> 1);
  x |= (x >> 2);
  x |= (x >> 4);
  x |= (x >> 8);
  x |= (x >> 16);
  return x + 1;
}
#endif

/* Walk `index` links along the chunk list (NULL when out of range). */
BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
  while (index-- && head) {
    head = head->next;
  }
  return head;
}

/* Number of chunks to allocate for `elem_num` elements. */
BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
{
  return (elem_num <= pchunk) ? 1 : ((elem_num / pchunk) + 1);
}
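
/* Worked example (illustrative, not part of the original file): with
 * elem_num = 100 and pchunk = 42, 100 > 42 gives (100 / 42) + 1 = 3 chunks
 * (126 element slots). Integer division truncates, so the +1 rounds up,
 * at worst over-reserving by a single chunk when elem_num divides evenly. */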
/* Initialize a chunk and link it onto `pool->chunks`, returning the tail of
 * the chunk's free list. */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
                                       BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *last_tail)
{
  const uint esize = pool->esize;
  BLI_freenode *curnode = CHUNK_DATA(mpchunk);
  uint j;

  /* Append to the chunk list. */
  if (pool->chunk_tail) {
    pool->chunk_tail->next = mpchunk;
  }
  else {
    BLI_assert(pool->chunks == NULL);
    pool->chunks = mpchunk;
  }

  mpchunk->next = NULL;
  pool->chunk_tail = mpchunk;

  /* An empty free list starts at this chunk's first element. */
  if (UNLIKELY(pool->free == NULL)) {
    pool->free = curnode;
  }
  /* Build the intrusive free list through the chunk's elements; with
   * BLI_MEMPOOL_ALLOW_ITER each element is also tagged with FREEWORD. */
  j = pool->pchunk;
  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode->freeword = FREEWORD;
      curnode = curnode->next;
    }
  }
  else {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
  }

  /* Terminate the list (rewind one step); this pointer is overwritten
   * if `curnode` is passed back in as `last_tail`. */
  curnode = NODE_STEP_PREV(curnode);
  curnode->next = NULL;

  /* The final pointer in the previously allocated chunk must now point
   * into this chunk. */
  if (last_tail) {
    last_tail->next = CHUNK_DATA(mpchunk);
  }

  return curnode;
}
/* Free the chunk list from `mpchunk` onward. */
static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
  BLI_mempool_chunk *mpchunk_next;

  for (; mpchunk; mpchunk = mpchunk_next) {
    mpchunk_next = mpchunk->next;
    mempool_chunk_free(mpchunk);
  }
}
  /* In BLI_mempool_create(): optimize the chunk size, then allocate. */
#ifdef USE_CHUNK_POW2
  /* Recompute `pchunk` so the whole chunk allocation (data plus headers)
   * fills a power-of-two sized block with minimal slop. */
  {
    BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
    pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
  }
#endif

  pool->csize = esize * pchunk;

  /* Ensure this is a power of two, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
  {
    uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
    BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
  }
#endif

  pool->pchunk = pchunk;
  pool->maxchunks = maxchunks;

  if (elem_num) {
    /* Allocate the actual chunks up-front. */
    for (i = 0; i < maxchunks; i++) {
      BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
      last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
    }
  }
#ifdef WITH_MEM_VALGRIND
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  return pool;
}
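
/* Usage sketch (illustrative, not part of the original file): an iterable
 * pool of 512-byte elements, reserving 100 up-front, 128 per chunk:
 *
 *   BLI_mempool *pool = BLI_mempool_create(512, 100, 128, BLI_MEMPOOL_ALLOW_ITER);
 *   void *elem = BLI_mempool_alloc(pool);
 *   // ... use `elem` ...
 *   BLI_mempool_free(pool, elem);
 *   BLI_mempool_destroy(pool);
 */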
void *BLI_mempool_alloc(BLI_mempool *pool)
{
  BLI_freenode *free_pop;

  if (UNLIKELY(pool->free == NULL)) {
    /* The free list is exhausted: grow the pool by one chunk. */
    BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
    mempool_chunk_add(pool, mpchunk, NULL);
  }

  free_pop = pool->free;

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    free_pop->freeword = USEDWORD; /* Mark in-use so the iterator visits it. */
  }

  pool->free = free_pop->next;
  pool->totused++;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
#endif

  return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);
  memset(retval, 0, (size_t)pool->esize);
  return retval;
}
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
  BLI_freenode *newhead = addr;

#ifndef NDEBUG
  {
    /* Verify `addr` actually belongs to one of this pool's chunks. */
    BLI_mempool_chunk *chunk;
    bool found = false;
    for (chunk = pool->chunks; chunk; chunk = chunk->next) {
      if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
        found = true;
        break;
      }
    }
    if (!found) {
      BLI_assert_msg(0, "Attempt to free data which is not in pool.\n");
    }
  }

  /* Enabled via BLI_mempool_set_memory_debug(): scribble over freed memory
   * to surface use-after-free bugs. */
  if (UNLIKELY(mempool_debug_memset)) {
    memset(addr, 255, pool->esize);
  }
#endif

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    /* This will detect double frees. */
    BLI_assert(newhead->freeword != FREEWORD);
    newhead->freeword = FREEWORD;
  }

  /* Push onto the head of the free list. */
  newhead->next = pool->free;
  pool->free = newhead;
  pool->totused--;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_FREE(pool, addr);
#endif
  /* When nothing is left in use, keep only the first chunk and rebuild
   * its free list, returning the rest of the memory to the system. */
  if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
    const uint esize = pool->esize;
    BLI_freenode *curnode;
    BLI_mempool_chunk *first;
    uint j;

    first = pool->chunks;
    mempool_chunk_free_all(first->next);
    first->next = NULL;
    pool->chunk_tail = first;

    /* Temporary "allocation" so Valgrind tolerates the free-list links
     * being written into memory it considers freed. */
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

    curnode = CHUNK_DATA(first);
    pool->free = curnode;

    j = pool->pchunk;
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
    curnode = NODE_STEP_PREV(curnode);
    curnode->next = NULL; /* Terminate the free list. */

#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
  }
}
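
/* For reference (this helper sits outside the excerpt): the
 * `mempool_debug_memset` toggle used above is enabled through the public
 * BLI_mempool_set_memory_debug():
 *
 *   void BLI_mempool_set_memory_debug(void)
 *   {
 *     mempool_debug_memset = true;
 *   }
 */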
int BLI_mempool_len(const BLI_mempool *pool)
{
  return (int)pool->totused;
}
  /* In BLI_mempool_findelem(): random access is linear, stepping an iterator
   * `index` times; out-of-range indices return NULL. */
  if (index < pool->totused) {
    /* ... iterate to the element and return it ... */
  }
  return NULL;
  /* In BLI_mempool_as_array(): copy all elements, tightly packed, into
   * `data`, which must hold at least `pool->totused * esize` bytes. */
  char *elem, *p = data;

  while ((elem = BLI_mempool_iterstep(&iter))) {
    memcpy(p, elem, (size_t)esize);
    p = NODE_STEP_NEXT(p);
  }
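
/* For reference (outside this excerpt): the allocating variant wraps this
 * with a guarded-memory array allocation:
 *
 *   void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
 *   {
 *     char *data = MEM_malloc_arrayN(pool->totused, pool->esize, allocstr);
 *     BLI_mempool_as_array(pool, data);
 *     return data;
 *   }
 */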
  /* In mempool_iter_threadsafe_create(): hand each of the `iter_num`
   * iterators its own starting chunk, advancing the shared chunk pointer by
   * one chunk per iterator. */
  for (size_t i = 1; i < iter_num; i++) {
    iter_arr[i].ts_iter = iter_arr[0].ts_iter;
    *curchunk_threaded_shared = iter_arr[i].ts_iter.iter.curchunk =
        ((*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL);
  }
  /* In BLI_mempool_iterstep(): advance, skipping elements still on the free
   * list (their first word is FREEWORD). */
  do {
    ret = bli_mempool_iternext(iter);
  } while (ret && ret->freeword == FREEWORD);
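
/* Iteration usage sketch (illustrative, not part of the original file); the
 * pool must have been created with BLI_MEMPOOL_ALLOW_ITER:
 *
 *   BLI_mempool_iter iter;
 *   BLI_mempool_iternew(pool, &iter);
 *   for (void *elem = BLI_mempool_iterstep(&iter); elem;
 *        elem = BLI_mempool_iterstep(&iter)) {
 *     // `elem` is a live element; free slots are skipped via FREEWORD.
 *   }
 */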
  /* In BLI_mempool_clear_ex(): reset the Valgrind pool, then decide how many
   * chunks to keep for reuse. */
#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  if (totelem_reserve == -1) {
    maxchunks = pool->maxchunks;
  }
  else {
    maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
  }
  /* Free all chunks past the reserved count. */
  mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
  if (mpchunk && mpchunk->next) {
    /* Terminate the kept list, then free the remainder. */
    mpchunk_next = mpchunk->next;
    mpchunk->next = NULL;
    mpchunk = mpchunk_next;

    do {
      mpchunk_next = mpchunk->next;
      mempool_chunk_free(mpchunk);
    } while ((mpchunk = mpchunk_next));
  }
  /* Detach the kept chunks and re-add them one by one, rebuilding the
   * free list from scratch. */
  chunks_temp = pool->chunks;
  pool->chunks = NULL;
  pool->chunk_tail = NULL;

  while ((mpchunk = chunks_temp)) {
    chunks_temp = mpchunk->next;
    last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
  }
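
/* For reference (outside this excerpt): the convenience wrapper
 * BLI_mempool_clear() is the reserve-nothing variant:
 *
 *   void BLI_mempool_clear(BLI_mempool *pool)
 *   {
 *     BLI_mempool_clear_ex(pool, -1);
 *   }
 */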
void BLI_mempool_destroy(BLI_mempool *pool)
{
  mempool_chunk_free_all(pool->chunks);

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
#endif

  MEM_freeN(pool);
}