/* Efficient in-memory storage of multiple similar arrays. */

#define USE_FASTPATH_CHUNKS_FIRST
#ifdef USE_FASTPATH_CHUNKS_FIRST
# define USE_FASTPATH_CHUNKS_LAST
#define USE_ALIGN_CHUNKS_TEST
#define USE_HASH_TABLE_ACCUMULATE
#ifdef USE_HASH_TABLE_ACCUMULATE
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS 4
# define BCHUNK_HASH_LEN 4
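/* With accumulation enabled, a chunk's table key roughly folds in hashes from
 * the next BCHUNK_HASH_TABLE_ACCUMULATE_STEPS chunks, and each hash covers
 * BCHUNK_HASH_LEN elements (times the stride), so a single table lookup can
 * speak for a longer run of data. */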
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
# define HASH_TABLE_KEY_UNSET ((uint64_t)-1)
# define HASH_TABLE_KEY_FALLBACK ((uint64_t)-2)
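/* Sentinel values for the cached per-chunk key: UNSET marks a key that has
 * not been calculated yet, FALLBACK stands in when a real hash happens to
 * collide with one of the sentinels. */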
#define BCHUNK_HASH_TABLE_MUL 3
#define USE_MERGE_CHUNKS
#ifdef USE_MERGE_CHUNKS
# define BCHUNK_SIZE_MIN_DIV 8
# define BCHUNK_SIZE_MAX_MUL 2
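/* Derived size limits, worked through for an assumed chunk size of 4096
 * bytes: chunks below 4096 / BCHUNK_SIZE_MIN_DIV = 512 bytes are merged into
 * a neighbor, and a merged chunk may grow to 4096 * BCHUNK_SIZE_MAX_MUL =
 * 8192 bytes before it is split again. */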
#ifdef USE_HASH_TABLE_ACCUMULATE
#ifdef USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
memcpy(data_copy, data, data_len);
return bchunk_new(bs_mem, data_copy, data_len);
if (chunk->users == 1) {
const uchar *data_base,
const size_t data_base_len,
chunk_list->users = 0;
if (chunk_list->users == 1) {
chunk_list->users -= 1;
#ifdef USE_VALIDATE_LIST_SIZE
# define ASSERT_CHUNKLIST_SIZE(chunk_list, n) BLI_assert(bchunk_list_size(chunk_list) == n)
#ifndef ASSERT_CHUNKLIST_SIZE
# define ASSERT_CHUNKLIST_SIZE(chunk_list, n) (EXPR_NOP(chunk_list), EXPR_NOP(n))
#ifdef USE_VALIDATE_LIST_DATA_PARTIAL
# define ASSERT_CHUNKLIST_DATA(chunk_list, data) \
   BLI_assert(bchunk_list_data_check(chunk_list, data))
# define ASSERT_CHUNKLIST_DATA(chunk_list, data) (EXPR_NOP(chunk_list), EXPR_NOP(data))
#ifdef USE_MERGE_CHUNKS
if (data_merge_len <= info->chunk_byte_size_max) {
memcpy(data_merge, chunk_prev->data, chunk_prev->data_len);
const size_t data_prev_len = split;
const size_t data_curr_len = data_merge_len - split;
if (data_prev_len <= chunk_prev->data_len) {
const size_t data_curr_shrink_len = chunk_prev->data_len - data_prev_len;
memcpy(data_prev, chunk_prev->data, data_prev_len);
memcpy(data_curr, &chunk_prev->data[data_prev_len], data_curr_shrink_len);
memcpy(&data_curr[data_curr_shrink_len], chunk_curr->data, chunk_curr->data_len);
BLI_assert(data_curr_len <= chunk_curr->data_len);
const size_t data_prev_grow_len = data_prev_len - chunk_prev->data_len;
memcpy(data_prev, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_prev[chunk_prev->data_len], chunk_curr->data, data_prev_grow_len);
memcpy(data_curr, &chunk_curr->data[data_prev_grow_len], data_curr_len);
const size_t data_len,
size_t *r_data_trim_len,
size_t *r_data_last_chunk_len)
size_t data_last_chunk_len = 0;
size_t data_trim_len = data_len;
#ifdef USE_MERGE_CHUNKS
data_trim_len = data_trim_len - data_last_chunk_len;
if (data_last_chunk_len) {
if (data_last_chunk_len < info->chunk_byte_size_min) {
data_last_chunk_len = data_len;
data_trim_len = data_trim_len - data_last_chunk_len;
BLI_assert(data_trim_len + data_last_chunk_len == data_len);
*r_data_trim_len = data_trim_len;
*r_data_last_chunk_len = data_last_chunk_len;
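/* Worked trace (sizes assumed: chunk_byte_size = 4096, min = 512; the
 * remainder computation elided from this excerpt is taken to be
 * data_len % chunk_byte_size): data_len = 8300 leaves remainder 108; since
 * 108 < 512 one whole chunk is given back, so data_trim_len = 4096 and
 * data_last_chunk_len = 4204, keeping trim + last == data_len. */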
const size_t data_len)
#ifdef USE_MERGE_CHUNKS
BLI_assert(data_len <= info->chunk_byte_size_max);
const size_t data_merge_len = chunk_prev->data_len + data_len;
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
memcpy(data_merge, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
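/* Appending small data merges it with the previous chunk: the first branch
 * (only the tail memcpy) appears to extend a single-user chunk in place,
 * while the second copies both the old chunk and the new data into a fresh
 * merged buffer when the chunk is shared. */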
# ifdef USE_MERGE_CHUNKS
size_t data_trim_len, data_last_chunk_len;
if (data_trim_len != 0) {
while (i_prev != data_trim_len) {
if (data_last_chunk_len) {
if (data_last_chunk_len) {
#ifdef USE_MERGE_CHUNKS
#ifdef USE_MERGE_CHUNKS
const size_t data_len)
size_t data_trim_len, data_last_chunk_len;
while (i_prev != data_trim_len) {
if (data_last_chunk_len) {
#ifdef USE_MERGE_CHUNKS
# ifdef USE_MERGE_CHUNKS
#define HASH_INIT (5381)
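/* hash_data below is the classic djb2 hash: seeded with 5381, each byte is
 * folded in as h = h * 33 + byte (the `(h << 5) + h` form). */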
const signed char *p;
for (p = (const signed char *)key; n--; p++) {
h = ((h << 5) + h) + (unsigned int)*p;
#ifdef USE_HASH_TABLE_ACCUMULATE
const uchar *data_slice,
const size_t data_slice_len,
for (size_t i = 0, i_step = 0; i_step < data_slice_len; i++, i_step += info->chunk_stride) {
for (size_t i = 0; i < data_slice_len; i++) {
const size_t data_len,
const size_t hash_array_len = data_len / info->chunk_stride;
size_t i_next = hash_array_len - i;
BLI_assert(data_trim_len <= cref->link->data_len);
} while ((i < hash_array_len) && (cref != NULL));
if (UNLIKELY(iter_steps > hash_array_len)) {
iter_steps = hash_array_len;
const size_t hash_array_search_len = hash_array_len - iter_steps;
while (iter_steps != 0) {
const size_t hash_offset = iter_steps;
for (uint i = 0; i < hash_array_search_len; i++) {
hash_array[i] += (hash_array[i + hash_offset]) * ((hash_array[i] & 0xff) + 1);
if (UNLIKELY(!(iter_steps <= hash_array_len))) {
iter_steps = hash_array_len;
size_t iter_steps_sub = iter_steps;
while (iter_steps != 0) {
const size_t hash_array_search_len = hash_array_len - iter_steps_sub;
const size_t hash_offset = iter_steps;
for (uint i = 0; i < hash_array_search_len; i++) {
hash_array[i] += (hash_array[i + hash_offset]) * ((hash_array[i] & 0xff) + 1);
iter_steps_sub += iter_steps;
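/* Both accumulation loops fold hashes from up to `iter_steps` chunks ahead
 * into each earlier slot, so a key summarizes a read-ahead window of the
 * following chunks. hash_accum_single additionally shrinks the range it
 * touches each pass (via iter_steps_sub), since only the first accumulated
 * value is ultimately needed, while matching what hash_accum would produce
 * for that slot. */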
const size_t hash_store_len)
# ifdef USE_HASH_TABLE_KEY_CACHE
# ifdef USE_HASH_TABLE_KEY_CACHE
const size_t table_len,
const size_t i_table_start,
const size_t data_len,
size_t size_left = data_len - offset;
size_t key_index = (size_t)(key % (hash_key)table_len);
for (const BTableRef *tref = table[key_index]; tref; tref = tref->next) {
# ifdef USE_HASH_TABLE_KEY_CACHE
if (chunk_test->data_len <= size_left) {
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride;
# ifdef USE_HASH_TABLE_KEY_CACHE
const size_t table_len,
const size_t data_len,
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride;
size_t size_left = data_len - offset;
size_t key_index = (size_t)(key % (hash_key)table_len);
for (BTableRef *tref = table[key_index]; tref; tref = tref->next) {
# ifdef USE_HASH_TABLE_KEY_CACHE
if (chunk_test->data_len <= size_left) {
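/* Two table_lookup variants are compiled: with USE_HASH_TABLE_ACCUMULATE the
 * key comes precomputed from table_hash_array, otherwise it is hashed on the
 * fly over BCHUNK_HASH_LEN * stride bytes; candidates whose keys match are
 * then verified against the actual chunk bytes. */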
uint chunk_list_reference_skip_len = 0;
size_t chunk_list_reference_skip_bytes = 0;
#ifdef USE_FASTPATH_CHUNKS_FIRST
bool full_match = true;
cref_match_first = cref;
chunk_list_reference_skip_len += 1;
if (cref_match_first != NULL) {
size_t chunk_size_step = 0;
chunk_size_step += chunk->data_len;
if (cref == cref_match_first) {
i_prev = chunk_size_step;
#define data_len_original invalid_usage
#ifdef data_len_original
#ifdef USE_FASTPATH_CHUNKS_LAST
chunk_list_reference_last = cref;
chunk_list_reference_skip_len += 1;
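/* Fast paths: chunks that are byte-identical at the start
 * (USE_FASTPATH_CHUNKS_FIRST) and end (USE_FASTPATH_CHUNKS_LAST) of the
 * reference list are reused directly, so only the modified middle region
 * goes through hash-table matching. */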
bool use_aligned = false;
#ifdef USE_ALIGN_CHUNKS_TEST
if (data_len - i_prev <= chunk_list->total_size / 4) {
while (i_prev != data_len) {
if ((cref != chunk_list_reference_last) &&
(chunk_list_reference->chunk_refs_len >= chunk_list_reference_skip_len) &&
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t i_table_start = i_prev;
const size_t table_hash_array_len = (data_len - i_prev) / info->chunk_stride;
hash_key *table_hash_array = MEM_mallocN(sizeof(*table_hash_array) * table_hash_array_len,
uint i_table_start = 0;
const uint chunk_list_reference_remaining_len = (chunk_list_reference->chunk_refs_len -
                                                 chunk_list_reference_skip_len) +
chunk_list_reference_remaining_len * sizeof(BTableRef), __func__);
uint table_ref_stack_n = 0;
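/* BTableRef entries come from a single pre-allocated stack sized to the
 * remaining reference chunks (the MEM_mallocN above), so building the table
 * performs no per-entry allocations. */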
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t chunk_list_reference_bytes_remaining = chunk_list_reference->total_size -
                                              chunk_list_reference_skip_bytes;
if (cref_match_first) {
cref = cref_match_first;
#ifdef USE_PARANOID_CHECKS
size_t test_bytes_len = 0;
while (cr != chunk_list_reference_last) {
BLI_assert(test_bytes_len == chunk_list_reference_bytes_remaining);
while ((cref != chunk_list_reference_last) &&
size_t key_index = (size_t)(key % (hash_key)table_len);
BTableRef *tref_prev = table[key_index];
BLI_assert(table_ref_stack_n < chunk_list_reference_remaining_len);
BTableRef *tref = &table_ref_stack[table_ref_stack_n++];
tref->next = tref_prev;
table[key_index] = tref;
BLI_assert(table_ref_stack_n <= chunk_list_reference_remaining_len);
#ifdef USE_HASH_TABLE_ACCUMULATE
for (size_t i = i_prev; i < data_len;) {
info, table, table_len, i_table_start, data, data_len, i, table_hash_array);
if (cref_found != NULL) {
while (!ELEM(cref_found->next, NULL, chunk_list_reference_last)) {
cref_found = cref_found->next;
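/* After a hash hit, the reference list is walked forward while following
 * chunks presumably still compare equal (the compare step is elided from
 * this excerpt), extending the match without further table lookups. */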
#ifdef USE_HASH_TABLE_ACCUMULATE
if (i_prev != data_len) {
#ifdef USE_FASTPATH_CHUNKS_LAST
if (chunk_list_reference_last != NULL) {
#undef data_len_original
#ifdef USE_MERGE_CHUNKS
#ifdef USE_HASH_TABLE_ACCUMULATE
state_next = state->next;
size_t size_accum = 0;
size_accum += state->chunk_list->total_size;
size_t size_total = 0;
size_total += (size_t)chunk->data_len;
const size_t data_len,
#ifdef USE_PARANOID_CHECKS
if (state_reference) {
if (state_reference) {
chunk_list->users += 1;
state->chunk_list = chunk_list;
#ifdef USE_PARANOID_CHECKS
size_t data_test_len;
#ifdef USE_PARANOID_CHECKS
return state->chunk_list->total_size;
#ifdef USE_PARANOID_CHECKS
size_t data_test_len = 0;
*r_data_len = state->chunk_list->total_size;
size_t total_size = 0;
#ifdef USE_MERGE_CHUNKS
#define GHASH_PTR_ADD_USER(gh, pt) \
if (BLI_ghash_ensure_p((gh), (pt), &val)) { \
*((int *)val) += 1; \
*((int *)val) = 1; \
#undef GHASH_PTR_ADD_USER
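/* A minimal usage sketch of the public API these excerpts implement. This is
 * an illustrative caller, not part of the file; it assumes the declarations
 * from BLI_array_store.h (BLI_array_store_create, BLI_array_store_state_add,
 * BLI_array_store_state_data_get_alloc, etc.) and MEM_freeN. */

static void array_store_example(void)
{
  /* Chunks hold 32 ints; similar states share chunk storage. */
  BArrayStore *bs = BLI_array_store_create(sizeof(int), 32);

  int data_a[100] = {0};
  BArrayState *state_a = BLI_array_store_state_add(bs, data_a, sizeof(data_a), NULL);

  int data_b[100] = {0};
  data_b[50] = 1; /* Nearly identical, so most chunks are shared with state_a. */
  BArrayState *state_b = BLI_array_store_state_add(bs, data_b, sizeof(data_b), state_a);

  /* Expand a state back into a flat allocation. */
  size_t data_len;
  int *data_copy = BLI_array_store_state_data_get_alloc(state_b, &data_len);
  BLI_assert(data_len == sizeof(data_b));
  MEM_freeN(data_copy);

  BLI_array_store_state_remove(bs, state_a);
  BLI_array_store_state_remove(bs, state_b);
  BLI_array_store_destroy(bs);
}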