/* Excerpts from the BLI_ghash / BLI_gset hash-table implementation. */

#define GHASH_INTERNAL_API
#define GHASH_USE_MODULO_BUCKETS

/* Prime bucket counts used in the modulo-buckets mode. */
const uint BLI_ghash_hash_sizes[] = {
    5, 11, 17, 37, 67, 131, 257, 521, 1031,
    2053, 4099, 8209, 16411, 32771, 65537, 131101, 262147, 524309,
    1048583, 2097169, 4194319, 8388617, 16777259, 33554467, 67108879, 134217757, 268435459,
};
#define hashsizes BLI_ghash_hash_sizes

#ifdef GHASH_USE_MODULO_BUCKETS
#  define GHASH_MAX_SIZE 27
#else
#  define GHASH_BUCKET_BIT_MIN 2
#  define GHASH_BUCKET_BIT_MAX 28
#endif

/* Resize thresholds: grow once the entry count exceeds 3/4 of the bucket count,
 * shrink once it drops below 3/16 of it. */
#define GHASH_LIMIT_GROW(_nbkt) (((_nbkt)*3) / 4)
#define GHASH_LIMIT_SHRINK(_nbkt) (((_nbkt)*3) / 16)

#define GHASH_ENTRY_SIZE(_is_gset) ((_is_gset) ? sizeof(GSetEntry) : sizeof(GHashEntry))
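/* Illustration only (not part of the original source): a tiny stand-alone program
 * showing what the two limit macros above evaluate to for a few power-of-two
 * bucket counts. */
#include <stdio.h>

int main(void)
{
  for (unsigned int nbkt = 4; nbkt <= 1024; nbkt *= 4) {
    /* Same arithmetic as GHASH_LIMIT_GROW / GHASH_LIMIT_SHRINK:
     * e.g. 1024 buckets -> grow past 768 entries, shrink below 192. */
    printf("nbuckets=%u  grow>%u  shrink<%u\n", nbkt, (nbkt * 3) / 4, (nbkt * 3) / 16);
  }
  return 0;
}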
/* GHash bucket-size bookkeeping fields. */
#ifdef GHASH_USE_MODULO_BUCKETS
  uint cursize, size_min;
#else
  uint bucket_mask, bucket_bit, bucket_bit_min;
#endif
/* ghash_entry_copy(): */
  dst->key = (keycopyfp) ? keycopyfp(src->key) : src->key;

  if ((gh_dst->flag & GHASH_FLAG_IS_GSET) == 0) {
    if ((gh_src->flag & GHASH_FLAG_IS_GSET) == 0) {
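/* Caller-side sketch (hypothetical, not taken from this file) of what the copy
 * callback above means for BLI_ghash_copy(): with NULL callbacks the duplicate
 * shares key/value pointers, while a key-copy callback such as the stand-in
 * 'key_strdup' below makes the duplicate own its keys. */
#include <string.h>

#include "BLI_ghash.h"
#include "MEM_guardedalloc.h"

static void *key_strdup(const void *key)
{
  /* Duplicate a string key with the guarded allocator (illustration only). */
  const size_t len = strlen(key) + 1;
  char *key_copy = MEM_mallocN(len, __func__);
  memcpy(key_copy, key, len);
  return key_copy;
}

static GHash *copy_with_owned_keys(const GHash *gh)
{
  /* Values stay shared; pass a GHashValCopyFP as well to deep-copy them too. */
  return BLI_ghash_copy(gh, key_strdup, NULL);
}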
/* ghash_bucket_index(): map a hash value onto a bucket. */
#ifdef GHASH_USE_MODULO_BUCKETS
  return hash % gh->nbuckets;
#else
  return hash & gh->bucket_mask;
#endif
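/* Stand-alone illustration (not code from this file) of the two indexing
 * strategies above: a prime modulo when GHASH_USE_MODULO_BUCKETS is enabled,
 * a power-of-two mask otherwise. */
#include <stdio.h>

int main(void)
{
  const unsigned int hash = 0x9E3779B9u;          /* arbitrary example hash */
  const unsigned int nbuckets_prime = 521;        /* one of the BLI_ghash_hash_sizes primes */
  const unsigned int bucket_mask = (1u << 9) - 1; /* 512 buckets */

  printf("modulo bucket: %u\n", hash % nbuckets_prime);
  printf("masked bucket: %u\n", hash & bucket_mask);
  return 0;
}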
/* ghash_find_next_bucket_index(): find the next non-empty bucket, wrapping around. */
  if (gh->buckets[curr_bucket]) {
    return curr_bucket;
  }
  for (; curr_bucket < gh->nbuckets; curr_bucket++) {
    if (gh->buckets[curr_bucket]) {
      return curr_bucket;
    }
  }
  for (curr_bucket = 0; curr_bucket < gh->nbuckets; curr_bucket++) {
    if (gh->buckets[curr_bucket]) {
      return curr_bucket;
    }
  }
/* ghash_buckets_resize(): */
#ifndef GHASH_USE_MODULO_BUCKETS
  gh->bucket_mask = nbuckets - 1;
#endif

  if (nbuckets > nbuckets_old) {
    for (i = 0; i < nbuckets_old; i++) {
      for (Entry *e = buckets_old[i], *e_next; e; e = e_next) {
        /* Growing: push each entry onto the chain of its newly computed bucket. */
        e_next = e->next;
        e->next = buckets_new[bucket_index];
        buckets_new[bucket_index] = e;
      }
    }
  }
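/* Stand-alone sketch (not from the original file) of why entries are re-bucketed
 * when the table grows: the same hash generally maps to a different index once
 * the mask widens. */
#include <stdio.h>

int main(void)
{
  const unsigned int hash = 0xDEADBEEFu;
  const unsigned int old_mask = (1u << 3) - 1; /* 8 buckets */
  const unsigned int new_mask = (1u << 4) - 1; /* 16 buckets */

  printf("old bucket: %u\n", hash & old_mask); /* 7 */
  printf("new bucket: %u\n", hash & new_mask); /* 15 */
  return 0;
}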
  else {
    /* Shrinking: rebuild the chains into the smaller bucket array. */
    for (i = 0; i < nbuckets_old; i++) {
#ifdef GHASH_USE_MODULO_BUCKETS
      for (Entry *e = buckets_old[i], *e_next; e; e = e_next) {
        e_next = e->next;
        e->next = buckets_new[bucket_index];
        buckets_new[bucket_index] = e;
      }
#else
      /* No re-hash needed: with a smaller mask, every entry of old bucket 'i'
       * maps to the same new bucket, so the whole chain is appended at once. */
      for (e = buckets_old[i]; e && e->next; e = e->next) {
        /* Walk to the last entry of the old chain. */
      }
      if (buckets_old[i]) {
        e->next = buckets_new[bucket_index];
        buckets_new[bucket_index] = buckets_old[i];
      }
#endif
    }
  }
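/* Stand-alone check (illustration only) of the shortcut used above: when the
 * bucket count shrinks by dropping mask bits, every hash that landed in old
 * bucket 'i' lands in bucket (i & new_mask), so chains never need splitting. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
  const unsigned int old_mask = (1u << 6) - 1; /* 64 buckets */
  const unsigned int new_mask = (1u << 4) - 1; /* 16 buckets */

  for (unsigned int hash = 0; hash < 100000; hash++) {
    const unsigned int i = hash & old_mask;
    assert((hash & new_mask) == (i & new_mask));
  }
  printf("chains stay together when the mask shrinks\n");
  return 0;
}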
/* ghash_buckets_expand(): widen the bucket count until the grow limit is respected. */
#ifndef GHASH_USE_MODULO_BUCKETS
  while ((nentries > gh->limit_grow) && (gh->bucket_bit < GHASH_BUCKET_BIT_MAX)) {
    new_nbuckets = 1u << ++gh->bucket_bit;
  }
#endif

#ifndef GHASH_USE_MODULO_BUCKETS
  gh->bucket_bit_min = gh->bucket_bit;
#endif
static void ghash_buckets_contract(GHash *gh,
                                   const uint nentries,
                                   const bool user_defined,
                                   const bool force_shrink)
{
#ifdef GHASH_USE_MODULO_BUCKETS
  while ((nentries < gh->limit_shrink) && (gh->cursize > gh->size_min)) {
    new_nbuckets = hashsizes[--gh->cursize];
  }
#else
  while ((nentries < gh->limit_shrink) && (gh->bucket_bit > gh->bucket_bit_min)) {
    new_nbuckets = 1u << --gh->bucket_bit;
  }
#endif

#ifndef GHASH_USE_MODULO_BUCKETS
  gh->bucket_bit_min = gh->bucket_bit;
#endif
/* ghash_buckets_reset(): back to the initial bucket count. */
#ifndef GHASH_USE_MODULO_BUCKETS
  gh->bucket_bit = GHASH_BUCKET_BIT_MIN;
  gh->bucket_bit_min = GHASH_BUCKET_BIT_MIN;
  gh->nbuckets = 1u << gh->bucket_bit;
#endif
/* ghash_lookup_entry_ex(): walk the chain of the key's bucket. */
  for (e = gh->buckets[bucket_index]; e; e = e->next) {

BLI_INLINE Entry *ghash_lookup_entry_prev_ex(GHash *gh,
                                             const void *key,
                                             Entry **r_e_prev,
                                             const uint bucket_index)
static GHash *ghash_new(GHashHashFP hashfp,
                        GHashCmpFP cmpfp,
                        const char *info,
                        const uint nentries_reserve,
                        const uint flag)

/* The insert helpers (ghash_insert_ex() and its key-only variants) all push the
 * new entry onto the head of its bucket's chain: */
  e->next = gh->buckets[bucket_index];
  gh->buckets[bucket_index] = e;
/* ghash_remove_ex(): unlink the found entry from its bucket's chain. */
  if (e_prev) {
    e_prev->next = e->next;
  }
  else {
    gh->buckets[bucket_index] = e->next;
  }
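/* Generic illustration (not ghash code): the same unlink step as a small
 * self-contained helper, with 'chain_unlink' and 'Node' being hypothetical names. */
#include <stddef.h>

struct Node {
  struct Node *next;
};

static void chain_unlink(struct Node **head, struct Node *prev, struct Node *node)
{
  if (prev) {
    prev->next = node->next; /* mirrors: e_prev->next = e->next */
  }
  else {
    *head = node->next; /* mirrors: gh->buckets[bucket_index] = e->next */
  }
}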
/* ghash_pop(): remember where the bucket scan stopped, for the next call. */
  state->curr_bucket = curr_bucket;
  for (i = 0; i < gh->nbuckets; i++) {

  for (i = 0; i < gh->nbuckets; i++) {
GHash *BLI_ghash_new_ex(GHashHashFP hashfp,
                        GHashCmpFP cmpfp,
                        const char *info,
                        const uint nentries_reserve)
{
  return ghash_new(hashfp, cmpfp, info, nentries_reserve, 0);
}
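/* A hedged usage sketch of the public API (hypothetical caller code, not part of
 * this file). The hash/compare callbacks are local stand-ins; note the assumed
 * GHash convention that the compare callback returns false when two keys are
 * equal (i.e. it reports "keys differ"). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include "BLI_ghash.h"

static unsigned int str_hash(const void *key)
{
  /* djb2-style string hash, illustration only. */
  unsigned int h = 5381;
  for (const char *p = key; *p; p++) {
    h = (h * 33) ^ (unsigned char)*p;
  }
  return h;
}

static bool str_cmp_neq(const void *a, const void *b)
{
  return strcmp(a, b) != 0; /* false == equal (assumed GHash convention). */
}

static void ghash_usage_example(void)
{
  GHash *gh = BLI_ghash_new(str_hash, str_cmp_neq, __func__);

  BLI_ghash_insert(gh, (void *)"suzanne", (void *)"monkey");
  BLI_ghash_insert(gh, (void *)"cube", (void *)"primitive");

  printf("len=%u, cube -> %s\n",
         BLI_ghash_len(gh),
         (const char *)BLI_ghash_lookup(gh, "cube"));

  /* Keys and values are string literals here, so there is nothing to free. */
  BLI_ghash_free(gh, NULL, NULL);
}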
/* BLI_ghash_replace_key(): swap in the new key, handing the old one back to the caller. */
  void *key_prev = e->e.key;
/* BLI_ghash_lookup_default() and BLI_ghash_lookup_p(): */
  return e ? e->val : val_default;

  return e ? &e->val : NULL;
/* BLI_ghash_ensure_p() and BLI_ghash_ensure_p_ex() both report whether the key
 * was already present before reserving its slot: */
  const bool haskey = (e != NULL);

  const bool haskey = (e != NULL);
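/* Hypothetical caller sketch (not from this file) of the "ensure" pattern above:
 * look the key up and reserve its value slot in one pass. 'count_word' is a
 * made-up helper, and it assumes the value slot is used to store an integer
 * counter directly in the pointer. */
#include <stdint.h>

#include "BLI_ghash.h"

static void count_word(GHash *gh, void *word)
{
  void **val_p;
  if (!BLI_ghash_ensure_p(gh, word, &val_p)) {
    *val_p = NULL; /* Key was just inserted: initialize its slot. */
  }
  *val_p = (void *)((uintptr_t)*val_p + 1);
}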
/* BLI_ghash_pop(): nothing left to pop. */
  *r_key = *r_val = NULL;
void BLI_ghash_clear_ex(GHash *gh,
                        GHashKeyFreeFP keyfreefp,
                        GHashValFreeFP valfreefp,
                        const uint nentries_reserve)

/* Clearing and freeing only walk the entries when a free callback was given: */
  if (keyfreefp || valfreefp) {

  if (keyfreefp || valfreefp) {
GSet *BLI_gset_new_ex(GSetHashFP hashfp,
                      GSetCmpFP cmpfp,
                      const char *info,
                      const uint nentries_reserve)
{
  return (GSet *)ghash_new(hashfp, cmpfp, info, nentries_reserve, GHASH_FLAG_IS_GSET);
}

uint BLI_gset_len(const GSet *gs)
{
  return ((GHash *)gs)->nentries;
}
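/* Companion sketch (hypothetical caller code, same hedges as the GHash example
 * above): a GSet is the same structure with key-only entries, so it works as a
 * "seen before?" set. It reuses str_hash()/str_cmp_neq() from the earlier sketch
 * and assumes GSetHashFP/GSetCmpFP accept the same callback shapes. */
#include <stdio.h>

#include "BLI_ghash.h"

static void gset_usage_example(void)
{
  GSet *gs = BLI_gset_new(str_hash, str_cmp_neq, __func__);
  const char *names[] = {"cube", "suzanne", "cube"};

  for (int i = 0; i < 3; i++) {
    /* BLI_gset_add() returns true only when the key was not in the set yet. */
    if (BLI_gset_add(gs, (void *)names[i])) {
      printf("new: %s\n", names[i]);
    }
  }

  /* len is 2: the duplicate "cube" was rejected. */
  printf("unique: %u, has 'cube': %d\n", BLI_gset_len(gs), BLI_gset_haskey(gs, "cube"));

  BLI_gset_free(gs, NULL);
}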
/* The GSet variants follow the same pattern, with key-only entries: */
  const bool haskey = (e != NULL);

/* BLI_gset_lookup(): */
  return e ? e->key : NULL;

/* BLI_gset_pop_key(): */
  void *key_ret = e->key;
double BLI_ghash_calc_quality_ex(GHash *gh,
                                 double *r_load,
                                 double *r_variance,
                                 double *r_prop_empty_buckets,
                                 double *r_prop_overloaded_buckets,
                                 int *r_biggest_bucket)
{
  /* Empty table: report trivial statistics. */
  if (r_prop_empty_buckets) {
    *r_prop_empty_buckets = 1.0;
  }
  if (r_prop_overloaded_buckets) {
    *r_prop_overloaded_buckets = 0.0;
  }
  if (r_biggest_bucket) {
    *r_biggest_bucket = 0;
  }

  /* Non-empty table: initialize before scanning the buckets. */
  if (r_biggest_bucket) {
    *r_biggest_bucket = 0;
  }
  /* Two passes over the buckets gather the per-bucket entry counts. */
  for (i = 0; i < gh->nbuckets; i++) {

  for (i = 0; i < gh->nbuckets; i++) {
    /* 'count' is the number of entries chained in bucket 'i'. */
    if (r_biggest_bucket) {
      *r_biggest_bucket = max_ii(*r_biggest_bucket, (int)count);
    }
    if (r_prop_overloaded_buckets && (count > overloaded_buckets_threshold)) {
      sum_overloaded++;
    }
    if (r_prop_empty_buckets && !count) {
      sum_empty++;
    }
  }
  if (r_prop_overloaded_buckets) {
    *r_prop_overloaded_buckets = (double)sum_overloaded / (double)gh->nbuckets;
  }
  if (r_prop_empty_buckets) {
    *r_prop_empty_buckets = (double)sum_empty / (double)gh->nbuckets;
  }
double BLI_gset_calc_quality_ex(GSet *gs,
                                double *r_load,
                                double *r_variance,
                                double *r_prop_empty_buckets,
                                double *r_prop_overloaded_buckets,
                                int *r_biggest_bucket)
{
  return BLI_ghash_calc_quality_ex((GHash *)gs,
                                   r_load,
                                   r_variance,
                                   r_prop_empty_buckets,
                                   r_prop_overloaded_buckets,
                                   r_biggest_bucket);
}
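/* Hypothetical diagnostic helper (not part of this file) showing how the quality
 * query is typically consumed; every output pointer is optional, so NULL can be
 * passed for anything that is not needed. */
#include <stdio.h>

#include "BLI_ghash.h"

static void print_ghash_stats(GHash *gh)
{
  double load, variance, prop_empty, prop_overloaded;
  int biggest_bucket;
  const double quality = BLI_ghash_calc_quality_ex(
      gh, &load, &variance, &prop_empty, &prop_overloaded, &biggest_bucket);

  printf("quality=%.3f load=%.3f variance=%.3f empty=%.1f%% overloaded=%.1f%% biggest=%d\n",
         quality, load, variance, prop_empty * 100.0, prop_overloaded * 100.0, biggest_bucket);
}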
/* Signatures of symbols defined or referenced above: */
#define BLI_assert_unreachable()
void * BLI_ghash_lookup_default(const GHash *gh, const void *key, void *val_default)
BLI_INLINE void ghash_entry_copy(GHash *gh_dst, Entry *dst, const GHash *gh_src, const Entry *src, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
static Entry * ghash_pop(GHash *gh, GHashIterState *state)
void * BLI_gset_pop_key(GSet *gs, const void *key)
bool BLI_ghash_pop(GHash *gh, GHashIterState *state, void **r_key, void **r_val)
double BLI_ghash_calc_quality_ex(GHash *gh, double *r_load, double *r_variance, double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket)
void * BLI_gset_replace_key(GSet *gs, void *key)
static void ghash_buckets_resize(GHash *gh, const uint nbuckets)
void * BLI_ghash_lookup(const GHash *gh, const void *key)
bool BLI_gset_ensure_p_ex(GSet *gs, const void *key, void ***r_key)
double BLI_ghash_calc_quality(GHash *gh)
bool BLI_ghash_reinsert(GHash *gh, void *key, void *val, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
void BLI_ghashIterator_step(GHashIterator *ghi)
bool BLI_ghash_ensure_p_ex(GHash *gh, const void *key, void ***r_key, void ***r_val)
void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
BLI_INLINE bool ghash_insert_safe_keyonly(GHash *gh, void *key, const bool override, GHashKeyFreeFP keyfreefp)
void BLI_ghashIterator_free(GHashIterator *ghi)
uint BLI_gset_len(const GSet *gs)
GSet * BLI_gset_new_ex(GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info, const uint nentries_reserve)
static void ghash_buckets_contract(GHash *gh, const uint nentries, const bool user_defined, const bool force_shrink)
void BLI_ghash_clear_ex(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp, const uint nentries_reserve)
#define GHASH_ENTRY_SIZE(_is_gset)
void * BLI_ghash_popkey(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp)
int BLI_ghash_buckets_len(const GHash *gh)
struct GHashEntry GHashEntry
BLI_INLINE void ghash_insert(GHash *gh, void *key, void *val)
BLI_INLINE void ghash_buckets_reset(GHash *gh, const uint nentries)
double BLI_gset_calc_quality(GSet *gs)
BLI_INLINE Entry * ghash_lookup_entry(const GHash *gh, const void *key)
void BLI_ghash_reserve(GHash *gh, const uint nentries_reserve)
void BLI_gset_clear(GSet *gs, GSetKeyFreeFP keyfreefp)
void BLI_gset_flag_clear(GSet *gs, uint flag)
BLI_INLINE void ghash_insert_ex_keyonly(GHash *gh, void *key, const uint bucket_index)
void BLI_gset_clear_ex(GSet *gs, GSetKeyFreeFP keyfreefp, const uint nentries_reserve)
BLI_INLINE uint ghash_find_next_bucket_index(const GHash *gh, uint curr_bucket)
double BLI_gset_calc_quality_ex(GSet *gs, double *r_load, double *r_variance, double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket)
static GHash * ghash_new(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info, const uint nentries_reserve, const uint flag)
void * BLI_gset_lookup(const GSet *gs, const void *key)
BLI_STATIC_ASSERT(ARRAY_SIZE(hashsizes)==GHASH_MAX_SIZE, "Invalid 'hashsizes' size")
void BLI_gset_flag_set(GSet *gs, uint flag)
static GHash * ghash_copy(const GHash *gh, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
BLI_INLINE bool ghash_insert_safe(GHash *gh, void *key, void *val, const bool override, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
void BLI_ghash_flag_clear(GHash *gh, uint flag)
bool BLI_gset_haskey(const GSet *gs, const void *key)
static Entry * ghash_remove_ex(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp, const uint bucket_index)
void BLI_gset_insert(GSet *gs, void *key)
BLI_INLINE uint ghash_keyhash(const GHash *gh, const void *key)
#define GHASH_LIMIT_SHRINK(_nbkt)
bool BLI_ghash_haskey(const GHash *gh, const void *key)
bool BLI_gset_reinsert(GSet *gs, void *key, GSetKeyFreeFP keyfreefp)
uint BLI_ghash_len(const GHash *gh)
void ** BLI_ghash_lookup_p(GHash *gh, const void *key)
BLI_INLINE Entry * ghash_lookup_entry_ex(const GHash *gh, const void *key, const uint bucket_index)
GHashIterator * BLI_ghashIterator_new(GHash *gh)
BLI_INLINE uint ghash_bucket_index(const GHash *gh, const uint hash)
int BLI_gset_buckets_len(const GSet *gs)
void * BLI_ghash_replace_key(GHash *gh, void *key)
BLI_INLINE void ghash_insert_ex(GHash *gh, void *key, void *val, const uint bucket_index)
#define GHASH_LIMIT_GROW(_nbkt)
void BLI_ghash_flag_set(GHash *gh, uint flag)
bool BLI_ghash_remove(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
const uint BLI_ghash_hash_sizes[]
GSet * BLI_gset_new(GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info)
BLI_INLINE void ghash_insert_ex_keyonly_entry(GHash *gh, void *key, const uint bucket_index, Entry *e)
void BLI_ghash_insert(GHash *gh, void *key, void *val)
BLI_INLINE uint ghash_entryhash(const GHash *gh, const Entry *e)
BLI_INLINE Entry * ghash_lookup_entry_prev_ex(GHash *gh, const void *key, Entry **r_e_prev, const uint bucket_index)
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
GHash * BLI_ghash_copy(const GHash *gh, GHashKeyCopyFP keycopyfp, GHashValCopyFP valcopyfp)
bool BLI_ghash_ensure_p(GHash *gh, void *key, void ***r_val)
static void ghash_free_cb(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
void BLI_gset_free(GSet *gs, GSetKeyFreeFP keyfreefp)
void BLI_ghashIterator_init(GHashIterator *ghi, GHash *gh)
GHash * BLI_ghash_new_ex(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info, const uint nentries_reserve)
bool BLI_gset_add(GSet *gs, void *key)
bool BLI_gset_pop(GSet *gs, GSetIterState *state, void **r_key)
GHash * BLI_ghash_new(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info)
bool BLI_gset_remove(GSet *gs, const void *key, GSetKeyFreeFP keyfreefp)
static void ghash_buckets_expand(GHash *gh, const uint nentries, const bool user_defined)
GSet * BLI_gset_copy(const GSet *gs, GHashKeyCopyFP keycopyfp)
GHASH_FLAG_ALLOW_SHRINK
bool (*GHashCmpFP)(const void *a, const void *b)
void *(*GHashKeyCopyFP)(const void *key)
GHashKeyFreeFP GSetKeyFreeFP
void *(*GHashValCopyFP)(const void *val)
unsigned int (*GHashHashFP)(const void *key)
void (*GHashKeyFreeFP)(void *key)
void (*GHashValFreeFP)(void *val)
MINLINE int max_ii(int a, int b)
void BLI_mempool_free(BLI_mempool *pool, void *addr) ATTR_NONNULL(1, 2)
int BLI_mempool_len(const BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_mempool_clear_ex(BLI_mempool *pool, int totelem_reserve) ATTR_NONNULL(1)
BLI_mempool * BLI_mempool_create(unsigned int esize, unsigned int elem_num, unsigned int pchunk, unsigned int flag) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL
void * BLI_mempool_alloc(BLI_mempool *pool) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(1)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
void(* MEM_freeN)(void *vmemh)
void *(* MEM_callocN)(size_t len, const char *str)
void *(* MEM_mallocN)(size_t len, const char *str)
struct BLI_mempool * entrypool
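/* Hypothetical drain loop (caller-side sketch, not from this file) using the
 * 'pop' API listed above: it removes one arbitrary entry per call until the
 * table is empty, which is useful when ownership of keys/values must be taken
 * over before freeing the GHash. Zero-initializing GHashIterState is assumed to
 * be the intended way to start the iteration. */
#include "BLI_ghash.h"

static void drain_all(GHash *gh)
{
  GHashIterState state = {0};
  void *key, *val;

  while (BLI_ghash_pop(gh, &state, &key, &val)) {
    /* Take ownership of 'key' and 'val' here (free them, store them, ...). */
  }
  /* 'gh' is now empty but still allocated; BLI_ghash_free(gh, NULL, NULL) releases it. */
}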