Blender V3.3
atomic_ops_ext.h
/*
 * Original code from jemalloc with this license:
 *
 * Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>.
 * All rights reserved.
 * Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
 * Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice(s),
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice(s),
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: adapted from jemalloc.
 */

#ifndef __ATOMIC_OPS_EXT_H__
#define __ATOMIC_OPS_EXT_H__

#include "atomic_ops_utils.h"

/******************************************************************************/
/* size_t operations. */
ATOMIC_STATIC_ASSERT(sizeof(size_t) == LG_SIZEOF_PTR, "sizeof(size_t) != LG_SIZEOF_PTR");

ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}

ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}

ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
#endif
}

ATOMIC_INLINE size_t atomic_load_z(const size_t *v)
{
#if (LG_SIZEOF_PTR == 8)
  return (size_t)atomic_load_uint64((const uint64_t *)v);
#elif (LG_SIZEOF_PTR == 4)
  return (size_t)atomic_load_uint32((const uint32_t *)v);
#endif
}
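
/* Illustrative sketch (not part of the original header): the CAS functions
 * return the value that was previously stored, so a swap succeeded only when
 * the returned value equals the expected `old` argument. A typical retry loop,
 * using a hypothetical shared `counter`, looks like:
 *
 *   size_t prev, desired;
 *   do {
 *     prev = atomic_load_z(&counter);
 *     desired = prev * 2;
 *   } while (atomic_cas_z(&counter, prev, desired) != prev);
 */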

ATOMIC_INLINE void atomic_store_z(size_t *p, size_t v)
{
#if (LG_SIZEOF_PTR == 8)
  atomic_store_uint64((uint64_t *)p, (uint64_t)v);
#elif (LG_SIZEOF_PTR == 4)
  atomic_store_uint32((uint32_t *)p, (uint32_t)v);
#endif
}

/* Atomically raise *p to at least x via a CAS loop; returns the value of *p
 * observed before the update. */
ATOMIC_INLINE size_t atomic_fetch_and_update_max_z(size_t *p, size_t x)
{
  size_t prev_value;
  while ((prev_value = *p) < x) {
    if (atomic_cas_z(p, prev_value, x) == prev_value) {
      break;
    }
  }
  return prev_value;
}
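
/* Illustrative usage sketch (not part of the original header): each worker
 * thread folds its local peak into a shared maximum that only ever grows.
 * The names `shared_peak` and `local_peak` are hypothetical.
 *
 *   static size_t shared_peak = 0;
 *
 *   void worker_report_peak(size_t local_peak)
 *   {
 *     atomic_fetch_and_update_max_z(&shared_peak, local_peak);
 *   }
 */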

/******************************************************************************/
/* unsigned operations. */
ATOMIC_STATIC_ASSERT(sizeof(unsigned int) == LG_SIZEOF_INT,
                     "sizeof(unsigned int) != LG_SIZEOF_INT");

ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
  return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 4)
  return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

ATOMIC_INLINE unsigned int atomic_sub_and_fetch_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
  return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_INT == 4)
  return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}

ATOMIC_INLINE unsigned int atomic_fetch_and_add_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
  return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 4)
  return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

ATOMIC_INLINE unsigned int atomic_fetch_and_sub_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
  return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_INT == 4)
  return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}

ATOMIC_INLINE unsigned int atomic_cas_u(unsigned int *v, unsigned int old, unsigned int _new)
{
#if (LG_SIZEOF_INT == 8)
  return (unsigned int)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
#elif (LG_SIZEOF_INT == 4)
  return (unsigned int)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
#endif
}

/******************************************************************************/
/* Char operations. */
ATOMIC_INLINE char atomic_fetch_and_or_char(char *p, char b)
{
  return (char)atomic_fetch_and_or_uint8((uint8_t *)p, (uint8_t)b);
}

ATOMIC_INLINE char atomic_fetch_and_and_char(char *p, char b)
{
  return (char)atomic_fetch_and_and_uint8((uint8_t *)p, (uint8_t)b);
}

/******************************************************************************/
/* Pointer operations. */

ATOMIC_INLINE void *atomic_cas_ptr(void **v, void *old, void *_new)
{
#if (LG_SIZEOF_PTR == 8)
  return (void *)atomic_cas_uint64((uint64_t *)v, *(uint64_t *)&old, *(uint64_t *)&_new);
#elif (LG_SIZEOF_PTR == 4)
  return (void *)atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
#endif
}

ATOMIC_INLINE void *atomic_load_ptr(void *const *v)
{
#if (LG_SIZEOF_PTR == 8)
  return (void *)atomic_load_uint64((const uint64_t *)v);
#elif (LG_SIZEOF_PTR == 4)
  return (void *)atomic_load_uint32((const uint32_t *)v);
#endif
}

ATOMIC_INLINE void atomic_store_ptr(void **p, void *v)
{
#if (LG_SIZEOF_PTR == 8)
  atomic_store_uint64((uint64_t *)p, (uint64_t)v);
#elif (LG_SIZEOF_PTR == 4)
  atomic_store_uint32((uint32_t *)p, (uint32_t)v);
#endif
}

/******************************************************************************/
/* float operations. */
ATOMIC_STATIC_ASSERT(sizeof(float) == sizeof(uint32_t), "sizeof(float) != sizeof(uint32_t)");

ATOMIC_INLINE float atomic_cas_float(float *v, float old, float _new)
{
  uint32_t ret = atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
  return *(float *)&ret;
}

ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
{
  float oldval, newval;
  uint32_t prevval;

  do { /* Note that since collisions are unlikely, loop will nearly always run once. */
    oldval = *p;
    newval = oldval + x;
    prevval = atomic_cas_uint32((uint32_t *)p, *(uint32_t *)(&oldval), *(uint32_t *)(&newval));
  } while (_ATOMIC_UNLIKELY(prevval != *(uint32_t *)(&oldval)));

  return newval;
}
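
/* Illustrative usage sketch (not part of the original header): several threads
 * accumulate partial sums into one shared float total. The names `total` and
 * `partial` are hypothetical.
 *
 *   static float total = 0.0f;
 *
 *   void accumulate(float partial)
 *   {
 *     float after = atomic_add_and_fetch_fl(&total, partial);  // value after this thread's add
 *     (void)after;
 *   }
 */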

#endif /* __ATOMIC_OPS_EXT_H__ */