atomic_ops_unix.h (Blender 3.3)
/*
 * Original code from jemalloc with this license:
 *
 * Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>.
 * All rights reserved.
 * Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
 * Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice(s),
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice(s),
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: adapted from jemalloc.
 */

#ifndef __ATOMIC_OPS_UNIX_H__
#define __ATOMIC_OPS_UNIX_H__

#include "atomic_ops_utils.h"

#if defined(__arm__) || defined(__riscv)
/* Attempt to fix compilation errors on Debian armel and RISC-V kernels.
 * Both architectures do have 32- and 64-bit atomics, however their GCC
 * does not define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n.
 */
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_1
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_2
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_4
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_8
#endif
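
/* Note (illustrative, not part of the original header): with these force-defines in place, the
 * size-specific checks further down, e.g.
 *
 *   defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)
 *
 * still evaluate to true, so the native `__sync_*` based implementations are selected instead of
 * the lock-based fallback. */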

/* Define `ATOMIC_FORCE_USE_FALLBACK` to force the lock-based fallback implementation to be used,
 * even on platforms where a native implementation is available via the compiler.
 * Useful for development purposes. */
#undef ATOMIC_FORCE_USE_FALLBACK
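/* Example (illustrative, not part of the original header): to exercise the fallback path during
 * development, the line above would be changed to a define, e.g.
 *
 *   #define ATOMIC_FORCE_USE_FALLBACK
 *
 * Note that defining the macro on the compiler command line is not enough, because the `#undef`
 * above would clear it again. */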

/* -------------------------------------------------------------------- */
/* Spin-lock, used by the lock-based fallback implementation below. */

typedef struct AtomicSpinLock {
  volatile int lock;

  /* Pad the structure size to a cache-line, to avoid unwanted sharing with other data. */
  int pad[32 - sizeof(int)];
} __attribute__((aligned(32))) AtomicSpinLock;

ATOMIC_INLINE void atomic_spin_lock(volatile AtomicSpinLock *lock)
{
  while (__sync_lock_test_and_set(&lock->lock, 1)) {
    while (lock->lock) {
      /* Busy-wait until the lock is released. */
    }
  }
}

ATOMIC_INLINE void atomic_spin_unlock(volatile AtomicSpinLock *lock)
{
  __sync_lock_release(&lock->lock);
}
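
/* Example (illustrative sketch, not part of the original header): the fallback implementations
 * below wrap each operation in this lock, roughly like so:
 *
 *   static AtomicSpinLock example_lock = {0};
 *
 *   void example_critical_section(int *shared_value)
 *   {
 *     atomic_spin_lock(&example_lock);
 *     *shared_value += 1;
 *     atomic_spin_unlock(&example_lock);
 *   }
 */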

/* -------------------------------------------------------------------- */
/* Generic load/store helpers, implemented with a full memory barrier. */

/* TODO(sergey): On the x64 platform both the read and the write of a variable aligned to its type
 * size are atomic, so in theory it is possible to avoid the memory barrier and gain performance.
 * The downside is that this would impose an alignment requirement on the value being operated
 * on. */
#define __atomic_impl_load_generic(v) (__sync_synchronize(), *(v))
#define __atomic_impl_store_generic(p, v) \
  do { \
    *(p) = (v); \
    __sync_synchronize(); \
  } while (0)
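
/* Example (illustrative, not part of the original header): a sequentially consistent 64-bit load
 * and store expressed with these helpers, as the x86-64 code path below does:
 *
 *   uint64_t example_load(const uint64_t *p)
 *   {
 *     return __atomic_impl_load_generic(p);   (barrier, then plain read)
 *   }
 *
 *   void example_store(uint64_t *p, uint64_t v)
 *   {
 *     __atomic_impl_store_generic(p, v);      (plain write, then barrier)
 *   }
 */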

/* -------------------------------------------------------------------- */
/* Lock-based fallback implementation. */

/* Global lock, shared by all atomic operation implementations.
 *
 * Could be split into per-size locks, although the added complexity and being more error-prone
 * does not seem worth it for a fallback implementation. */
static _ATOMIC_MAYBE_UNUSED AtomicSpinLock _atomic_global_lock = {0};

#define ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, _op_name, _op) \
  ATOMIC_INLINE _type##_t atomic_##_op_name##_and_fetch_##_type(_type##_t *p, _type##_t x) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *(p); \
    const _type##_t new_value = original_value _op(x); \
    *(p) = new_value; \
    atomic_spin_unlock(&_atomic_global_lock); \
    return new_value; \
  }

#define ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, _op_name, _op) \
  ATOMIC_INLINE _type##_t atomic_fetch_and_##_op_name##_##_type(_type##_t *p, _type##_t x) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *(p); \
    *(p) = original_value _op(x); \
    atomic_spin_unlock(&_atomic_global_lock); \
    return original_value; \
  }

#define ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(_type) \
  ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, add, +)

#define ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(_type) \
  ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, sub, -)

#define ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, add, +)

#define ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, sub, -)

#define ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(_type) ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, or, |)

#define ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, and, &)

#define ATOMIC_LOCKING_CAS_DEFINE(_type) \
  ATOMIC_INLINE _type##_t atomic_cas_##_type(_type##_t *v, _type##_t old, _type##_t _new) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *v; \
    if (*v == old) { \
      *v = _new; \
    } \
    atomic_spin_unlock(&_atomic_global_lock); \
    return original_value; \
  }

#define ATOMIC_LOCKING_LOAD_DEFINE(_type) \
  ATOMIC_INLINE _type##_t atomic_load_##_type(const _type##_t *v) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t value = *v; \
    atomic_spin_unlock(&_atomic_global_lock); \
    return value; \
  }

#define ATOMIC_LOCKING_STORE_DEFINE(_type) \
  ATOMIC_INLINE void atomic_store_##_type(_type##_t *p, const _type##_t v) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    *p = v; \
    atomic_spin_unlock(&_atomic_global_lock); \
  }

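/* Example (illustrative, not part of the original header): `ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(uint32)`
 * expands to roughly the following function:
 *
 *   ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
 *   {
 *     atomic_spin_lock(&_atomic_global_lock);
 *     const uint32_t original_value = *(p);
 *     *(p) = original_value + (x);
 *     atomic_spin_unlock(&_atomic_global_lock);
 *     return original_value;
 *   }
 */
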
/* -------------------------------------------------------------------- */
/* 64-bit operations. */

#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
  return __sync_fetch_and_sub(p, x);
}

ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

ATOMIC_INLINE uint64_t atomic_load_uint64(const uint64_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_uint64(uint64_t *p, uint64_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

/* Signed */
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
  return __sync_fetch_and_sub(p, x);
}

ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

ATOMIC_INLINE int64_t atomic_load_int64(const int64_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_int64(int64_t *p, int64_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

#elif !defined(ATOMIC_FORCE_USE_FALLBACK) && (defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
  x = (uint64_t)(-(int64_t)x);
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return atomic_fetch_and_add_uint64(p, x) + x;
}

ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return atomic_fetch_and_sub_uint64(p, x) - x;
}

ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
  uint64_t ret;
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

ATOMIC_INLINE uint64_t atomic_load_uint64(const uint64_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_uint64(uint64_t *p, uint64_t v)
{
  __atomic_impl_store_generic(p, v);
}

/* Signed */
ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
  x = -x;
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
  return atomic_fetch_and_add_int64(p, x) + x;
}

ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
  return atomic_fetch_and_sub_int64(p, x) - x;
}

ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
  int64_t ret;
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

ATOMIC_INLINE int64_t atomic_load_int64(const int64_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_int64(int64_t *p, int64_t v)
{
  __atomic_impl_store_generic(p, v);
}

#else

/* Unsigned */
ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(uint64)
ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(uint64)

ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(uint64)
ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE(uint64)

ATOMIC_LOCKING_CAS_DEFINE(uint64)

ATOMIC_LOCKING_LOAD_DEFINE(uint64)
ATOMIC_LOCKING_STORE_DEFINE(uint64)

/* Signed */
ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(int64)
ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(int64)

ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(int64)
ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE(int64)

ATOMIC_LOCKING_CAS_DEFINE(int64)

ATOMIC_LOCKING_LOAD_DEFINE(int64)
ATOMIC_LOCKING_STORE_DEFINE(int64)

#endif

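/* Example (illustrative, not part of the original header): whichever branch above gets compiled,
 * callers see the same API. A shared 64-bit counter could be incremented like this:
 *
 *   uint64_t example_counter = 0;
 *
 *   void example_worker(void)
 *   {
 *     const uint64_t value_after_increment = atomic_add_and_fetch_uint64(&example_counter, 1);
 *     (void)value_after_increment;
 *   }
 */
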
/* -------------------------------------------------------------------- */
/* 32-bit operations. */

#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

ATOMIC_INLINE uint32_t atomic_load_uint32(const uint32_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_uint32(uint32_t *p, uint32_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

ATOMIC_INLINE int32_t atomic_load_int32(const int32_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_int32(int32_t *p, int32_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

#elif !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  uint32_t ret = x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret + x;
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  uint32_t ret = (uint32_t)(-(int32_t)x);
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret - x;
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
  uint32_t ret;
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

ATOMIC_INLINE uint32_t atomic_load_uint32(const uint32_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_uint32(uint32_t *p, uint32_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
  int32_t ret = x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret + x;
}

ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
  int32_t ret = -x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret - x;
}

ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
  int32_t ret;
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

ATOMIC_INLINE int32_t atomic_load_int32(const int32_t *v)
{
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
}

ATOMIC_INLINE void atomic_store_int32(int32_t *p, int32_t v)
{
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
}

#else

/* Unsigned */
ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(uint32)
ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(uint32)

ATOMIC_LOCKING_CAS_DEFINE(uint32)

ATOMIC_LOCKING_LOAD_DEFINE(uint32)
ATOMIC_LOCKING_STORE_DEFINE(uint32)

/* Signed */
ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(int32)
ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(int32)

ATOMIC_LOCKING_CAS_DEFINE(int32)

ATOMIC_LOCKING_LOAD_DEFINE(int32)
ATOMIC_LOCKING_STORE_DEFINE(int32)

#endif

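/* Example (illustrative, not part of the original header): `atomic_cas_uint32` returns the value
 * that was in memory before the swap attempt, so a typical compare-and-swap retry loop looks like:
 *
 *   void example_atomic_max_uint32(uint32_t *p, uint32_t value)
 *   {
 *     uint32_t prev = atomic_load_uint32(p);
 *     while (prev < value) {
 *       const uint32_t observed = atomic_cas_uint32(p, prev, value);
 *       if (observed == prev) {
 *         break;
 *       }
 *       prev = observed;
 *     }
 *   }
 */
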
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_or(p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_and(p, x);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_or(p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_and(p, x);
}

#else

/* Unsigned */
ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(uint32)
ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(uint32)
ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(uint32)

/* Signed */
ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(int32)
ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(int32)
ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(int32)

#endif
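
/* Example (illustrative, not part of the original header): `atomic_fetch_and_or_uint32` and
 * `atomic_fetch_and_and_uint32` are typically used to set and clear bit flags, e.g.:
 *
 *   uint32_t example_flags = 0;
 *   enum { EXAMPLE_FLAG_DIRTY = (1u << 3) };
 *
 *   void example_mark_dirty(void)
 *   {
 *     atomic_fetch_and_or_uint32(&example_flags, EXAMPLE_FLAG_DIRTY);
 *   }
 *
 *   void example_clear_dirty(void)
 *   {
 *     atomic_fetch_and_and_uint32(&example_flags, ~(uint32_t)EXAMPLE_FLAG_DIRTY);
 *   }
 */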

/* -------------------------------------------------------------------- */
/* 16-bit operations. */

#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_2))

/* Signed */
ATOMIC_INLINE int16_t atomic_fetch_and_and_int16(int16_t *p, int16_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE int16_t atomic_fetch_and_or_int16(int16_t *p, int16_t b)
{
  return __sync_fetch_and_or(p, b);
}

#else

ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(int16)
ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(int16)

#endif

/* -------------------------------------------------------------------- */
/* 8-bit operations. */

#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_1))

/* Unsigned */
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
  return __sync_fetch_and_or(p, b);
}

/* Signed */
ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
{
  return __sync_fetch_and_or(p, b);
}

#else

/* Unsigned */
ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(uint8)
ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(uint8)

/* Signed */
ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(int8)
ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(int8)

#endif

#undef __atomic_impl_load_generic
#undef __atomic_impl_store_generic

#undef ATOMIC_LOCKING_OP_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_OP_DEFINE
#undef ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_OR_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_AND_DEFINE
#undef ATOMIC_LOCKING_CAS_DEFINE
#undef ATOMIC_LOCKING_LOAD_DEFINE
#undef ATOMIC_LOCKING_STORE_DEFINE

#endif /* __ATOMIC_OPS_UNIX_H__ */