Blender V3.3
ssei.h
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2013 Intel Corporation
 * Modifications Copyright 2014-2022 Blender Foundation. */

#ifndef __UTIL_SSEI_H__
#define __UTIL_SSEI_H__

CCL_NAMESPACE_BEGIN

#ifdef __KERNEL_SSE2__

struct sseb;
struct ssef;

/*! 4-wide SSE integer type. */
struct ssei {
  typedef sseb Mask;   // mask type
  typedef ssei Int;    // int type
  typedef ssef Float;  // float type

  enum { size = 4 };  // number of SIMD elements
  union {
    __m128i m128;
    int32_t i[4];
  };  // data

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline ssei()
  {
  }
  __forceinline ssei(const ssei &a)
  {
    m128 = a.m128;
  }
  __forceinline ssei &operator=(const ssei &a)
  {
    m128 = a.m128;
    return *this;
  }

  __forceinline ssei(const __m128i a) : m128(a)
  {
  }
  __forceinline operator const __m128i &(void) const
  {
    return m128;
  }
  __forceinline operator __m128i &(void)
  {
    return m128;
  }

  __forceinline ssei(const int a) : m128(_mm_set1_epi32(a))
  {
  }
  __forceinline ssei(int a, int b, int c, int d) : m128(_mm_setr_epi32(a, b, c, d))
  {
  }

  __forceinline explicit ssei(const __m128 a) : m128(_mm_cvtps_epi32(a))
  {
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const int32_t &operator[](const size_t index) const
  {
    assert(index < 4);
    return i[index];
  }
  __forceinline int32_t &operator[](const size_t index)
  {
    assert(index < 4);
    return i[index];
  }
};

////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline const ssei cast(const __m128 &a)
{
  return _mm_castps_si128(a);
}
__forceinline const ssei operator+(const ssei &a)
{
  return a;
}
__forceinline const ssei operator-(const ssei &a)
{
  return _mm_sub_epi32(_mm_setzero_si128(), a.m128);
}
#  if defined(__KERNEL_SSSE3__)
__forceinline const ssei abs(const ssei &a)
{
  return _mm_abs_epi32(a.m128);
}
#  endif

////////////////////////////////////////////////////////////////////////////////
/// Binary Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline const ssei operator+(const ssei &a, const ssei &b)
{
  return _mm_add_epi32(a.m128, b.m128);
}
__forceinline const ssei operator+(const ssei &a, const int32_t &b)
{
  return a + ssei(b);
}
__forceinline const ssei operator+(const int32_t &a, const ssei &b)
{
  return ssei(a) + b;
}

__forceinline const ssei operator-(const ssei &a, const ssei &b)
{
  return _mm_sub_epi32(a.m128, b.m128);
}
__forceinline const ssei operator-(const ssei &a, const int32_t &b)
{
  return a - ssei(b);
}
__forceinline const ssei operator-(const int32_t &a, const ssei &b)
{
  return ssei(a) - b;
}

#  if defined(__KERNEL_SSE41__)
__forceinline const ssei operator*(const ssei &a, const ssei &b)
{
  return _mm_mullo_epi32(a.m128, b.m128);
}
__forceinline const ssei operator*(const ssei &a, const int32_t &b)
{
  return a * ssei(b);
}
__forceinline const ssei operator*(const int32_t &a, const ssei &b)
{
  return ssei(a) * b;
}
#  endif

__forceinline const ssei operator&(const ssei &a, const ssei &b)
{
  return _mm_and_si128(a.m128, b.m128);
}
__forceinline const ssei operator&(const ssei &a, const int32_t &b)
{
  return a & ssei(b);
}
__forceinline const ssei operator&(const int32_t &a, const ssei &b)
{
  return ssei(a) & b;
}

__forceinline const ssei operator|(const ssei &a, const ssei &b)
{
  return _mm_or_si128(a.m128, b.m128);
}
__forceinline const ssei operator|(const ssei &a, const int32_t &b)
{
  return a | ssei(b);
}
__forceinline const ssei operator|(const int32_t &a, const ssei &b)
{
  return ssei(a) | b;
}

__forceinline const ssei operator^(const ssei &a, const ssei &b)
{
  return _mm_xor_si128(a.m128, b.m128);
}
__forceinline const ssei operator^(const ssei &a, const int32_t &b)
{
  return a ^ ssei(b);
}
__forceinline const ssei operator^(const int32_t &a, const ssei &b)
{
  return ssei(a) ^ b;
}

__forceinline const ssei operator<<(const ssei &a, const int32_t &n)
{
  return _mm_slli_epi32(a.m128, n);
}
__forceinline const ssei operator>>(const ssei &a, const int32_t &n)
{
  return _mm_srai_epi32(a.m128, n);
}

__forceinline const ssei andnot(const ssei &a, const ssei &b)
{
  return _mm_andnot_si128(a.m128, b.m128);
}
__forceinline const ssei andnot(const sseb &a, const ssei &b)
{
  return _mm_andnot_si128(cast(a.m128), b.m128);
}
__forceinline const ssei andnot(const ssei &a, const sseb &b)
{
  return _mm_andnot_si128(a.m128, cast(b.m128));
}

__forceinline const ssei sra(const ssei &a, const int32_t &b)
{
  return _mm_srai_epi32(a.m128, b);
}
__forceinline const ssei srl(const ssei &a, const int32_t &b)
{
  return _mm_srli_epi32(a.m128, b);
}

#  if defined(__KERNEL_SSE41__)
__forceinline const ssei min(const ssei &a, const ssei &b)
{
  return _mm_min_epi32(a.m128, b.m128);
}
__forceinline const ssei min(const ssei &a, const int32_t &b)
{
  return min(a, ssei(b));
}
__forceinline const ssei min(const int32_t &a, const ssei &b)
{
  return min(ssei(a), b);
}

__forceinline const ssei max(const ssei &a, const ssei &b)
{
  return _mm_max_epi32(a.m128, b.m128);
}
__forceinline const ssei max(const ssei &a, const int32_t &b)
{
  return max(a, ssei(b));
}
__forceinline const ssei max(const int32_t &a, const ssei &b)
{
  return max(ssei(a), b);
}
#  endif

////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline ssei &operator+=(ssei &a, const ssei &b)
{
  return a = a + b;
}
__forceinline ssei &operator+=(ssei &a, const int32_t &b)
{
  return a = a + b;
}

__forceinline ssei &operator-=(ssei &a, const ssei &b)
{
  return a = a - b;
}
__forceinline ssei &operator-=(ssei &a, const int32_t &b)
{
  return a = a - b;
}

#  if defined(__KERNEL_SSE41__)
__forceinline ssei &operator*=(ssei &a, const ssei &b)
{
  return a = a * b;
}
__forceinline ssei &operator*=(ssei &a, const int32_t &b)
{
  return a = a * b;
}
#  endif

__forceinline ssei &operator&=(ssei &a, const ssei &b)
{
  return a = a & b;
}
__forceinline ssei &operator&=(ssei &a, const int32_t &b)
{
  return a = a & b;
}

__forceinline ssei &operator|=(ssei &a, const ssei &b)
{
  return a = a | b;
}
__forceinline ssei &operator|=(ssei &a, const int32_t &b)
{
  return a = a | b;
}

__forceinline ssei &operator^=(ssei &a, const ssei &b)
{
  return a = a ^ b;
}
__forceinline ssei &operator^=(ssei &a, const int32_t &b)
{
  return a = a ^ b;
}

__forceinline ssei &operator<<=(ssei &a, const int32_t &b)
{
  return a = a << b;
}
__forceinline ssei &operator>>=(ssei &a, const int32_t &b)
{
  return a = a >> b;
}

////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators + Select
////////////////////////////////////////////////////////////////////////////////

__forceinline const sseb operator==(const ssei &a, const ssei &b)
{
  return _mm_castsi128_ps(_mm_cmpeq_epi32(a.m128, b.m128));
}
__forceinline const sseb operator==(const ssei &a, const int32_t &b)
{
  return a == ssei(b);
}
__forceinline const sseb operator==(const int32_t &a, const ssei &b)
{
  return ssei(a) == b;
}

__forceinline const sseb operator!=(const ssei &a, const ssei &b)
{
  return !(a == b);
}
__forceinline const sseb operator!=(const ssei &a, const int32_t &b)
{
  return a != ssei(b);
}
__forceinline const sseb operator!=(const int32_t &a, const ssei &b)
{
  return ssei(a) != b;
}

__forceinline const sseb operator<(const ssei &a, const ssei &b)
{
  return _mm_castsi128_ps(_mm_cmplt_epi32(a.m128, b.m128));
}
__forceinline const sseb operator<(const ssei &a, const int32_t &b)
{
  return a < ssei(b);
}
__forceinline const sseb operator<(const int32_t &a, const ssei &b)
{
  return ssei(a) < b;
}

__forceinline const sseb operator>=(const ssei &a, const ssei &b)
{
  return !(a < b);
}
__forceinline const sseb operator>=(const ssei &a, const int32_t &b)
{
  return a >= ssei(b);
}
__forceinline const sseb operator>=(const int32_t &a, const ssei &b)
{
  return ssei(a) >= b;
}

__forceinline const sseb operator>(const ssei &a, const ssei &b)
{
  return _mm_castsi128_ps(_mm_cmpgt_epi32(a.m128, b.m128));
}
__forceinline const sseb operator>(const ssei &a, const int32_t &b)
{
  return a > ssei(b);
}
__forceinline const sseb operator>(const int32_t &a, const ssei &b)
{
  return ssei(a) > b;
}

__forceinline const sseb operator<=(const ssei &a, const ssei &b)
{
  return !(a > b);
}
__forceinline const sseb operator<=(const ssei &a, const int32_t &b)
{
  return a <= ssei(b);
}
__forceinline const sseb operator<=(const int32_t &a, const ssei &b)
{
  return ssei(a) <= b;
}

__forceinline const ssei select(const sseb &m, const ssei &t, const ssei &f)
{
#  ifdef __KERNEL_SSE41__
  return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#  else
  return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#  endif
}

__forceinline const ssei select(const int mask, const ssei &t, const ssei &f)
{
#  if defined(__KERNEL_SSE41__) && \
      ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER))
  return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
#  else
  return select(sseb(mask), t, f);
#  endif
}

////////////////////////////////////////////////////////////////////////////////
/// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

__forceinline ssei unpacklo(const ssei &a, const ssei &b)
{
  return _mm_unpacklo_epi32(a, b);
}
__forceinline ssei unpackhi(const ssei &a, const ssei &b)
{
  return _mm_unpackhi_epi32(a, b);
}

template<size_t i0, size_t i1, size_t i2, size_t i3>
__forceinline const ssei shuffle(const ssei &a)
{
#  ifdef __KERNEL_NEON__
  int32x4_t result = shuffle_neon<int32x4_t, i0, i1, i2, i3>(vreinterpretq_s32_m128i(a));
  return vreinterpretq_m128i_s32(result);
#  else
  return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
#  endif
}

template<size_t i0, size_t i1, size_t i2, size_t i3>
__forceinline const ssei shuffle(const ssei &a, const ssei &b)
{
#  ifdef __KERNEL_NEON__
  int32x4_t result = shuffle_neon<int32x4_t, i0, i1, i2, i3>(vreinterpretq_s32_m128i(a),
                                                             vreinterpretq_s32_m128i(b));
  return vreinterpretq_m128i_s32(result);
#  else
  return _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
#  endif
}

template<size_t i0> __forceinline const ssei shuffle(const ssei &b)
{
  return shuffle<i0, i0, i0, i0>(b);
}

#  if defined(__KERNEL_SSE41__)
template<size_t src> __forceinline int extract(const ssei &b)
{
  return _mm_extract_epi32(b, src);
}
template<size_t dst> __forceinline const ssei insert(const ssei &a, const int32_t b)
{
  return _mm_insert_epi32(a, b, dst);
}
#  else
template<size_t src> __forceinline int extract(const ssei &b)
{
  return b[src];
}
template<size_t dst> __forceinline const ssei insert(const ssei &a, const int32_t b)
{
  ssei c = a;
  c[dst] = b;
  return c;
}
#  endif

////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////

#  if defined(__KERNEL_SSE41__)
__forceinline const ssei vreduce_min(const ssei &v)
{
  ssei h = min(shuffle<1, 0, 3, 2>(v), v);
  return min(shuffle<2, 3, 0, 1>(h), h);
}
__forceinline const ssei vreduce_max(const ssei &v)
{
  ssei h = max(shuffle<1, 0, 3, 2>(v), v);
  return max(shuffle<2, 3, 0, 1>(h), h);
}
__forceinline const ssei vreduce_add(const ssei &v)
{
  ssei h = shuffle<1, 0, 3, 2>(v) + v;
  return shuffle<2, 3, 0, 1>(h) + h;
}

__forceinline int reduce_min(const ssei &v)
{
#    ifdef __KERNEL_NEON__
  return vminvq_s32(vreinterpretq_s32_m128i(v));
#    else
  return extract<0>(vreduce_min(v));
#    endif
}
__forceinline int reduce_max(const ssei &v)
{
#    ifdef __KERNEL_NEON__
  return vmaxvq_s32(vreinterpretq_s32_m128i(v));
#    else
  return extract<0>(vreduce_max(v));
#    endif
}
__forceinline int reduce_add(const ssei &v)
{
#    ifdef __KERNEL_NEON__
  return vaddvq_s32(vreinterpretq_s32_m128i(v));
#    else
  return extract<0>(vreduce_add(v));
#    endif
}

__forceinline uint32_t select_min(const ssei &v)
{
  return __bsf(movemask(v == vreduce_min(v)));
}
__forceinline uint32_t select_max(const ssei &v)
{
  return __bsf(movemask(v == vreduce_max(v)));
}

__forceinline uint32_t select_min(const sseb &valid, const ssei &v)
{
  const ssei a = select(valid, v, ssei((int)pos_inf));
  return __bsf(movemask(valid & (a == vreduce_min(a))));
}
__forceinline uint32_t select_max(const sseb &valid, const ssei &v)
{
  const ssei a = select(valid, v, ssei((int)neg_inf));
  return __bsf(movemask(valid & (a == vreduce_max(a))));
}

#  else

__forceinline int ssei_min(int a, int b)
{
  return (a < b) ? a : b;
}
__forceinline int ssei_max(int a, int b)
{
  return (a > b) ? a : b;
}
__forceinline int reduce_min(const ssei &v)
{
  return ssei_min(ssei_min(v[0], v[1]), ssei_min(v[2], v[3]));
}
__forceinline int reduce_max(const ssei &v)
{
  return ssei_max(ssei_max(v[0], v[1]), ssei_max(v[2], v[3]));
}
__forceinline int reduce_add(const ssei &v)
{
  return v[0] + v[1] + v[2] + v[3];
}

#  endif

////////////////////////////////////////////////////////////////////////////////
/// Memory load and store operations
////////////////////////////////////////////////////////////////////////////////

__forceinline ssei load4i(const void *const a)
{
  return _mm_load_si128((__m128i *)a);
}

__forceinline void store4i(void *ptr, const ssei &v)
{
  _mm_store_si128((__m128i *)ptr, v);
}

__forceinline void storeu4i(void *ptr, const ssei &v)
{
  _mm_storeu_si128((__m128i *)ptr, v);
}

__forceinline void store4i(const sseb &mask, void *ptr, const ssei &i)
{
#  if defined(__KERNEL_AVX__)
  _mm_maskstore_ps((float *)ptr, (__m128i)mask, _mm_castsi128_ps(i));
#  else
  *(ssei *)ptr = select(mask, i, *(ssei *)ptr);
#  endif
}

__forceinline ssei load4i_nt(void *ptr)
{
#  if defined(__KERNEL_SSE41__)
  return _mm_stream_load_si128((__m128i *)ptr);
#  else
  return _mm_load_si128((__m128i *)ptr);
#  endif
}

__forceinline void store4i_nt(void *ptr, const ssei &v)
{
#  if defined(__KERNEL_SSE41__)
  _mm_stream_ps((float *)ptr, _mm_castsi128_ps(v));
#  else
  _mm_store_si128((__m128i *)ptr, v);
#  endif
}

////////////////////////////////////////////////////////////////////////////////
/// Debug Functions
////////////////////////////////////////////////////////////////////////////////

ccl_device_inline void print_ssei(const char *label, const ssei &a)
{
  printf("%s: %d %d %d %d\n", label, a[0], a[1], a[2], a[3]);
}

#endif

CCL_NAMESPACE_END

#endif
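For orientation, the following is a minimal usage sketch and is not part of ssei.h. It assumes the surrounding Cycles utility headers that define sseb, ssef and the intrinsics wrappers are already included (normally via util/simd.h), that the translation unit is compiled with __KERNEL_SSE2__ and __KERNEL_SSE41__ enabled, and the function name example_ssei_usage is hypothetical.

/* Hypothetical usage sketch; not part of ssei.h. */
inline void example_ssei_usage()
{
  ssei a(1, 2, 3, 4); /* per-lane constructor */
  ssei b(10);         /* broadcast constructor */

  ssei sum = a + b;      /* {11, 12, 13, 14} */
  ssei doubled = a << 1; /* {2, 4, 6, 8} */

  sseb is_even = ((a & 1) == 0);        /* per-lane mask: {F, T, F, T} */
  ssei blended = select(is_even, a, b); /* take a where the mask is set, else b */

  ssei rotated = shuffle<1, 2, 3, 0>(a); /* lane permutation: {2, 3, 4, 1} */

  int total = reduce_add(sum); /* horizontal sum: 50 */
  int lowest = reduce_min(a);  /* 1 */

  int32_t out[4];
  storeu4i(out, blended); /* unaligned store of the 4 lanes */

  (void)doubled;
  (void)rotated;
  (void)total;
  (void)lowest;
}

Note that reduce_add, reduce_min and select are available on both the SSE4.1 and the fallback paths shown in the header, so the sketch does not depend on which branch is compiled.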