/* Body of kernel_write_id_slots(): accumulate an (id, weight) pair into the per-pixel slots. */
for (int slot = 0; slot < num_slots; slot++) {
  ccl_global IDPassBufferElement *id_buffer = (ccl_global IDPassBufferElement *)buffer;
#ifdef __ATOMIC_PASS_WRITE__
  /* Empty slot: the ID is not stored yet, so claim the slot atomically.
   * If another thread stored a different ID first, keep searching from this slot on. */
  if (id_buffer[slot].x == ID_NONE) {
    const float old_id = atomic_compare_and_swap_float(buffer + slot * 2, ID_NONE, id);
    if (old_id != ID_NONE && old_id != id) {
      continue;
    }
    atomic_add_and_fetch_float(buffer + slot * 2 + 1, weight);
    break;
  }
  /* Slot already holds this ID, or the last slot acts as a catch-all: add the weight. */
  else if (id_buffer[slot].x == id || slot == num_slots - 1) {
    atomic_add_and_fetch_float(buffer + slot * 2 + 1, weight);
    break;
  }
#else /* __ATOMIC_PASS_WRITE__ */
  /* Empty slot: store the ID and its weight. */
  if (id_buffer[slot].x == ID_NONE) {
    id_buffer[slot].x = id;
    id_buffer[slot].y = weight;
    break;
  }
  else if (id_buffer[slot].x == id || slot == num_slots - 1) {
    id_buffer[slot].y += weight;
    break;
  }
#endif /* __ATOMIC_PASS_WRITE__ */
}
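/* The two atomic helpers used in the __ATOMIC_PASS_WRITE__ branch,
 * atomic_compare_and_swap_float() and atomic_add_and_fetch_float(), only appear as
 * macro names in the listing further down. As a rough illustration of the semantics
 * the retry loop above relies on (a C11 sketch with hypothetical names, not the
 * actual Cycles definitions): */

#include <stdatomic.h>
#include <stdint.h>

/* Compare-and-swap on a float stored in a 32-bit word. Returns the value *p held
 * before the attempt; the swap only happens when that value equals old_val. */
static float cas_float_sketch(_Atomic uint32_t *p, float old_val, float new_val)
{
  union { float f; uint32_t u; } expected, desired;
  expected.f = old_val;
  desired.f = new_val;
  /* On failure, expected.u is overwritten with the current contents of *p. */
  atomic_compare_exchange_strong(p, &expected.u, desired.u);
  return expected.f;
}

/* Adds x to *p and returns the new value, retrying until no other thread interferes. */
static float add_and_fetch_float_sketch(_Atomic uint32_t *p, float x)
{
  union { float f; uint32_t u; } current, next;
  current.u = atomic_load(p);
  do {
    next.f = current.f + x;
  } while (!atomic_compare_exchange_weak(p, &current.u, next.u));
  return next.f;
}

/* The property the loop above depends on is that the compare-and-swap returns the
 * slot's previous contents: a thread that loses the race can tell whether the winner
 * stored the same ID (then it simply adds its weight) or a different one (then it
 * moves on to the next slot). */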
/* Body of kernel_sort_id_slots(): order the ID slots by descending weight. */
ccl_global IDPassBufferElement *id_buffer = (ccl_global IDPassBufferElement *)buffer;
for (int slot = 1; slot < num_slots; ++slot) {
  if (id_buffer[slot].x == ID_NONE) {
    return;
  }
  /* Only a handful of slots per pixel, so insertion sort is sufficient. */
  int i = slot;
  while (i > 0 && id_buffer[i].y > id_buffer[i - 1].y) {
    const IDPassBufferElement swap = id_buffer[i];
    id_buffer[i] = id_buffer[i - 1];
    id_buffer[i - 1] = swap;
    --i;
  }
}
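/* For a concrete picture of how the two routines work together, here is a
 * self-contained host-side sketch. It mirrors the non-atomic path of
 * kernel_write_id_slots() and the insertion sort above on a plain float array; the
 * ID_NONE value of 0.0f, the slot count and the sample weights are illustrative
 * assumptions, not values taken from Cycles. */

#include <stdio.h>

#define ID_NONE 0.0f /* assumed sentinel for an empty slot (illustration only) */

typedef struct IDPassBufferElement {
  float x; /* ID */
  float y; /* accumulated weight */
} IDPassBufferElement;

/* Mirrors the non-atomic branch of kernel_write_id_slots() shown above. */
static void write_id_slot(float *buffer, int num_slots, float id, float weight)
{
  IDPassBufferElement *id_buffer = (IDPassBufferElement *)buffer;
  for (int slot = 0; slot < num_slots; slot++) {
    if (id_buffer[slot].x == ID_NONE) {
      id_buffer[slot].x = id;
      id_buffer[slot].y = weight;
      break;
    }
    if (id_buffer[slot].x == id || slot == num_slots - 1) {
      id_buffer[slot].y += weight;
      break;
    }
  }
}

/* Mirrors kernel_sort_id_slots(): order the slots by descending weight. */
static void sort_id_slots(float *buffer, int num_slots)
{
  IDPassBufferElement *id_buffer = (IDPassBufferElement *)buffer;
  for (int slot = 1; slot < num_slots; ++slot) {
    if (id_buffer[slot].x == ID_NONE) {
      return;
    }
    for (int i = slot; i > 0 && id_buffer[i].y > id_buffer[i - 1].y; --i) {
      const IDPassBufferElement swap = id_buffer[i];
      id_buffer[i] = id_buffer[i - 1];
      id_buffer[i - 1] = swap;
    }
  }
}

int main(void)
{
  /* Two slots per pixel -> 2 * 2 floats, cleared to "empty". */
  float buffer[4] = {ID_NONE, 0.0f, ID_NONE, 0.0f};

  write_id_slot(buffer, 2, 7.0f, 0.2f); /* a sample hits object 7 */
  write_id_slot(buffer, 2, 3.0f, 0.6f); /* a sample hits object 3 */
  write_id_slot(buffer, 2, 7.0f, 0.2f); /* another sample hits object 7 */

  sort_id_slots(buffer, 2);

  /* Prints: id 3.0 weight 0.60, then id 7.0 weight 0.40. */
  for (int slot = 0; slot < 2; slot++) {
    printf("id %.1f weight %.2f\n", buffer[slot * 2], buffer[slot * 2 + 1]);
  }
  return 0;
}

/* In Cycles itself the same two steps happen on the render buffer:
 * kernel_write_id_slots() accumulates coverage while samples are taken, and
 * kernel_cryptomatte_post() (declared further down) is responsible for the per-pixel
 * sort once sampling is finished. */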
#define atomic_compare_and_swap_float(p, old_val, new_val)
#define atomic_add_and_fetch_float(p, x)
#define kernel_assert(cond)
#define ccl_device_inline
#define CCL_NAMESPACE_END
typedef unsigned __int64 uint64_t; /* MSVC-specific 64-bit typedef */
typedef const KernelGlobalsCPU *ccl_restrict KernelGlobals;

CCL_NAMESPACE_BEGIN

/* One slot of an ID pass in the render buffer: an ID and its accumulated weight. */
typedef struct IDPassBufferElement {
  float x; /* ID */
  float y; /* weight */
} IDPassBufferElement;

ccl_device_inline void kernel_write_id_slots(ccl_global float *buffer,
                                             int num_slots,
                                             float id,
                                             float weight);
ccl_device_inline void kernel_sort_id_slots(ccl_global float *buffer, int num_slots);
ccl_device_inline void kernel_cryptomatte_post(KernelGlobals kg,
                                               ccl_global float *render_buffer,
                                               int pixel_index);

/* These functions are referenced from the GPU wrapper kernels (ccl_gpu_kernel_postfix),
 * which forward the work tile, render_buffer and pass_stride to them. */
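/* kernel_cryptomatte_post() is only declared in this listing; its body is not shown
 * here. The following is a hedged sketch of the wiring such a post-process step needs
 * (the kernel_data.film field names pass_stride, pass_cryptomatte and cryptomatte_depth
 * are assumptions, not taken from this listing): locate the pixel's cryptomatte slots
 * inside the render buffer and hand them to kernel_sort_id_slots(). */

ccl_device_inline void kernel_cryptomatte_post_sketch(KernelGlobals kg,
                                                      ccl_global float *render_buffer,
                                                      int pixel_index)
{
  const int pass_stride = kernel_data.film.pass_stride;
  /* Use a 64-bit offset: pixel_index * pass_stride can exceed 32 bits on large renders. */
  const uint64_t render_buffer_offset = (uint64_t)pixel_index * pass_stride;
  ccl_global float *cryptomatte_buffer = render_buffer + render_buffer_offset +
                                         kernel_data.film.pass_cryptomatte;
  /* Assumed layout: ID/weight pairs, slot count derived from the configured depth. */
  kernel_sort_id_slots(cryptomatte_buffer, 2 * kernel_data.film.cryptomatte_depth);
}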