draw_manager_data.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2016 Blender Foundation. */
3 
8 #include "draw_manager.h"
9 
10 #include "BKE_curve.h"
11 #include "BKE_duplilist.h"
12 #include "BKE_global.h"
13 #include "BKE_image.h"
14 #include "BKE_mesh.h"
15 #include "BKE_object.h"
16 #include "BKE_paint.h"
17 #include "BKE_pbvh.h"
18 #include "BKE_volume.h"
19 
20 #include "DNA_curve_types.h"
21 #include "DNA_mesh_types.h"
22 #include "DNA_meta_types.h"
23 
24 #include "BLI_alloca.h"
25 #include "BLI_hash.h"
26 #include "BLI_link_utils.h"
27 #include "BLI_listbase.h"
28 #include "BLI_memblock.h"
29 #include "BLI_mempool.h"
30 
31 #ifdef DRW_DEBUG_CULLING
32 # include "BLI_math_bits.h"
33 #endif
34 
35 #include "GPU_buffers.h"
36 #include "GPU_capabilities.h"
37 #include "GPU_material.h"
38 #include "GPU_uniform_buffer.h"
39 
40 #include "intern/gpu_codegen.h"
41 
42 /* -------------------------------------------------------------------- */
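/* draw_call_sort() below is a single-pass counting sort keyed on the GPUBatch
 * pointer: commands that share a batch become contiguous, which minimizes GPU
 * state changes when the pass is submitted. Illustration with a hypothetical
 * batch address: a GPUBatch at 0x7f3200a1c0 hashes to ((0x7f3200a1c0 >> 6) % 128),
 * i.e. bucket 7 of the 128-entry histogram. Scattering in reverse order keeps the
 * sort stable, so resource-ID ordering inside each bucket is preserved. */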
46 static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
47 {
 48  /* Count unique batches. It's not really important if
 49  * there are collisions. If there are a lot of different batches,
 50  * the sorting benefit will be negligible.
 51  * So at least sort fast! */
52  uchar idx[128] = {0};
53  /* Shift by 6 positions knowing each GPUBatch is > 64 bytes */
54 #define KEY(a) ((((size_t)((a).draw.batch)) >> 6) % ARRAY_SIZE(idx))
55  BLI_assert(array_len <= ARRAY_SIZE(idx));
56 
57  for (int i = 0; i < array_len; i++) {
58  /* Early out if nothing to sort. */
59  if (++idx[KEY(array[i])] == array_len) {
60  return;
61  }
62  }
63  /* Accumulate batch indices */
64  for (int i = 1; i < ARRAY_SIZE(idx); i++) {
65  idx[i] += idx[i - 1];
66  }
67  /* Traverse in reverse to not change the order of the resource ID's. */
68  for (int src = array_len - 1; src >= 0; src--) {
69  array_tmp[--idx[KEY(array[src])]] = array[src];
70  }
71 #undef KEY
72 
73  memcpy(array, array_tmp, sizeof(*array) * array_len);
74 }
75 
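/* Finalize per-frame resource data: upload the accumulated object matrices and
 * object infos into chunked UBOs (one UBO per resource chunk), free UBOs that are
 * no longer needed, then sort every command chunk that only contains plain draw
 * commands so draws using the same batch end up grouped together. */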
 76 void drw_resource_buffer_finish(DRWData *vmempool)
 77 {
78  int chunk_id = DRW_handle_chunk_get(&DST.resource_handle);
79  int elem_id = DRW_handle_id_get(&DST.resource_handle);
80  int ubo_len = 1 + chunk_id - ((elem_id == 0) ? 1 : 0);
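 /* One UBO is needed per resource chunk: chunk_id is the chunk currently being
  * filled, so chunk_id + 1 chunks exist, minus one if that last chunk is still
  * empty (elem_id == 0). */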
81  size_t list_size = sizeof(GPUUniformBuf *) * ubo_len;
82 
83  /* TODO: find a better system. currently a lot of obinfos UBO are going to be unused
84  * if not rendering with Eevee. */
85 
86  if (vmempool->matrices_ubo == NULL) {
87  vmempool->matrices_ubo = MEM_callocN(list_size, __func__);
88  vmempool->obinfos_ubo = MEM_callocN(list_size, __func__);
89  vmempool->ubo_len = ubo_len;
90  }
91 
92  /* Remove unnecessary buffers */
93  for (int i = ubo_len; i < vmempool->ubo_len; i++) {
94  GPU_uniformbuf_free(vmempool->matrices_ubo[i]);
95  GPU_uniformbuf_free(vmempool->obinfos_ubo[i]);
96  }
97 
98  if (ubo_len != vmempool->ubo_len) {
99  vmempool->matrices_ubo = MEM_recallocN(vmempool->matrices_ubo, list_size);
100  vmempool->obinfos_ubo = MEM_recallocN(vmempool->obinfos_ubo, list_size);
101  vmempool->ubo_len = ubo_len;
102  }
103 
104  /* Create/Update buffers. */
105  for (int i = 0; i < ubo_len; i++) {
106  void *data_obmat = BLI_memblock_elem_get(vmempool->obmats, i, 0);
107  void *data_infos = BLI_memblock_elem_get(vmempool->obinfos, i, 0);
108  if (vmempool->matrices_ubo[i] == NULL) {
 109  vmempool->matrices_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectMatrix) *
 110  DRW_RESOURCE_CHUNK_LEN);
 111  vmempool->obinfos_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectInfos) *
 112  DRW_RESOURCE_CHUNK_LEN);
 113  }
114  GPU_uniformbuf_update(vmempool->matrices_ubo[i], data_obmat);
115  GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
116  }
117 
119 
120  /* Aligned alloc to avoid unaligned memcpy. */
121  DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
122  DRWCommandChunk *chunk;
123  BLI_memblock_iter iter;
124  BLI_memblock_iternew(vmempool->commands, &iter);
125  while ((chunk = BLI_memblock_iterstep(&iter))) {
126  bool sortable = true;
127  /* We can only sort chunks that contain #DRWCommandDraw only. */
128  for (int i = 0; i < ARRAY_SIZE(chunk->command_type) && sortable; i++) {
129  if (chunk->command_type[i] != 0) {
130  sortable = false;
131  }
132  }
133  if (sortable) {
134  draw_call_sort(chunk->commands, chunk_tmp->commands, chunk->command_used);
135  }
136  }
137  MEM_freeN(chunk_tmp);
138 }
139 
142 /* -------------------------------------------------------------------- */
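/* Uniforms are recorded per shading group in small fixed-size DRWUniformChunk
 * blocks prepended to a linked list, so registering a uniform never reallocates.
 * Small scalar/vector values are copied inline; everything else is stored as a
 * pointer (or pointer-to-pointer for the "_ref" variants) resolved at draw time. */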
 146 static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
 147  int loc,
 148  DRWUniformType type,
 149  const void *value,
150  eGPUSamplerState sampler_state,
151  int length,
152  int arraysize)
153 {
154  if (loc == -1) {
155  /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
156  // BLI_assert(0);
157  return;
158  }
159 
160  DRWUniformChunk *unichunk = shgroup->uniforms;
161  /* Happens on first uniform or if chunk is full. */
 162  if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
 163  unichunk = BLI_memblock_alloc(DST.vmempool->uniforms);
 164  unichunk->uniform_len = ARRAY_SIZE(shgroup->uniforms->uniforms);
165  unichunk->uniform_used = 0;
166  BLI_LINKS_PREPEND(shgroup->uniforms, unichunk);
167  }
168 
169  DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used++;
170 
171  uni->location = loc;
172  uni->type = type;
173  uni->length = length;
174  uni->arraysize = arraysize;
175 
 176  switch (type) {
 177  case DRW_UNIFORM_INT_COPY:
 178  BLI_assert(length <= 4);
 179  memcpy(uni->ivalue, value, sizeof(int) * length);
 180  break;
 181  case DRW_UNIFORM_FLOAT_COPY:
 182  BLI_assert(length <= 4);
 183  memcpy(uni->fvalue, value, sizeof(float) * length);
 184  break;
185  case DRW_UNIFORM_BLOCK:
186  uni->block = (GPUUniformBuf *)value;
 187  break;
 188  case DRW_UNIFORM_BLOCK_REF:
 189  uni->block_ref = (GPUUniformBuf **)value;
190  break;
191  case DRW_UNIFORM_IMAGE:
192  case DRW_UNIFORM_TEXTURE:
193  uni->texture = (GPUTexture *)value;
194  uni->sampler_state = sampler_state;
 195  break;
 196  case DRW_UNIFORM_IMAGE_REF:
 197  case DRW_UNIFORM_TEXTURE_REF:
 198  uni->texture_ref = (GPUTexture **)value;
199  uni->sampler_state = sampler_state;
200  break;
 201  case DRW_UNIFORM_BLOCK_OBATTRS:
 202  uni->uniform_attrs = (GPUUniformAttrList *)value;
203  break;
204  default:
205  uni->pvalue = (const float *)value;
206  break;
207  }
208 }
209 
 210 static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
 211  const char *name,
 212  DRWUniformType type,
 213  const void *value,
214  int length,
215  int arraysize)
216 {
217  BLI_assert(arraysize > 0 && arraysize <= 16);
218  BLI_assert(length >= 0 && length <= 16);
226  int location = GPU_shader_get_uniform(shgroup->shader, name);
227  drw_shgroup_uniform_create_ex(shgroup, location, type, value, 0, length, arraysize);
228 }
229 
 230 void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
 231  const char *name,
232  const GPUTexture *tex,
233  eGPUSamplerState sampler_state)
234 {
235  BLI_assert(tex != NULL);
236  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
237  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, sampler_state, 0, 1);
238 }
239 
240 void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
241 {
243 }
244 
 245 void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
 246  const char *name,
247  GPUTexture **tex,
248  eGPUSamplerState sampler_state)
249 {
250  BLI_assert(tex != NULL);
251  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
252  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, sampler_state, 0, 1);
253 }
254 
256 {
258 }
259 
260 void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
261 {
262  BLI_assert(tex != NULL);
263  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
264  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, 0, 0, 1);
265 }
266 
267 void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
268 {
269  BLI_assert(tex != NULL);
270  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
 271  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, 0, 0, 1);
 272 }
273 
 274 void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
 275  const char *name,
 276  const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
 277 {
278  BLI_assert(ubo != NULL);
279  int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
280  if (loc == -1) {
281 #ifdef DRW_UNUSED_RESOURCE_TRACKING
282  printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
283  file,
284  line,
285  name);
286 #else
287  /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
288  // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
289 #endif
290  return;
291  }
292  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
293 }
294 
 295 void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
 296  const char *name,
 297  GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
 298 {
299  BLI_assert(ubo != NULL);
300  int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
301  if (loc == -1) {
302 #ifdef DRW_UNUSED_RESOURCE_TRACKING
303  printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
304  file,
305  line,
306  name);
307 #else
308  /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
309  // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
310 #endif
311  return;
312  }
313  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
314 }
315 
 316 void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
 317  const char *name,
 318  GPUStorageBuf *ssbo DRW_DEBUG_FILE_LINE_ARGS)
 319 {
320  BLI_assert(ssbo != NULL);
321  /* TODO(@fclem): Fix naming inconsistency. */
322  int loc = GPU_shader_get_ssbo(shgroup->shader, name);
323  if (loc == -1) {
324 #ifdef DRW_UNUSED_RESOURCE_TRACKING
325  printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
326  file,
327  line,
328  name);
329 #else
330  /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
331  // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
332 #endif
333  return;
334  }
335  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, 0, 0, 1);
336 }
337 
 338 void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
 339  const char *name,
 340  GPUStorageBuf **ssbo DRW_DEBUG_FILE_LINE_ARGS)
 341 {
342  BLI_assert(ssbo != NULL);
343  /* TODO(@fclem): Fix naming inconsistency. */
344  int loc = GPU_shader_get_ssbo(shgroup->shader, name);
345  if (loc == -1) {
346 #ifdef DRW_UNUSED_RESOURCE_TRACKING
347  printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
348  file,
349  line,
350  name);
351 #else
352  /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
353  // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
354 #endif
355  return;
356  }
357  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, 0, 0, 1);
358 }
359 
 360 void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
 361  const char *name,
362  const int *value,
363  int arraysize)
364 {
365  /* Boolean are expected to be 4bytes longs for OpenGL! */
366  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
367 }
368 
 369 void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup,
 370  const char *name,
371  const float *value,
372  int arraysize)
373 {
374  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
375 }
376 
 377 void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup,
 378  const char *name,
379  const float *value,
380  int arraysize)
381 {
382  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
383 }
384 
 385 void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup,
 386  const char *name,
387  const float *value,
388  int arraysize)
389 {
390  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
391 }
392 
 393 void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup,
 394  const char *name,
395  const float *value,
396  int arraysize)
397 {
398  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
399 }
400 
 401 void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup,
 402  const char *name,
403  const int *value,
404  int arraysize)
405 {
406  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
407 }
408 
 409 void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup,
 410  const char *name,
411  const int *value,
412  int arraysize)
413 {
414  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
415 }
416 
 417 void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup,
 418  const char *name,
419  const int *value,
420  int arraysize)
421 {
422  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
423 }
424 
 425 void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup,
 426  const char *name,
427  const int *value,
428  int arraysize)
429 {
430  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
431 }
432 
433 void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
434 {
435  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
436 }
437 
438 void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
439 {
440  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
441 }
442 
443 void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
444 {
445  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
446 }
447 
448 void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
449 {
450  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 2, 1);
451 }
452 
453 void DRW_shgroup_uniform_ivec3_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
454 {
455  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 3, 1);
456 }
457 
458 void DRW_shgroup_uniform_ivec4_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
459 {
460  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 4, 1);
461 }
462 
463 void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
464 {
465  int ival = value;
466  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &ival, 1, 1);
467 }
468 
469 void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
470 {
471  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
472 }
473 
474 void DRW_shgroup_uniform_vec2_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
475 {
476  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 2, 1);
477 }
478 
479 void DRW_shgroup_uniform_vec3_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
480 {
481  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 3, 1);
482 }
483 
484 void DRW_shgroup_uniform_vec4_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
485 {
486  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 4, 1);
487 }
488 
 489 void DRW_shgroup_uniform_vec4_array_copy(DRWShadingGroup *shgroup,
 490  const char *name,
491  const float (*value)[4])
492 {
493  int location = GPU_shader_get_uniform(shgroup->shader, name);
494 
495  if (location == -1) {
496  /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
497  // BLI_assert(0);
498  return;
499  }
500 
 501  /* Each array element is stored as an individual entry in the uniform list.
 502  * All entries from the same array share the same base location,
 503  * and the array-size is used to determine the number of elements
 504  * copied in draw_update_uniforms. */
505  for (int i = 0; i < 4; i++) {
506  drw_shgroup_uniform_create_ex(shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], 0, 4, 4);
507  }
508 }
509 
 510 void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
 511  const char *name,
512  GPUVertBuf *vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
513 {
514  int location = GPU_shader_get_ssbo(shgroup->shader, name);
515  if (location == -1) {
516 #ifdef DRW_UNUSED_RESOURCE_TRACKING
517  printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
518  file,
519  line,
520  name);
521 #else
522  BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
523 #endif
524  return;
525  }
 526  drw_shgroup_uniform_create_ex(
 527  shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE, vertex_buffer, 0, 0, 1);
528 }
529 
 530 void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
 531  const char *name,
532  GPUVertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
533 {
534  int location = GPU_shader_get_ssbo(shgroup->shader, name);
535  if (location == -1) {
536 #ifdef DRW_UNUSED_RESOURCE_TRACKING
537  printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
538  file,
539  line,
540  name);
541 #else
542  BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
543 #endif
544  return;
545  }
 546  drw_shgroup_uniform_create_ex(
 547  shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF, vertex_buffer, 0, 0, 1);
548 }
549 
 550 void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
 551  const char *name,
552  GPUVertBuf *vertex_buffer)
553 {
554  int location = GPU_shader_get_ssbo(shgroup->shader, name);
555  if (location == -1) {
556  return;
557  }
 558  drw_shgroup_uniform_create_ex(
 559  shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE, vertex_buffer, 0, 0, 1);
560 }
561 
 562 void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
 563  const char *name,
564  GPUVertBuf **vertex_buffer)
565 {
566  int location = GPU_shader_get_ssbo(shgroup->shader, name);
567  if (location == -1) {
568  return;
569  }
 570  drw_shgroup_uniform_create_ex(
 571  shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF, vertex_buffer, 0, 0, 1);
572 }
575 /* -------------------------------------------------------------------- */
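/* drw_call_calc_orco() computes the two factors that remap object-space positions
 * into generated (orco) texture coordinates with a single MADD in the shader:
 *   orco = pos * r_orcofacs[1] + r_orcofacs[0]
 * which expands to (pos - texcoloc + texcosize) / (2 * texcosize), mapping the
 * texture-space bounds [loc - size, loc + size] onto [0, 1]. */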
579 static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
580 {
581  ID *ob_data = (ob) ? ob->data : NULL;
582  float loc[3], size[3];
583  float *texcoloc = NULL;
584  float *texcosize = NULL;
585  if (ob_data != NULL) {
586  switch (GS(ob_data->name)) {
587  case ID_VO: {
588  BoundBox *bbox = BKE_volume_boundbox_get(ob);
589  mid_v3_v3v3(loc, bbox->vec[0], bbox->vec[6]);
590  sub_v3_v3v3(size, bbox->vec[0], bbox->vec[6]);
591  texcoloc = loc;
592  texcosize = size;
593  break;
594  }
595  case ID_ME:
596  BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
597  break;
598  case ID_CU_LEGACY: {
599  Curve *cu = (Curve *)ob_data;
 600  BKE_curve_texspace_ensure(cu);
 601  texcoloc = cu->loc;
602  texcosize = cu->size;
603  break;
604  }
605  case ID_MB: {
606  MetaBall *mb = (MetaBall *)ob_data;
607  texcoloc = mb->loc;
608  texcosize = mb->size;
609  break;
610  }
611  default:
612  break;
613  }
614  }
615 
616  if ((texcoloc != NULL) && (texcosize != NULL)) {
617  mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
618  invert_v3(r_orcofacs[1]);
619  sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
620  negate_v3(r_orcofacs[0]);
621  mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
622  }
623  else {
624  copy_v3_fl(r_orcofacs[0], 0.0f);
625  copy_v3_fl(r_orcofacs[1], 1.0f);
626  }
627 }
628 
629 BLI_INLINE void drw_call_matrix_init(DRWObjectMatrix *ob_mats, Object *ob, float (*obmat)[4])
630 {
631  copy_m4_m4(ob_mats->model, obmat);
632  if (ob) {
633  copy_m4_m4(ob_mats->modelinverse, ob->imat);
634  }
635  else {
636  /* WATCH: Can be costly. */
637  invert_m4_m4(ob_mats->modelinverse, ob_mats->model);
638  }
639 }
640 
641 static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
642 {
643  BLI_assert(ob);
644  /* Index. */
645  ob_infos->ob_index = ob->index;
646  /* Orco factors. */
647  drw_call_calc_orco(ob, ob_infos->orcotexfac);
648  /* Random float value. */
 649  uint random = (DST.dupli_source) ?
 650  DST.dupli_source->random_id :
 651  /* TODO(fclem): this is rather costly to do at runtime. Maybe we can
 652  * put it in ob->runtime and make depsgraph ensure it is up to date. */
 653  BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
654  ob_infos->ob_random = random * (1.0f / (float)0xFFFFFFFF);
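 /* ob_flag packs several booleans into one float: starting from 1.0, bit 1 is
  * added when the object is selected, bit 2 when it comes from a dupli, bit 3
  * when it belongs to a background set, bit 4 when it is (or duplicates) the
  * active object; the final sign encodes negative object scaling. */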
655  /* Object State. */
656  ob_infos->ob_flag = 1.0f; /* Required to have a correct sign */
657  ob_infos->ob_flag += (ob->base_flag & BASE_SELECTED) ? (1 << 1) : 0;
658  ob_infos->ob_flag += (ob->base_flag & BASE_FROM_DUPLI) ? (1 << 2) : 0;
659  ob_infos->ob_flag += (ob->base_flag & BASE_FROM_SET) ? (1 << 3) : 0;
660  if (ob->base_flag & BASE_FROM_DUPLI) {
661  ob_infos->ob_flag += (DRW_object_get_dupli_parent(ob) == DST.draw_ctx.obact) ? (1 << 4) : 0;
662  }
663  else {
664  ob_infos->ob_flag += (ob == DST.draw_ctx.obact) ? (1 << 4) : 0;
665  }
666  /* Negative scaling. */
667  ob_infos->ob_flag *= (ob->transflag & OB_NEG_SCALE) ? -1.0f : 1.0f;
668  /* Object Color. */
669  copy_v4_v4(ob_infos->ob_color, ob->color);
670 }
671 
 672 static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
 673 {
674  const BoundBox *bbox;
675  if (ob != NULL && (bbox = BKE_object_boundbox_get(ob))) {
676  float corner[3];
677  /* Get BoundSphere center and radius from the BoundBox. */
678  mid_v3_v3v3(cull->bsphere.center, bbox->vec[0], bbox->vec[6]);
679  mul_v3_m4v3(corner, ob->obmat, bbox->vec[0]);
680  mul_m4_v3(ob->obmat, cull->bsphere.center);
681  cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);
682 
683  /* Bypass test for very large objects (see T67319). */
684  if (UNLIKELY(cull->bsphere.radius > 1e12)) {
685  cull->bsphere.radius = -1.0f;
686  }
687  }
688  else {
689  /* Bypass test. */
690  cull->bsphere.radius = -1.0f;
691  }
692  /* Reset user data */
693  cull->user_data = NULL;
694 }
695 
696 static DRWResourceHandle drw_resource_handle_new(float (*obmat)[4], Object *ob)
697 {
 698  DRWCullingState *culling = BLI_memblock_alloc(DST.vmempool->cullstates);
 699  DRWObjectMatrix *ob_mats = BLI_memblock_alloc(DST.vmempool->obmats);
 700  /* FIXME Meh, not always needed but can be accessed after creation.
 701  * Also it needs to have the same resource handle. */
 702  DRWObjectInfos *ob_infos = BLI_memblock_alloc(DST.vmempool->obinfos);
 703  UNUSED_VARS(ob_infos);
 704 
 705  DRWResourceHandle handle = DST.resource_handle;
 706  DRW_handle_increment(&DST.resource_handle);
 707 
 708  if (ob && (ob->transflag & OB_NEG_SCALE)) {
 709  DRW_handle_negative_scale_enable(&handle);
 710  }
711 
712  drw_call_matrix_init(ob_mats, ob, obmat);
713  drw_call_culling_init(culling, ob);
714  /* ob_infos is init only if needed. */
715 
716  return handle;
717 }
718 
 719 uint32_t DRW_object_resource_id_get(Object *UNUSED(ob))
 720 {
 721  DRWResourceHandle handle = DST.ob_handle;
 722  if (handle == 0) {
723  /* Handle not yet allocated. Return next handle. */
724  handle = DST.resource_handle;
725  }
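 /* The top bit of a DRWResourceHandle carries the negative-scale flag, so it is
  * masked out here to return the plain resource index. */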
726  return handle & ~(1u << 31);
727 }
728 
 729 static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
 730  float (*obmat)[4],
731  Object *ob)
732 {
733  if (ob == NULL) {
734  if (obmat == NULL) {
735  DRWResourceHandle handle = 0;
736  return handle;
737  }
738 
739  return drw_resource_handle_new(obmat, NULL);
740  }
741 
742  if (DST.ob_handle == 0) {
 743  DST.ob_handle = drw_resource_handle_new(obmat, ob);
 744  DST.ob_state_obinfo_init = false;
745  }
746 
747  if (shgroup->objectinfo) {
748  if (!DST.ob_state_obinfo_init) {
749  DST.ob_state_obinfo_init = true;
 750  DRWObjectInfos *ob_infos = DRW_memblock_elem_from_handle(DST.vmempool->obinfos,
 751  &DST.ob_handle);
752 
753  drw_call_obinfos_init(ob_infos, ob);
754  }
755  }
756 
757  if (shgroup->uniform_attrs) {
759  shgroup->uniform_attrs,
760  &DST.ob_handle,
761  ob,
763  DST.dupli_source);
764  }
765 
766  return DST.ob_handle;
767 }
768 
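/* Every DRWCommandChunk stores the type of each of its commands as a 4-bit value
 * packed into uint64_t words (16 types per word). For example, command index 18
 * lives in word 18 / 16 = 1 at bit offset (18 % 16) * 4 = 8. */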
769 static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
770 {
771  command_type_bits[index / 16] |= ((uint64_t)type) << ((index % 16) * 4);
772 }
773 
774 eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
775 {
776  return ((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
777 }
778 
 779 static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
 780 {
781  DRWCommandChunk *chunk = shgroup->cmd.last;
782 
783  if (chunk == NULL) {
785  smallchunk->command_len = ARRAY_SIZE(smallchunk->commands);
786  smallchunk->command_used = 0;
787  smallchunk->command_type[0] = 0x0lu;
788  chunk = (DRWCommandChunk *)smallchunk;
789  BLI_LINKS_APPEND(&shgroup->cmd, chunk);
790  }
791  else if (chunk->command_used == chunk->command_len) {
793  chunk->command_len = ARRAY_SIZE(chunk->commands);
794  chunk->command_used = 0;
795  memset(chunk->command_type, 0x0, sizeof(chunk->command_type));
796  BLI_LINKS_APPEND(&shgroup->cmd, chunk);
797  }
798 
 799  command_type_set(chunk->command_type, chunk->command_used, type);
 800 
801  return chunk->commands + chunk->command_used++;
802 }
803 
 804 static void drw_command_draw(DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle)
 805 {
 806  DRWCommandDraw *cmd = drw_command_create(shgroup, DRW_CMD_DRAW);
 807  cmd->batch = batch;
808  cmd->handle = handle;
809 }
810 
812  DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
813 {
815  cmd->batch = batch;
816  cmd->handle = handle;
817  cmd->vert_first = start;
818  cmd->vert_count = count;
819 }
820 
822  DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint count, bool use_attr)
823 {
825  cmd->batch = batch;
826  cmd->handle = handle;
827  cmd->inst_count = count;
828  cmd->use_attrs = use_attr;
829 }
830 
832  DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
833 {
835  cmd->batch = batch;
836  cmd->handle = handle;
837  cmd->inst_first = start;
838  cmd->inst_count = count;
839 }
840 
842  int groups_x_len,
843  int groups_y_len,
844  int groups_z_len)
845 {
847  cmd->groups_x_len = groups_x_len;
848  cmd->groups_y_len = groups_y_len;
849  cmd->groups_z_len = groups_z_len;
850 }
851 
852 static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
853 {
855  cmd->groups_ref = groups_ref;
856 }
857 
858 static void drw_command_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
859 {
861  cmd->indirect_buf = indirect_buf;
862 }
863 
865 {
867  cmd->type = type;
868 }
869 
871  GPUBatch *batch,
872  DRWResourceHandle handle,
873  uint vert_count)
874 {
876  cmd->batch = batch;
877  cmd->handle = handle;
878  cmd->vert_count = vert_count;
879 }
880 
881 static void drw_command_set_select_id(DRWShadingGroup *shgroup, GPUVertBuf *buf, uint select_id)
882 {
883  /* Only one can be valid. */
884  BLI_assert(buf == NULL || select_id == -1);
886  cmd->select_buf = buf;
887  cmd->select_id = select_id;
888 }
889 
891  uint write_mask,
892  uint reference,
893  uint compare_mask)
894 {
895  BLI_assert(write_mask <= 0xFF);
896  BLI_assert(reference <= 0xFF);
897  BLI_assert(compare_mask <= 0xFF);
899  cmd->write_mask = write_mask;
900  cmd->comp_mask = compare_mask;
901  cmd->ref = reference;
902 }
903 
904 static void drw_command_clear(DRWShadingGroup *shgroup,
906  uchar r,
907  uchar g,
908  uchar b,
909  uchar a,
910  float depth,
911  uchar stencil)
912 {
914  cmd->clear_channels = channels;
915  cmd->r = r;
916  cmd->g = g;
917  cmd->b = b;
918  cmd->a = a;
919  cmd->depth = depth;
920  cmd->stencil = stencil;
921 }
922 
924  DRWState enable,
925  DRWState disable)
926 {
927  /* TODO: Restrict what state can be changed. */
929  cmd->enable = enable;
930  cmd->disable = disable;
931 }
932 
934  Object *ob,
935  float (*obmat)[4],
936  struct GPUBatch *geom,
937  bool bypass_culling,
938  void *user_data)
939 {
940  BLI_assert(geom != NULL);
941  if (G.f & G_FLAG_PICKSEL) {
943  }
944  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob);
945  drw_command_draw(shgroup, geom, handle);
946 
947  /* Culling data. */
948  if (user_data || bypass_culling) {
950  &DST.ob_handle);
951 
952  if (user_data) {
953  culling->user_data = user_data;
954  }
955  if (bypass_culling) {
956  /* NOTE: this will disable culling for the whole object. */
957  culling->bsphere.radius = -1.0f;
958  }
959  }
960 }
961 
963  DRWShadingGroup *shgroup, struct Object *ob, GPUBatch *geom, uint v_sta, uint v_num)
964 {
965  BLI_assert(geom != NULL);
966  if (G.f & G_FLAG_PICKSEL) {
968  }
969  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
970  drw_command_draw_range(shgroup, geom, handle, v_sta, v_num);
971 }
972 
974  DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_num)
975 {
976  BLI_assert(geom != NULL);
977  if (G.f & G_FLAG_PICKSEL) {
979  }
980  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
981  drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num);
982 }
983 
985  int groups_x_len,
986  int groups_y_len,
987  int groups_z_len)
988 {
989  BLI_assert(groups_x_len > 0 && groups_y_len > 0 && groups_z_len > 0);
991 
992  drw_command_compute(shgroup, groups_x_len, groups_y_len, groups_z_len);
993 }
994 
995 void DRW_shgroup_call_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
996 {
998 
999  drw_command_compute_ref(shgroup, groups_ref);
1000 }
1001 
1003 {
1005 
1006  drw_command_compute_indirect(shgroup, indirect_buf);
1007 }
1009 {
1011 
1012  drw_command_barrier(shgroup, type);
1013 }
1014 
1016  GPUBatch *geom,
1017  Object *ob,
1018  uint vert_count)
1019 {
1020  BLI_assert(vert_count > 0);
1021  BLI_assert(geom != NULL);
1022  if (G.f & G_FLAG_PICKSEL) {
1024  }
1025  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
1026  drw_command_draw_procedural(shgroup, geom, handle, vert_count);
1027 }
1028 
1030 {
1031  struct GPUBatch *geom = drw_cache_procedural_points_get();
1032  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count);
1033 }
1034 
1036 {
1037  struct GPUBatch *geom = drw_cache_procedural_lines_get();
1038  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, line_count * 2);
1039 }
1040 
1042 {
1044  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3);
1045 }
1046 
1048  Object *ob,
1049  struct GPUBatch *geom,
1050  uint count)
1051 {
1052  BLI_assert(geom != NULL);
1053  if (G.f & G_FLAG_PICKSEL) {
1055  }
1056  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
1057  drw_command_draw_instance(shgroup, geom, handle, count, false);
1058 }
1059 
1061  Object *ob,
1062  struct GPUBatch *geom,
1063  struct GPUBatch *inst_attributes)
1064 {
1065  BLI_assert(geom != NULL);
1066  BLI_assert(inst_attributes != NULL);
1067  if (G.f & G_FLAG_PICKSEL) {
1069  }
1070  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
1072  DST.vmempool->idatalist, NULL, inst_attributes, geom);
1073  drw_command_draw_instance(shgroup, batch, handle, 0, true);
1074 }
1075 
1076 #define SCULPT_DEBUG_BUFFERS (G.debug_value == 889)
 1077 typedef struct DRWSculptCallbackData {
 1078  Object *ob;
 1079  DRWShadingGroup **shading_groups;
 1080  int num_shading_groups;
 1081  bool use_wire;
1082  bool use_mats;
1083  bool use_mask;
1085  bool fast_mode; /* Set by draw manager. Do not init. */
1086 
 1087  int debug_node_nr;
 1088 } DRWSculptCallbackData;
 1089 
1090 #define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
1091 static float sculpt_debug_colors[9][4] = {
1092  {1.0f, 0.2f, 0.2f, 1.0f},
1093  {0.2f, 1.0f, 0.2f, 1.0f},
1094  {0.2f, 0.2f, 1.0f, 1.0f},
1095  {1.0f, 1.0f, 0.2f, 1.0f},
1096  {0.2f, 1.0f, 1.0f, 1.0f},
1097  {1.0f, 0.2f, 1.0f, 1.0f},
1098  {1.0f, 0.7f, 0.2f, 1.0f},
1099  {0.2f, 1.0f, 0.7f, 1.0f},
1100  {0.7f, 0.2f, 1.0f, 1.0f},
1101 };
1102 
 1103 static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers)
 1104 {
1105  if (!buffers) {
1106  return;
1107  }
1108 
1109  /* Meh... use_mask is a bit misleading here. */
1110  if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) {
1111  return;
1112  }
1113 
1114  GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire);
1115  short index = 0;
1116 
1117  if (scd->use_mats) {
1118  index = GPU_pbvh_buffers_material_index_get(buffers);
1119  if (index >= scd->num_shading_groups) {
1120  index = 0;
1121  }
1122  }
1123 
1124  DRWShadingGroup *shgrp = scd->shading_groups[index];
1125  if (geom != NULL && shgrp != NULL) {
1126  if (SCULPT_DEBUG_BUFFERS) {
 1127  /* Color each buffer in a different color. Only works in solid/X-ray mode. */
1128  shgrp = DRW_shgroup_create_sub(shgrp);
1130  shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->debug_node_nr++), 1);
1131  }
1132  /* DRW_shgroup_call_no_cull reuses matrices calculations for all the drawcalls of this
1133  * object. */
1134  DRW_shgroup_call_no_cull(shgrp, geom, scd->ob);
1135  }
1136 }
1137 
1138 static void sculpt_debug_cb(void *user_data,
1139  const float bmin[3],
1140  const float bmax[3],
1141  PBVHNodeFlags flag)
1142 {
1143  int *debug_node_nr = (int *)user_data;
1144  BoundBox bb;
1145  BKE_boundbox_init_from_minmax(&bb, bmin, bmax);
1146 
1147 #if 0 /* Nodes hierarchy. */
1148  if (flag & PBVH_Leaf) {
1149  DRW_debug_bbox(&bb, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
1150  }
1151  else {
1152  DRW_debug_bbox(&bb, (float[4]){0.5f, 0.5f, 0.5f, 0.6f});
1153  }
1154 #else /* Color coded leaf bounds. */
1155  if (flag & PBVH_Leaf) {
1156  DRW_debug_bbox(&bb, SCULPT_DEBUG_COLOR((*debug_node_nr)++));
1157  }
1158 #endif
1159 }
1160 
1161 static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4])
1162 {
1163  /* TODO: take into account partial redraw for clipping planes. */
 1164  DRW_view_frustum_planes_get(DRW_view_default_get(), planes);
 1165 
1166  /* Transform clipping planes to object space. Transforming a plane with a
1167  * 4x4 matrix is done by multiplying with the transpose inverse.
1168  * The inverse cancels out here since we transform by inverse(obmat). */
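 /* Concretely, for a world-space plane p (xyz = normal, w = distance) and an
  * object-space point v, dot(p, obmat * v) == dot(transpose(obmat) * p, v),
  * so multiplying by the transposed matrix yields the object-space plane. */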
1169  float tmat[4][4];
1170  transpose_m4_m4(tmat, ob->obmat);
1171  for (int i = 0; i < 6; i++) {
1172  mul_m4_v4(tmat, planes[i]);
1173  }
1174 }
1175 
 1176 static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
 1177 {
1178  /* PBVH should always exist for non-empty meshes, created by depsgraph eval. */
1179  PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : NULL;
1180  if (!pbvh) {
1181  return;
1182  }
1183 
1184  const DRWContextState *drwctx = DRW_context_state_get();
1185  RegionView3D *rv3d = drwctx->rv3d;
1186  const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
1187 
1188  Paint *p = NULL;
1189  if (drwctx->evil_C != NULL) {
 1190  p = BKE_paint_get_active_from_context(drwctx->evil_C);
 1191  }
1192 
1193  /* Frustum planes to show only visible PBVH nodes. */
1194  float update_planes[6][4];
1195  float draw_planes[6][4];
1196  PBVHFrustumPlanes update_frustum;
1197  PBVHFrustumPlanes draw_frustum;
1198 
1199  if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) {
1200  update_frustum.planes = update_planes;
1201  update_frustum.num_planes = 6;
1202  BKE_pbvh_get_frustum_planes(pbvh, &update_frustum);
1203  if (!navigating) {
1204  drw_sculpt_get_frustum_planes(scd->ob, update_planes);
1205  update_frustum.planes = update_planes;
1206  update_frustum.num_planes = 6;
1207  BKE_pbvh_set_frustum_planes(pbvh, &update_frustum);
1208  }
1209  }
1210  else {
1211  drw_sculpt_get_frustum_planes(scd->ob, update_planes);
1212  update_frustum.planes = update_planes;
1213  update_frustum.num_planes = 6;
1214  }
1215 
1216  drw_sculpt_get_frustum_planes(scd->ob, draw_planes);
1217  draw_frustum.planes = draw_planes;
1218  draw_frustum.num_planes = 6;
1219 
1220  /* Fast mode to show low poly multires while navigating. */
1221  scd->fast_mode = false;
1222  if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
1223  scd->fast_mode = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
1224  }
1225 
1226  /* Update draw buffers only for visible nodes while painting.
1227  * But do update them otherwise so navigating stays smooth. */
1228  bool update_only_visible = rv3d && !(rv3d->rflag & RV3D_PAINTING);
1229  if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) {
1230  update_only_visible = true;
1231  }
1232 
1233  Mesh *mesh = scd->ob->data;
1235 
1236  BKE_pbvh_draw_cb(pbvh,
1237  update_only_visible,
1238  &update_frustum,
1239  &draw_frustum,
1240  (void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
1241  scd,
1242  scd->use_mats);
1243 
1244  if (SCULPT_DEBUG_BUFFERS) {
1245  int debug_node_nr = 0;
1246  DRW_debug_modelmat(scd->ob->obmat);
 1247  BKE_pbvh_draw_debug_cb(
 1248  pbvh,
1249  (void (*)(
1250  void *d, const float min[3], const float max[3], PBVHNodeFlags f))sculpt_debug_cb,
1251  &debug_node_nr);
1252  }
1253 }
1254 
1255 void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask)
1256 {
1257  DRWSculptCallbackData scd = {
1258  .ob = ob,
1259  .shading_groups = &shgroup,
1260  .num_shading_groups = 1,
1261  .use_wire = use_wire,
1262  .use_mats = false,
1263  .use_mask = use_mask,
1264  };
 1265  drw_sculpt_generate_calls(&scd);
 1266 }
1267 
1269  int num_shgroups,
1270  Object *ob)
1271 {
1272  DRWSculptCallbackData scd = {
1273  .ob = ob,
1274  .shading_groups = shgroups,
1275  .num_shading_groups = num_shgroups,
1276  .use_wire = false,
1277  .use_mats = true,
1278  .use_mask = false,
1279  };
 1280  drw_sculpt_generate_calls(&scd);
 1281 }
1282 
 1283 static GPUVertFormat inst_select_format = {0};
 1284 
1286  struct GPUVertFormat *format,
1287  GPUPrimType prim_type)
1288 {
1290  BLI_assert(format != NULL);
1291 
1293  callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
1294  callbuf->buf_select = NULL;
1295  callbuf->count = 0;
1296 
1297  if (G.f & G_FLAG_PICKSEL) {
1298  /* Not actually used for rendering but alloced in one chunk. */
1299  if (inst_select_format.attr_len == 0) {
1301  }
1304  drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
1305  }
1306 
1307  DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
1308  GPUBatch *batch = DRW_temp_batch_request(DST.vmempool->idatalist, callbuf->buf, prim_type);
1309  drw_command_draw(shgroup, batch, handle);
1310 
1311  return callbuf;
1312 }
1313 
1315  struct GPUVertFormat *format,
1316  GPUBatch *geom)
1317 {
1318  BLI_assert(geom != NULL);
1319  BLI_assert(format != NULL);
1320 
1322  callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
1323  callbuf->buf_select = NULL;
1324  callbuf->count = 0;
1325 
1326  if (G.f & G_FLAG_PICKSEL) {
1327  /* Not actually used for rendering but alloced in one chunk. */
1328  if (inst_select_format.attr_len == 0) {
1330  }
1333  drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
1334  }
1335 
1336  DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
1338  DST.vmempool->idatalist, callbuf->buf, NULL, geom);
1339  drw_command_draw(shgroup, batch, handle);
1340 
1341  return callbuf;
1342 }
1343 
1345 {
1346  GPUVertBuf *buf = callbuf->buf;
1347  const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
1348 
1349  if (UNLIKELY(resize)) {
1351  }
1352 
1353  GPU_vertbuf_vert_set(buf, callbuf->count, data);
1354 
1355  if (G.f & G_FLAG_PICKSEL) {
1356  if (UNLIKELY(resize)) {
1358  }
1359  GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
1360  }
1361 
1362  callbuf->count++;
1363 }
1364 
1365 void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
1366 {
1367  GPUVertBuf *buf = callbuf->buf;
1368  const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
1369 
1370  BLI_assert(attr_len == GPU_vertbuf_get_format(buf)->attr_len);
1371  UNUSED_VARS_NDEBUG(attr_len);
1372 
1373  if (UNLIKELY(resize)) {
1375  }
1376 
1377  for (int i = 0; i < attr_len; i++) {
1378  GPU_vertbuf_attr_set(buf, i, callbuf->count, attr[i]);
1379  }
1380 
1381  if (G.f & G_FLAG_PICKSEL) {
1382  if (UNLIKELY(resize)) {
1384  }
1385  GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
1386  }
1387 
1388  callbuf->count++;
1389 }
1390 
1393 /* -------------------------------------------------------------------- */
1397 static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
1398 {
1399  shgroup->uniforms = NULL;
1400  shgroup->uniform_attrs = NULL;
1401 
1402  int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
1403  int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
1404  int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO);
1405  int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE);
1406  int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK);
1407  int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID);
1408 
1409  /* TODO(@fclem): Will take the place of the above after the GPUShaderCreateInfo port. */
1410  if (view_ubo_location == -1) {
1411  view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_VIEW);
1412  }
1413  if (model_ubo_location == -1) {
1414  model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_MODEL);
1415  }
1416  if (info_ubo_location == -1) {
1417  info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_INFOS);
1418  }
1419 
1420  if (chunkid_location != -1) {
1422  shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1);
1423  }
1424 
1425  if (resourceid_location != -1) {
1427  shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 0, 1);
1428  }
1429 
1430  if (baseinst_location != -1) {
1432  shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 0, 1);
1433  }
1434 
1435  if (model_ubo_location != -1) {
1437  shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 0, 1);
1438  }
1439  else {
1440  /* NOTE: This is only here to support old hardware fallback where uniform buffer is still
1441  * too slow or buggy. */
1443  int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
1444  if (model != -1) {
1445  drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 0, 1);
1446  }
1447  if (modelinverse != -1) {
1449  shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 0, 1);
1450  }
1451  }
1452 
1453  if (info_ubo_location != -1) {
1455  shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 0, 1);
1456 
1457  /* Abusing this loc to tell shgroup we need the obinfos. */
1458  shgroup->objectinfo = 1;
1459  }
1460  else {
1461  shgroup->objectinfo = 0;
1462  }
1463 
1464  if (view_ubo_location != -1) {
1466  shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 0, 1);
1467  }
1468 
1469  /* Not supported. */
1481 }
1482 
1484 {
1486 
1487  BLI_LINKS_APPEND(&pass->shgroups, shgroup);
1488 
1489  shgroup->shader = shader;
1490  shgroup->cmd.first = NULL;
1491  shgroup->cmd.last = NULL;
1492  shgroup->pass_handle = pass->handle;
1493 
1494  return shgroup;
1495 }
1496 
1498 {
1499  if (!gpupass) {
1500  /* Shader compilation error */
1501  return NULL;
1502  }
1503 
1504  GPUShader *sh = GPU_pass_shader_get(gpupass);
1505 
1506  if (!sh) {
1507  /* Shader not yet compiled */
1508  return NULL;
1509  }
1510 
1512  return grp;
1513 }
1514 
1516  GPUTexture *gputex,
1517  const char *name,
1519 {
1520  DRW_shgroup_uniform_texture_ex(grp, name, gputex, state);
1521 
1522  GPUTexture **gputex_ref = BLI_memblock_alloc(DST.vmempool->images);
1523  *gputex_ref = gputex;
1524  GPU_texture_ref(gputex);
1525 }
1526 
1528 {
1530 
1531  /* Bind all textures needed by the material. */
1533  if (tex->ima) {
1534  /* Image */
1535  GPUTexture *gputex;
1536  ImageUser *iuser = tex->iuser_available ? &tex->iuser : NULL;
1537  if (tex->tiled_mapping_name[0]) {
1538  gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, NULL);
1539  drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
1540  gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, NULL);
1541  drw_shgroup_material_texture(grp, gputex, tex->tiled_mapping_name, tex->sampler_state);
1542  }
1543  else {
1544  gputex = BKE_image_get_gpu_texture(tex->ima, iuser, NULL);
1545  drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
1546  }
1547  }
1548  else if (tex->colorband) {
1549  /* Color Ramp */
1550  DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband);
1551  }
1552  }
1553 
1555  if (ubo != NULL) {
1557  }
1558 
1560  if (uattrs != NULL) {
1562  drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1);
1563  grp->uniform_attrs = uattrs;
1564  }
1565 }
1566 
1568  int arraysize)
1569 {
1570  GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
1571 
1572  for (int i = 0; i < arraysize; i++) {
1574  attrs[i].name,
1575  (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
1576  attrs[i].components,
1577  (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
1578  }
1579  return format;
1580 }
1581 
1583 {
1585  DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
1586 
1587  if (shgroup) {
1588  drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
1590  }
1591  return shgroup;
1592 }
1593 
1595 {
1596  DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
1597  drw_shgroup_init(shgroup, shader);
1598  return shgroup;
1599 }
1600 
1602  DRWPass *pass,
1603  GPUVertBuf *tf_target)
1604 {
1605  BLI_assert(tf_target != NULL);
1606  DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
1607  drw_shgroup_init(shgroup, shader);
1608  drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 0, 1);
1609  return shgroup;
1610 }
1611 
 1612 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
 1613 {
1614  drw_command_set_mutable_state(shgroup, state, 0x0);
1615 }
1616 
 1617 void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
 1618 {
1619  drw_command_set_mutable_state(shgroup, 0x0, state);
1620 }
1621 
1623  uint write_mask,
1624  uint reference,
1625  uint compare_mask)
1626 {
1627  drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask);
1628 }
1629 
1631 {
1632  drw_command_set_stencil_mask(shgroup, 0xFF, mask, 0xFF);
1633 }
1634 
1637  uchar r,
1638  uchar g,
1639  uchar b,
1640  uchar a,
1641  float depth,
1642  uchar stencil)
1643 {
1644  drw_command_clear(shgroup, channels, r, g, b, a, depth, stencil);
1645 }
1646 
1648 {
1649  DRWCommandChunk *chunk = shgroup->cmd.first;
1650  for (; chunk; chunk = chunk->next) {
1651  for (int i = 0; i < chunk->command_used; i++) {
1653  return false;
1654  }
1655  }
1656  }
1657  return true;
1658 }
1659 
1661 {
1663 
1664  *shgroup_new = *shgroup;
1665  drw_shgroup_init(shgroup_new, shgroup_new->shader);
1666  shgroup_new->cmd.first = NULL;
1667  shgroup_new->cmd.last = NULL;
1668 
1670  &shgroup->pass_handle);
1671 
1672  BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new);
1673 
1674  return shgroup_new;
1675 }
1676 
1679 /* -------------------------------------------------------------------- */
1683 /* Extract the 8 corners from a Projection Matrix.
1684  * Although less accurate, this solution can be simplified as follows:
1685  * BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const
1686  * float[3]){1.0f, 1.0f, 1.0f}); for (int i = 0; i < 8; i++) {mul_project_m4_v3(projinv,
1687  * bbox.vec[i]);}
1688  */
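/* Corner indices follow Blender's BoundBox convention: vec[0], vec[3], vec[7] and
 * vec[4] lie on the near plane and vec[1], vec[2], vec[6] and vec[5] on the far
 * plane, with vec[0] at (left, bottom, near) and vec[6] at (right, top, far). */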
1689 static void draw_frustum_boundbox_calc(const float (*viewinv)[4],
1690  const float (*projmat)[4],
1691  BoundBox *r_bbox)
1692 {
1693  float left, right, bottom, top, near, far;
1694  bool is_persp = projmat[3][3] == 0.0f;
1695 
1696 #if 0 /* Equivalent to this but it has accuracy problems. */
1698  &bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
1699  for (int i = 0; i < 8; i++) {
1700  mul_project_m4_v3(projinv, bbox.vec[i]);
1701  }
1702 #endif
1703 
1704  projmat_dimensions(projmat, &left, &right, &bottom, &top, &near, &far);
1705 
1706  r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
1707  r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
1708  r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
1709  r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
1710  r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
1711 
1712  /* Get the coordinates of the far plane. */
1713  if (is_persp) {
1714  float sca_far = far / near;
1715  left *= sca_far;
1716  right *= sca_far;
1717  bottom *= sca_far;
1718  top *= sca_far;
1719  }
1720 
1721  r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
1722  r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
1723  r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
1724  r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
1725  r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
1726 
1727  /* Transform into world space. */
1728  for (int i = 0; i < 8; i++) {
1729  mul_m4_v3(viewinv, r_bbox->vec[i]);
1730  }
1731 }
1732 
1733 static void draw_frustum_culling_planes_calc(const float (*persmat)[4], float (*frustum_planes)[4])
1734 {
1735  planes_from_projmat(persmat,
1736  frustum_planes[0],
1737  frustum_planes[5],
1738  frustum_planes[1],
1739  frustum_planes[3],
1740  frustum_planes[4],
1741  frustum_planes[2]);
1742 
1743  /* Normalize. */
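 /* Normalized planes let the culling code compare the signed point-plane distance
  * directly against a bounding-sphere radius. */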
1744  for (int p = 0; p < 6; p++) {
1745  frustum_planes[p][3] /= normalize_v3(frustum_planes[p]);
1746  }
1747 }
1748 
 1749 static void draw_frustum_bound_sphere_calc(const BoundBox *bbox,
 1750  const float (*viewinv)[4],
1751  const float (*projmat)[4],
1752  const float (*projinv)[4],
1753  BoundSphere *bsphere)
1754 {
1755  /* Extract Bounding Sphere */
1756  if (projmat[3][3] != 0.0f) {
1757  /* Orthographic */
1758  /* The most extreme points on the near and far plane. (normalized device coords). */
1759  const float *nearpoint = bbox->vec[0];
1760  const float *farpoint = bbox->vec[6];
1761 
1762  /* just use median point */
1763  mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
1764  bsphere->radius = len_v3v3(bsphere->center, farpoint);
1765  }
1766  else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
1767  /* Perspective with symmetrical frustum. */
1768 
1769  /* We obtain the center and radius of the circumscribed circle of the
1770  * isosceles trapezoid composed by the diagonals of the near and far clipping plane */
1771 
1772  /* center of each clipping plane */
1773  float mid_min[3], mid_max[3];
1774  mid_v3_v3v3(mid_min, bbox->vec[3], bbox->vec[4]);
1775  mid_v3_v3v3(mid_max, bbox->vec[2], bbox->vec[5]);
1776 
1777  /* square length of the diagonals of each clipping plane */
1778  float a_sq = len_squared_v3v3(bbox->vec[3], bbox->vec[4]);
1779  float b_sq = len_squared_v3v3(bbox->vec[2], bbox->vec[5]);
1780 
1781  /* distance squared between clipping planes */
1782  float h_sq = len_squared_v3v3(mid_min, mid_max);
1783 
1784  float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
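 /* 'fac' solves (a/2)^2 + (fac * h)^2 == (b/2)^2 + ((1 - fac) * h)^2: the point on
  * the segment [mid_min, mid_max] equidistant from the near and far diagonal
  * corners, i.e. the circumcenter of the trapezoid mentioned above. */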
1785 
1786  /* The goal is to get the smallest sphere,
1787  * not the sphere that passes through each corner */
1788  CLAMP(fac, 0.0f, 1.0f);
1789 
1790  interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
1791 
1792  /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
1793  bsphere->radius = len_v3v3(bsphere->center, bbox->vec[1]);
1794  }
1795  else {
1796  /* Perspective with asymmetrical frustum. */
1797 
1798  /* We put the sphere center on the line that goes from origin
1799  * to the center of the far clipping plane. */
1800 
1801  /* Detect which of the corner of the far clipping plane is the farthest to the origin */
1802  float nfar[4]; /* most extreme far point in NDC space */
1803  float farxy[2]; /* far-point projection onto the near plane */
1804  float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
1805  float nearpoint[3]; /* most extreme near point in camera coordinate */
1806  float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinate */
1807  float F = -1.0f, N; /* square distance of far and near point to origin */
1808  float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
1809  float e, s; /* far and near clipping distance (<0) */
1810  float c; /* slope of center line = distance of far clipping center
1811  * to z axis / far clipping distance. */
1812  float z; /* projection of sphere center on z axis (<0) */
1813 
1814  /* Find farthest corner and center of far clip plane. */
1815  float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
1816  for (int i = 0; i < 4; i++) {
1817  float point[3];
1818  mul_v3_project_m4_v3(point, projinv, corner);
1819  float len = len_squared_v3(point);
1820  if (len > F) {
1821  copy_v3_v3(nfar, corner);
1822  copy_v3_v3(farpoint, point);
1823  F = len;
1824  }
1825  add_v3_v3(farcenter, point);
1826  /* rotate by 90 degree to walk through the 4 points of the far clip plane */
1827  float tmp = corner[0];
1828  corner[0] = -corner[1];
1829  corner[1] = tmp;
1830  }
1831 
1832  /* the far center is the average of the far clipping points */
1833  mul_v3_fl(farcenter, 0.25f);
1834  /* the extreme near point is the opposite point on the near clipping plane */
1835  copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
1836  mul_v3_project_m4_v3(nearpoint, projinv, nfar);
1837  /* this is a frustum projection */
1838  N = len_squared_v3(nearpoint);
1839  e = farpoint[2];
1840  s = nearpoint[2];
1841  /* distance to view Z axis */
1842  f = len_v2(farpoint);
1843  /* get corresponding point on the near plane */
1844  mul_v2_v2fl(farxy, farpoint, s / e);
1845  /* this formula preserve the sign of n */
1846  sub_v2_v2(nearpoint, farxy);
1847  n = f * s / e - len_v2(nearpoint);
1848  c = len_v2(farcenter) / e;
1849  /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
1850  z = (F - N) / (2.0f * (e - s + c * (f - n)));
1851 
1852  bsphere->center[0] = farcenter[0] * z / e;
1853  bsphere->center[1] = farcenter[1] * z / e;
1854  bsphere->center[2] = z;
1855 
1856  /* For XR, the view matrix may contain a scale factor. Then, transforming only the center
1857  * into world space after calculating the radius will result in incorrect behavior. */
1858  mul_m4_v3(viewinv, bsphere->center); /* Transform to world space. */
1859  mul_m4_v3(viewinv, farpoint);
1860  bsphere->radius = len_v3v3(bsphere->center, farpoint);
1861  }
1862 }
1863 
 1864 static void draw_view_matrix_state_update(ViewInfos *storage,
 1865  const float viewmat[4][4],
1866  const float winmat[4][4])
1867 {
1868  copy_m4_m4(storage->viewmat, viewmat);
1869  invert_m4_m4(storage->viewinv, storage->viewmat);
1870 
1871  copy_m4_m4(storage->winmat, winmat);
1872  invert_m4_m4(storage->wininv, storage->winmat);
1873 
1874  mul_m4_m4m4(storage->persmat, winmat, viewmat);
1875  invert_m4_m4(storage->persinv, storage->persmat);
1876 
1877  const bool is_persp = (winmat[3][3] == 0.0f);
1878 
1879  /* Near clip distance. */
1880  storage->viewvecs[0][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] - 1.0f) :
1881  -(winmat[3][2] + 1.0f) / winmat[2][2];
1882 
1883  /* Far clip distance. */
1884  storage->viewvecs[1][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] + 1.0f) :
1885  -(winmat[3][2] - 1.0f) / winmat[2][2];
1886 
1887  /* view vectors for the corners of the view frustum.
1888  * Can be used to recreate the world space position easily */
1889  float view_vecs[4][3] = {
1890  {-1.0f, -1.0f, -1.0f},
1891  {1.0f, -1.0f, -1.0f},
1892  {-1.0f, 1.0f, -1.0f},
1893  {-1.0f, -1.0f, 1.0f},
1894  };
1895 
1896  /* convert the view vectors to view space */
1897  for (int i = 0; i < 4; i++) {
1898  mul_project_m4_v3(storage->wininv, view_vecs[i]);
1899  /* normalized trick see:
1900  * http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */
1901  if (is_persp) {
1902  /* Divide XY by Z. */
1903  mul_v2_fl(view_vecs[i], 1.0f / view_vecs[i][2]);
1904  }
1905  }
1906 
1916  copy_v3_v3(storage->viewvecs[0], view_vecs[0]);
1917 
1918  /* we need to store the differences */
1919  storage->viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0];
1920  storage->viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1];
1921  storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2];
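 /* viewvecs[0] is thus the bottom-left corner of the frustum slice in view space
  * and viewvecs[1] its extent along X, Y and Z, so a shader can reconstruct a
  * view-space position from screen UV and depth with one MADD per axis (see the
  * article linked above). */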
1922 }
1923 
1924 DRWView *DRW_view_create(const float viewmat[4][4],
1925  const float winmat[4][4],
1926  const float (*culling_viewmat)[4],
1927  const float (*culling_winmat)[4],
1928  DRWCallVisibilityFn *visibility_fn)
1929 {
1931 
1933  view->culling_mask = 1u << DST.primary_view_num++;
1934  }
1935  else {
1936  BLI_assert(0);
1937  view->culling_mask = 0u;
1938  }
1939  view->clip_planes_len = 0;
1940  view->visibility_fn = visibility_fn;
1941  view->parent = NULL;
1942 
1943  copy_v4_fl4(view->storage.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
1944 
1945  DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat);
1946 
1947  return view;
1948 }
1949 
1951  const float viewmat[4][4],
1952  const float winmat[4][4])
1953 {
1954  /* Search original parent. */
1955  const DRWView *ori_view = parent_view;
1956  while (ori_view->parent != NULL) {
1957  ori_view = ori_view->parent;
1958  }
1959 
1961 
1962  /* Perform copy. */
1963  *view = *ori_view;
1964  view->parent = (DRWView *)ori_view;
1965 
1966  DRW_view_update_sub(view, viewmat, winmat);
1967 
1968  return view;
1969 }
1970 
1971 /* DRWView Update:
1972  * This is meant to be done on existing views when rendering in a loop and there is no
1973  * need to allocate more DRWViews. */
1974 
1975 void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
1976 {
1977  BLI_assert(view->parent != NULL);
1978 
1979  view->is_dirty = true;
1980  view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
1981 
1982  draw_view_matrix_state_update(&view->storage, viewmat, winmat);
1983 }
1984 
 1985 void DRW_view_update(DRWView *view,
 1986  const float viewmat[4][4],
1987  const float winmat[4][4],
1988  const float (*culling_viewmat)[4],
1989  const float (*culling_winmat)[4])
1990 {
1991  /* DO NOT UPDATE THE DEFAULT VIEW.
1992  * Create sub-views instead, or a copy. */
1994  BLI_assert(view->parent == NULL);
1995 
1996  view->is_dirty = true;
1997  view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
1998 
1999  draw_view_matrix_state_update(&view->storage, viewmat, winmat);
2000 
2001  /* Prepare frustum culling. */
2002 
2003 #ifdef DRW_DEBUG_CULLING
2004  static float mv[MAX_CULLED_VIEWS][4][4], mw[MAX_CULLED_VIEWS][4][4];
2005 
2006  /* Select view here. */
2007  if (view->culling_mask != 0) {
2008  uint index = bitscan_forward_uint(view->culling_mask);
2009 
2010  if (G.debug_value == 0) {
2011  copy_m4_m4(mv[index], culling_viewmat ? culling_viewmat : viewmat);
2012  copy_m4_m4(mw[index], culling_winmat ? culling_winmat : winmat);
2013  }
2014  else {
2015  culling_winmat = mw[index];
2016  culling_viewmat = mv[index];
2017  }
2018  }
2019 #endif
2020 
2021  float wininv[4][4];
2022  if (culling_winmat) {
2023  winmat = culling_winmat;
2024  invert_m4_m4(wininv, winmat);
2025  }
2026  else {
2027  copy_m4_m4(wininv, view->storage.wininv);
2028  }
2029 
2030  float viewinv[4][4];
2031  if (culling_viewmat) {
2032  viewmat = culling_viewmat;
2033  invert_m4_m4(viewinv, viewmat);
2034  }
2035  else {
2036  copy_m4_m4(viewinv, view->storage.viewinv);
2037  }
2038 
2039  draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners);
2040  draw_frustum_culling_planes_calc(view->storage.persmat, view->frustum_planes);
 2041  draw_frustum_bound_sphere_calc(
 2042  &view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere);
2043 
2044 #ifdef DRW_DEBUG_CULLING
2045  if (G.debug_value != 0) {
2047  view->frustum_bsphere.center, view->frustum_bsphere.radius, (const float[4]){1, 1, 0, 1});
2048  DRW_debug_bbox(&view->frustum_corners, (const float[4]){1, 1, 0, 1});
2049  }
2050 #endif
2051 }
2052 
2054 {
2055  return DST.view_default;
2056 }
2057 
2058 void DRW_view_reset(void)
2059 {
2060  DST.view_default = NULL;
2061  DST.view_active = NULL;
2062  DST.view_previous = NULL;
2063 }
2064 
2065 void DRW_view_default_set(const DRWView *view)
2066 {
2067  BLI_assert(DST.view_default == NULL);
2068  DST.view_default = (DRWView *)view;
2069 }
2070 
2071 void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
2072 {
2073  BLI_assert(plane_len <= MAX_CLIP_PLANES);
2074  view->clip_planes_len = plane_len;
2075  if (plane_len > 0) {
2076  memcpy(view->storage.clip_planes, planes, sizeof(float[4]) * plane_len);
2077  }
2078 }
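/* Example (illustration only): callers hand in an array of plane equations, at most
 * MAX_CLIP_PLANES of them; `clip_planes` below is a hypothetical array:
 *
 *   float clip_planes[6][4];
 *   ... fill each plane as (nx, ny, nz, d) ...
 *   DRW_view_clip_planes_set(view, clip_planes, 6);
 */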
2079 
2080 void DRW_view_camtexco_set(DRWView *view, float texco[4])
2081 {
2082  copy_v4_v4(view->storage.viewcamtexcofac, texco);
2083 }
2084 
2085 void DRW_view_camtexco_get(const DRWView *view, float r_texco[4])
2086 {
2087  copy_v4_v4(r_texco, view->storage.viewcamtexcofac);
2088 }
2089 
2090 void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)
2091 {
2092  memcpy(corners, &view->frustum_corners, sizeof(view->frustum_corners));
2093 }
2094 
2095 void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4])
2096 {
2097  memcpy(planes, &view->frustum_planes, sizeof(view->frustum_planes));
2098 }
2099 
2100 bool DRW_view_is_persp_get(const DRWView *view)
2101 {
2102  view = (view) ? view : DST.view_default;
2103  return view->storage.winmat[3][3] == 0.0f;
2104 }
2105 
2106 float DRW_view_near_distance_get(const DRWView *view)
2107 {
2108  view = (view) ? view : DST.view_default;
2109  const float(*projmat)[4] = view->storage.winmat;
2110 
2111  if (DRW_view_is_persp_get(view)) {
2112  return -projmat[3][2] / (projmat[2][2] - 1.0f);
2113  }
2114 
2115  return -(projmat[3][2] + 1.0f) / projmat[2][2];
2116 }
2117 
2118 float DRW_view_far_distance_get(const DRWView *view)
2119 {
2120  view = (view) ? view : DST.view_default;
2121  const float(*projmat)[4] = view->storage.winmat;
2122 
2123  if (DRW_view_is_persp_get(view)) {
2124  return -projmat[3][2] / (projmat[2][2] + 1.0f);
2125  }
2126 
2127  return -(projmat[3][2] - 1.0f) / projmat[2][2];
2128 }
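/* Worked example (illustration only): with a projection built the way perspective_m4()
 * builds it, winmat[2][2] = -(far + near) / (far - near) and
 * winmat[3][2] = -2 * far * near / (far - near), so the expressions above reduce to
 *
 *   -winmat[3][2] / (winmat[2][2] - 1.0f) == -near
 *   -winmat[3][2] / (winmat[2][2] + 1.0f) == -far
 *
 * i.e. both getters return the signed view-space Z of the clip plane (negative in front
 * of the camera). The orthographic branches reduce the same way with the
 * orthographic_m4() matrix entries. */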
2129 
2130 void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse)
2131 {
2132  view = (view) ? view : DST.view_default;
2133  const ViewInfos *storage = &view->storage;
2134  copy_m4_m4(mat, (inverse) ? storage->viewinv : storage->viewmat);
2135 }
2136 
2137 void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse)
2138 {
2139  view = (view) ? view : DST.view_default;
2140  const ViewInfos *storage = &view->storage;
2141  copy_m4_m4(mat, (inverse) ? storage->wininv : storage->winmat);
2142 }
2143 
2144 void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
2145 {
2146  view = (view) ? view : DST.view_default;
2147  const ViewInfos *storage = &view->storage;
2148  copy_m4_m4(mat, (inverse) ? storage->persinv : storage->persmat);
2149 }
2150 
2153 /* -------------------------------------------------------------------- */
2157 DRWPass *DRW_pass_create(const char *name, DRWState state)
2158 {
2159  DRWPass *pass = BLI_memblock_alloc(DST.vmempool->passes);
2160  pass->state = state | DRW_STATE_PROGRAM_POINT_SIZE;
2161  if (G.debug & G_DEBUG_GPU) {
2162  BLI_strncpy(pass->name, name, MAX_PASS_NAME);
2163  }
2164 
2165  pass->shgroups.first = NULL;
2166  pass->shgroups.last = NULL;
2167  pass->handle = DST.pass_handle;
2168  DRW_handle_increment(&DST.pass_handle);
2169 
2170  pass->original = NULL;
2171  pass->next = NULL;
2172 
2173  return pass;
2174 }
2175 
2176 DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state)
2177 {
2178  DRWPass *pass = DRW_pass_create(name, state);
2179  pass->original = original;
2180 
2181  return pass;
2182 }
2183 
2184 void DRW_pass_link(DRWPass *first, DRWPass *second)
2185 {
2186  BLI_assert(first != second);
2187  BLI_assert(first->next == NULL);
2188  first->next = second;
2189 }
2190 
2191 bool DRW_pass_is_empty(DRWPass *pass)
2192 {
2193  if (pass->original) {
2194  return DRW_pass_is_empty(pass->original);
2195  }
2196 
2197  LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
2198  if (!DRW_shgroup_is_empty(shgroup)) {
2199  return false;
2200  }
2201  }
2202  return true;
2203 }
2204 
2205 void DRW_pass_foreach_shgroup(DRWPass *pass,
2206  void (*callback)(void *userData, DRWShadingGroup *shgrp),
2207  void *userData)
2208 {
2209  LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
2210  callback(userData, shgroup);
2211  }
2212 }
2213 
2214 static int pass_shgroup_dist_sort(const void *a, const void *b)
2215 {
2216  const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
2217  const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
2218 
2219  if (shgrp_a->z_sorting.distance < shgrp_b->z_sorting.distance) {
2220  return 1;
2221  }
2222  if (shgrp_a->z_sorting.distance > shgrp_b->z_sorting.distance) {
2223  return -1;
2224  }
2225 
2226  /* If distances are the same, keep original order. */
2227  if (shgrp_a->z_sorting.original_index > shgrp_b->z_sorting.original_index) {
2228  return -1;
2229  }
2230 
2231  return 0;
2232 }
2233 
2234 /* ------------------ Shading group sorting --------------------- */
2235 
2236 #define SORT_IMPL_LINKTYPE DRWShadingGroup
2237 
2238 #define SORT_IMPL_FUNC shgroup_sort_fn_r
2239 #include "../../blenlib/intern/list_sort_impl.h"
2240 #undef SORT_IMPL_FUNC
2241 
2242 #undef SORT_IMPL_LINKTYPE
2243 
2244 void DRW_pass_sort_shgroup_z(DRWPass *pass)
2245 {
2246  const float(*viewinv)[4] = DST.view_active->storage.viewinv;
2247 
2248  if (!(pass->shgroups.first && pass->shgroups.first->next)) {
2249  /* Nothing to sort */
2250  return;
2251  }
2252 
2253  uint index = 0;
2254  DRWShadingGroup *shgroup = pass->shgroups.first;
2255  do {
2256  DRWResourceHandle handle = 0;
2257  /* Find first DRWCommandDraw. */
2258  DRWCommandChunk *cmd_chunk = shgroup->cmd.first;
2259  for (; cmd_chunk && handle == 0; cmd_chunk = cmd_chunk->next) {
2260  for (int i = 0; i < cmd_chunk->command_used && handle == 0; i++) {
2261  if (DRW_CMD_DRAW == command_type_get(cmd_chunk->command_type, i)) {
2262  handle = cmd_chunk->commands[i].draw.handle;
2263  }
2264  }
2265  }
2266  /* To be sorted, a shgroup needs to have at least one draw command. */
2267  /* FIXME(fclem): In some cases, we can still have empty shading groups to sort. However,
2268  * their final order is not well defined.
2269  * (see T76730 & D7729). */
2270  // BLI_assert(handle != 0);
2271 
2272  DRWObjectMatrix *obmats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle);
2273 
2274  /* Compute distance to camera. */
2275  float tmp[3];
2276  sub_v3_v3v3(tmp, viewinv[3], obmats->model[3]);
2277  shgroup->z_sorting.distance = dot_v3v3(viewinv[2], tmp);
2278  shgroup->z_sorting.original_index = index++;
2279 
2280  } while ((shgroup = shgroup->next));
2281 
2282  /* Sort using computed distances. */
2283  pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort);
2284 
2285  /* Find the new last */
2286  DRWShadingGroup *last = pass->shgroups.first;
2287  while ((last = last->next)) {
2288  /* Reset the pass id for debugging. */
2289  last->pass_handle = pass->handle;
2290  }
2291  pass->shgroups.last = last;
2292 }
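/* Example (illustration only): an engine with a blended/transparent pass would call this
 * once per redraw, after all shading groups have been added, so surfaces are drawn back
 * to front; `psl->transparent_pass` is a hypothetical pass list member:
 *
 *   DRW_pass_sort_shgroup_z(psl->transparent_pass);
 */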
2293 
2294 void DRW_pass_sort_shgroup_reverse(DRWPass *pass)
2295 {
2296  pass->shgroups.last = pass->shgroups.first;
2297  /* WARNING: Assume that DRWShadingGroup->next is the first member. */
2298  BLI_linklist_reverse((LinkNode **)&pass->shgroups.first);
2299 }
2300 