draw_manager_exec.c (Blender 3.3)
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2016 Blender Foundation. */

#include "draw_manager.h"

#include "BLI_alloca.h"
#include "BLI_math.h"
#include "BLI_math_bits.h"
#include "BLI_memblock.h"

#include "BKE_global.h"

#include "GPU_compute.h"
#include "GPU_platform.h"
#include "GPU_shader.h"
#include "GPU_state.h"

#ifdef USE_GPU_SELECT
#  include "GPU_select.h"
#endif

void DRW_select_load_id(uint id)
{
#ifdef USE_GPU_SELECT
  BLI_assert(G.f & G_FLAG_PICKSEL);
  DST.select_id = id;
#endif
}

#define DEBUG_UBO_BINDING

/* Mutable state tracked while executing the commands of one shading group. */
typedef struct DRWCommandsState {
  GPUBatch *batch;
  int resource_chunk;
  int resource_id;
  int base_inst;
  int inst_count;
  bool neg_scale;
  /* Resource location. */
  int obmats_loc;
  int obinfos_loc;
  int obattrs_loc;
  int baseinst_loc;
  int chunkid_loc;
  int resourceid_loc;
  /* Legacy matrix support. */
  int obmat_loc;
  int obinv_loc;
  /* Uniform Attributes. */
  DRWSparseUniformBuf *obattrs_ubo;
  /* Selection ID state. */
  GPUVertBuf *select_buf;
  uint select_id;
  /* Drawing State */
  DRWState drw_state_enabled;
  DRWState drw_state_disabled;
} DRWCommandsState;

/* -------------------------------------------------------------------- */
/** \name Draw State (DRW_state)
 * \{ */

void drw_state_set(DRWState state)
{
  /* Mask locked state. */
  state = (~DST.state_lock & state) | (DST.state_lock & DST.state);

  if (DST.state == state) {
    return;
  }

  eGPUWriteMask write_mask = 0;
  eGPUBlend blend = 0;
  eGPUFaceCullTest culling_test = 0;
  eGPUDepthTest depth_test = 0;
  eGPUStencilTest stencil_test = 0;
  eGPUStencilOp stencil_op = 0;
  eGPUProvokingVertex provoking_vert = 0;

  if (state & DRW_STATE_WRITE_DEPTH) {
    write_mask |= GPU_WRITE_DEPTH;
  }
  if (state & DRW_STATE_WRITE_COLOR) {
    write_mask |= GPU_WRITE_COLOR;
  }
  if (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    write_mask |= GPU_WRITE_STENCIL;
  }

  switch (state & (DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT)) {
    case DRW_STATE_CULL_BACK:
      culling_test = GPU_CULL_BACK;
      break;
    case DRW_STATE_CULL_FRONT:
      culling_test = GPU_CULL_FRONT;
      break;
    default:
      culling_test = GPU_CULL_NONE;
      break;
  }

  switch (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    case DRW_STATE_DEPTH_LESS:
      depth_test = GPU_DEPTH_LESS;
      break;
    case DRW_STATE_DEPTH_LESS_EQUAL:
      depth_test = GPU_DEPTH_LESS_EQUAL;
      break;
    case DRW_STATE_DEPTH_EQUAL:
      depth_test = GPU_DEPTH_EQUAL;
      break;
    case DRW_STATE_DEPTH_GREATER:
      depth_test = GPU_DEPTH_GREATER;
      break;
    case DRW_STATE_DEPTH_GREATER_EQUAL:
      depth_test = GPU_DEPTH_GREATER_EQUAL;
      break;
    case DRW_STATE_DEPTH_ALWAYS:
      depth_test = GPU_DEPTH_ALWAYS;
      break;
    default:
      depth_test = GPU_DEPTH_NONE;
      break;
  }

  switch (state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    case DRW_STATE_WRITE_STENCIL:
      stencil_op = GPU_STENCIL_OP_REPLACE;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_PASS:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_PASS;
      GPU_stencil_write_mask_set(0xFF);
      break;
    case DRW_STATE_WRITE_STENCIL_SHADOW_FAIL:
      stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_FAIL;
      GPU_stencil_write_mask_set(0xFF);
      break;
    default:
      stencil_op = GPU_STENCIL_OP_NONE;
      GPU_stencil_write_mask_set(0x00);
      break;
  }

  switch (state & DRW_STATE_STENCIL_TEST_ENABLED) {
    case DRW_STATE_STENCIL_ALWAYS:
      stencil_test = GPU_STENCIL_ALWAYS;
      break;
    case DRW_STATE_STENCIL_EQUAL:
      stencil_test = GPU_STENCIL_EQUAL;
      break;
    case DRW_STATE_STENCIL_NEQUAL:
      stencil_test = GPU_STENCIL_NEQUAL;
      break;
    default:
      stencil_test = GPU_STENCIL_NONE;
      break;
  }

  switch (state & DRW_STATE_BLEND_ENABLED) {
    case DRW_STATE_BLEND_ADD:
      blend = GPU_BLEND_ADDITIVE;
      break;
    case DRW_STATE_BLEND_ADD_FULL:
      blend = GPU_BLEND_ADDITIVE_PREMULT;
      break;
    case DRW_STATE_BLEND_ALPHA:
      blend = GPU_BLEND_ALPHA;
      break;
    case DRW_STATE_BLEND_ALPHA_PREMUL:
      blend = GPU_BLEND_ALPHA_PREMULT;
      break;
    case DRW_STATE_BLEND_BACKGROUND:
      blend = GPU_BLEND_BACKGROUND;
      break;
    case DRW_STATE_BLEND_OIT:
      blend = GPU_BLEND_OIT;
      break;
    case DRW_STATE_BLEND_MUL:
      blend = GPU_BLEND_MULTIPLY;
      break;
    case DRW_STATE_BLEND_SUB:
      blend = GPU_BLEND_SUBTRACT;
      break;
    case DRW_STATE_BLEND_CUSTOM:
      blend = GPU_BLEND_CUSTOM;
      break;
    case DRW_STATE_LOGIC_INVERT:
      blend = GPU_BLEND_INVERT;
      break;
    case DRW_STATE_BLEND_ALPHA_UNDER_PREMUL:
      blend = GPU_BLEND_ALPHA_UNDER_PREMUL;
      break;
    default:
      blend = GPU_BLEND_NONE;
      break;
  }

  GPU_state_set(
      write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);

  if (state & DRW_STATE_SHADOW_OFFSET) {
    GPU_shadow_offset(true);
  }
  else {
    GPU_shadow_offset(false);
  }

  /* TODO: this should be part of shader state. */
  if (state & DRW_STATE_CLIP_PLANES) {
    GPU_clip_distances(DST.view_active->clip_planes_len);
  }
  else {
    GPU_clip_distances(0);
  }

  if (state & DRW_STATE_IN_FRONT_SELECT) {
    /* XXX `GPU_depth_range` is not a perfect solution
     * since very distant geometries can still be occluded.
     * Also the depth test precision of these geometries is impaired.
     * However, it solves the selection for the vast majority of cases. */
    GPU_depth_range(0.0f, 0.01f);
  }
  else {
    GPU_depth_range(0.0f, 1.0f);
  }

  if (state & DRW_STATE_PROGRAM_POINT_SIZE) {
    GPU_program_point_size(true);
  }
  else {
    GPU_program_point_size(false);
  }

  if (state & DRW_STATE_FIRST_VERTEX_CONVENTION) {
    GPU_provoking_vertex(GPU_VERTEX_FIRST);
  }
  else {
    GPU_provoking_vertex(GPU_VERTEX_LAST);
  }

  DST.state = state;
}
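
/* Illustrative sketch (not part of the original file): a caller composes a
 * DRWState bitfield and lets drw_state_set() translate it into one
 * GPU_state_set() call plus the auxiliary toggles above. */
#if 0
static void example_opaque_state(void)
{
  /* Write color and depth, standard depth test, cull back faces. */
  const DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
                         DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_CULL_BACK;
  drw_state_set(state);
}
#endif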

static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
{
  /* Reminders:
   * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value),
   *   stencil_value being the value stored in the stencil buffer.
   * - (write_mask & reference) is what gets written if the test condition is fulfilled.
   */
  GPU_stencil_write_mask_set(write_mask);
  GPU_stencil_reference_set(reference);
  GPU_stencil_compare_mask_set(compare_mask);
}
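
/* Worked example (not part of the original file): with write_mask = 0xFF,
 * reference = 0x02 and compare_mask = 0x02, a GPU_STENCIL_EQUAL test passes
 * only when (0x02 & stencil_value) == 0x02, and a passing fragment writes
 * back 0x02 (0xFF & 0x02). */
#if 0
drw_stencil_state_set(0xFF, 0x02, 0x02);
#endif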

void DRW_state_reset_ex(DRWState state)
{
  DST.state = ~state;
  drw_state_set(state);
}

static void drw_state_validate(void)
{
  /* Cannot write to stencil buffer without stencil test. */
  if (DST.state & DRW_STATE_WRITE_STENCIL_ENABLED) {
    BLI_assert(DST.state & DRW_STATE_STENCIL_TEST_ENABLED);
  }
  /* Cannot write to depth buffer without depth test. */
  if (DST.state & DRW_STATE_WRITE_DEPTH) {
    BLI_assert(DST.state & DRW_STATE_DEPTH_TEST_ENABLED);
  }
}

void DRW_state_lock(DRWState state)
{
  DST.state_lock = state;

  /* We must get the current state to avoid overriding it. */
  /* Not complete, but that is just what we need for now. */
  if (state & DRW_STATE_WRITE_DEPTH) {
    SET_FLAG_FROM_TEST(DST.state, GPU_depth_mask_get(), DRW_STATE_WRITE_DEPTH);
  }
  if (state & DRW_STATE_DEPTH_TEST_ENABLED) {
    DST.state &= ~DRW_STATE_DEPTH_TEST_ENABLED;

    switch (GPU_depth_test_get()) {
      case GPU_DEPTH_ALWAYS:
        DST.state |= DRW_STATE_DEPTH_ALWAYS;
        break;
      case GPU_DEPTH_LESS:
        DST.state |= DRW_STATE_DEPTH_LESS;
        break;
      case GPU_DEPTH_LESS_EQUAL:
        DST.state |= DRW_STATE_DEPTH_LESS_EQUAL;
        break;
      case GPU_DEPTH_EQUAL:
        DST.state |= DRW_STATE_DEPTH_EQUAL;
        break;
      case GPU_DEPTH_GREATER:
        DST.state |= DRW_STATE_DEPTH_GREATER;
        break;
      case GPU_DEPTH_GREATER_EQUAL:
        DST.state |= DRW_STATE_DEPTH_GREATER_EQUAL;
        break;
      default:
        break;
    }
  }
}
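
/* Illustrative sketch (not part of the original file): locked bits keep their
 * current value through subsequent drw_state_set() calls (see the masking at
 * the top of drw_state_set()); locking 0 releases everything. */
#if 0
DRW_state_lock(DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_TEST_ENABLED);
/* ... passes drawn here cannot override the depth write/test state ... */
DRW_state_lock(0);
#endif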

void DRW_state_reset(void)
{
  DRW_state_reset_ex(DRW_STATE_DEFAULT);

  GPU_texture_unbind_all();
  GPU_uniformbuf_unbind_all();
  GPU_storagebuf_unbind_all();

  /* Should stay constant during the whole rendering. */
  GPU_point_size(5);
  GPU_line_smooth(false);
  /* Bypass #U.pixelsize factor by using a factor of 0.0f. Will be clamped to 1.0f. */
  GPU_line_width(0.0f);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Culling (DRW_culling)
 * \{ */

static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
{
  DRWCullingState *culling = DRW_memblock_elem_from_handle(DST.vmempool->cullstates, handle);
  return (culling->mask & view->culling_mask) != 0;
}

void DRW_view_set_active(const DRWView *view)
{
  DST.view_active = (view) ? ((DRWView *)view) : DST.view_default;
}

const DRWView *DRW_view_get_active(void)
{
  return DST.view_active;
}

/* Returns true if the given BoundSphere intersects the current view frustum. */
static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere,
                                     const float (*frustum_planes)[4],
                                     const BoundSphere *bsphere)
{
  /* Bypass test if radius is negative. */
  if (bsphere->radius < 0.0f) {
    return true;
  }

  /* Do a rough test first: Sphere VS Sphere intersect. */
  float center_dist_sq = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
  float radius_sum = bsphere->radius + frustum_bsphere->radius;
  if (center_dist_sq > square_f(radius_sum)) {
    return false;
  }
  /* TODO: we could test against the inscribed sphere of the frustum to early out positively. */

  /* Test against the 6 frustum planes. */
  /* TODO: order planes with sides first then far then near clip. Should be better culling
   * heuristic when sculpting. */
  for (int p = 0; p < 6; p++) {
    float dist = plane_point_side_v3(frustum_planes[p], bsphere->center);
    if (dist < -bsphere->radius) {
      return false;
    }
  }
  return true;
}

static bool draw_culling_box_test(const float (*frustum_planes)[4], const BoundBox *bbox)
{
  /* 6 view frustum planes. */
  for (int p = 0; p < 6; p++) {
    /* 8 box vertices. */
    for (int v = 0; v < 8; v++) {
      float dist = plane_point_side_v3(frustum_planes[p], bbox->vec[v]);
      if (dist > 0.0f) {
        /* At least one point in front of this plane.
         * Go to next plane. */
        break;
      }
      if (v == 7) {
        /* 8 points behind this plane. */
        return false;
      }
    }
  }
  return true;
}

static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
{
  /* Test against the 8 frustum corners. */
  for (int c = 0; c < 8; c++) {
    float dist = plane_point_side_v3(plane, corners->vec[c]);
    if (dist < 0.0f) {
      return true;
    }
  }
  return false;
}

bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
{
  view = view ? view : DST.view_default;
  return draw_culling_sphere_test(&view->frustum_bsphere, view->frustum_planes, bsphere);
}

bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
{
  view = view ? view : DST.view_default;
  return draw_culling_box_test(view->frustum_planes, bbox);
}

bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
{
  view = view ? view : DST.view_default;
  return draw_culling_plane_test(&view->frustum_corners, plane);
}
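
/* Illustrative sketch (not part of the original file): passing NULL uses
 * DST.view_default. A sphere is rejected only when it lies entirely behind
 * some frustum plane (signed distance < -radius); one merely straddling a
 * plane is conservatively kept. */
#if 0
const BoundSphere bsphere = {.center = {0.0f, 0.0f, 0.0f}, .radius = 1.0f};
const bool visible = DRW_culling_sphere_test(NULL, &bsphere);
#endif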

bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
{
  view = view ? view : DST.view_default;
  float tobmat[4][4];
  transpose_m4_m4(tobmat, obmat);
  for (int i = 6; i--;) {
    float frustum_plane_local[4], bb_near[3], bb_far[3];
    mul_v4_m4v4(frustum_plane_local, tobmat, view->frustum_planes[i]);
    aabb_get_near_far_from_plane(frustum_plane_local, min, max, bb_near, bb_far);

    if (plane_point_side_v3(frustum_plane_local, bb_far) < 0.0f) {
      return false;
    }
  }

  return true;
}
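
/* Illustrative sketch (not part of the original file, `ob` is a hypothetical
 * Object pointer): the frustum planes are moved into object space with the
 * transposed matrix, so the axis-aligned min/max never has to be
 * transformed. */
#if 0
float min[3] = {-1.0f, -1.0f, -1.0f}, max[3] = {1.0f, 1.0f, 1.0f};
if (DRW_culling_min_max_test(NULL, ob->obmat, min, max)) {
  /* Object-space bounds intersect the default view frustum. */
}
#endif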

void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners)
{
  view = view ? view : DST.view_default;
  *corners = view->frustum_corners;
}

void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
{
  view = view ? view : DST.view_default;
  memcpy(planes, view->frustum_planes, sizeof(float[6][4]));
}

static void draw_compute_culling(DRWView *view)
{
  view = view->parent ? view->parent : view;

  /* TODO(fclem): multi-thread this. */
  /* TODO(fclem): compute all dirty views at once. */
  if (!view->is_dirty) {
    return;
  }

  BLI_memblock_iter iter;
  BLI_memblock_iternew(DST.vmempool->cullstates, &iter);
  DRWCullingState *cull;
  while ((cull = BLI_memblock_iterstep(&iter))) {
    if (cull->bsphere.radius < 0.0) {
      cull->mask = 0;
    }
    else {
      bool culled = !draw_culling_sphere_test(
          &view->frustum_bsphere, view->frustum_planes, &cull->bsphere);

#ifdef DRW_DEBUG_CULLING
      if (G.debug_value != 0) {
        if (culled) {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, (const float[4]){1, 0, 0, 1});
        }
        else {
          DRW_debug_sphere(
              cull->bsphere.center, cull->bsphere.radius, (const float[4]){0, 1, 0, 1});
        }
      }
#endif

      if (view->visibility_fn) {
        culled = !view->visibility_fn(!culled, cull->user_data);
      }

      SET_FLAG_FROM_TEST(cull->mask, culled, view->culling_mask);
    }
  }

  view->is_dirty = false;
}
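
/* Illustrative note (not part of the original file): each view owns one bit
 * in `culling_mask`. draw_compute_culling() sets that bit in a resource's
 * `cull->mask` when the bounding sphere is outside the view, and
 * draw_call_is_culled() later skips any draw whose mask intersects the
 * active view's bit. */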

/** \} */

/* -------------------------------------------------------------------- */
/** \name Draw (DRW_draw)
 * \{ */

BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
                                          DRWResourceHandle *handle,
                                          float obmat_loc,
                                          float obinv_loc)
{
  /* Still supported for compatibility with gpu_shader_* but should be forbidden. */
  DRWObjectMatrix *ob_mats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, handle);
  if (obmat_loc != -1) {
    GPU_shader_uniform_vector(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
  }
  if (obinv_loc != -1) {
    GPU_shader_uniform_vector(shgroup->shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
  }
}

BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
{
  DST.batch = geom;

  GPU_batch_set_shader(geom, shgroup->shader);
}

BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
                                      GPUBatch *geom,
                                      int vert_first,
                                      int vert_count,
                                      int inst_first,
                                      int inst_count,
                                      int baseinst_loc)
{
  /* inst_count can be -1. */
  inst_count = max_ii(0, inst_count);

  if (baseinst_loc != -1) {
    /* Fallback when ARB_shader_draw_parameters is not supported. */
    GPU_shader_uniform_vector_int(shgroup->shader, baseinst_loc, 1, 1, (int *)&inst_first);
    /* Avoids VAO reconfiguration on older hardware. (see GPU_batch_draw_advanced) */
    inst_first = 0;
  }

  /* Bind vertex array. */
  if (DST.batch != geom) {
    draw_geometry_bind(shgroup, geom);
  }

  GPU_batch_draw_advanced(geom, vert_first, vert_count, inst_first, inst_count);
}

BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  if (state->inst_count == 0) {
    return;
  }
  if (state->baseinst_loc == -1) {
    /* Bind vertex array. */
    if (DST.batch != state->batch) {
      GPU_draw_list_submit(DST.draw_list);
      draw_geometry_bind(shgroup, state->batch);
    }
    GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
  }
  /* Fallback when unsupported. */
  else {
    draw_geometry_execute(
        shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
  }
}
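
/* Illustrative note (not part of the original file): when the shader has no
 * base-instance uniform (`baseinst_loc == -1`, i.e. shader draw parameters
 * are usable), pending instanced draws accumulate into DST.draw_list and are
 * submitted together; otherwise the base instance is uploaded as a regular
 * uniform and each run is drawn immediately via draw_geometry_execute(). */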

static void draw_update_uniforms(DRWShadingGroup *shgroup,
                                 DRWCommandsState *state,
                                 bool *use_tfeedback)
{
#define MAX_UNIFORM_STACK_SIZE 64

  /* Uniform array elements stored as separate entries. We need to batch these together. */
  int array_uniform_loc = -1;
  int array_index = 0;
  float mat4_stack[4 * 4];

  /* Loop through uniforms in reverse order. */
  for (DRWUniformChunk *unichunk = shgroup->uniforms; unichunk; unichunk = unichunk->next) {
    DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used - 1;

    for (int i = 0; i < unichunk->uniform_used; i++, uni--) {
      /* For uniform array copies, copy per-array-element data into local buffer before upload. */
      if (uni->arraysize > 1 && uni->type == DRW_UNIFORM_FLOAT_COPY) {
        /* Only written for mat4 copy for now and is not meant to become generalized. */
        /* TODO(@fclem): Use UBOs/SSBOs instead of inline mat4 copies. */
        BLI_assert(uni->arraysize == 4 && uni->length == 4);
        /* Begin copying uniform array. */
        if (array_uniform_loc == -1) {
          array_uniform_loc = uni->location;
          array_index = uni->arraysize * uni->length;
        }
        /* Debug check same array loc. */
        BLI_assert(array_uniform_loc > -1 && array_uniform_loc == uni->location);
        /* Copy array element data to local buffer. */
        array_index -= uni->length;
        memcpy(&mat4_stack[array_index], uni->fvalue, sizeof(float) * uni->length);
        /* Flush array data to shader. */
        if (array_index <= 0) {
          GPU_shader_uniform_vector(shgroup->shader, uni->location, 16, 1, mat4_stack);
          array_uniform_loc = -1;
        }
        continue;
      }

      /* Handle standard cases. */
      switch (uni->type) {
        case DRW_UNIFORM_INT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_vector_int(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->ivalue);
          }
          break;
        case DRW_UNIFORM_INT:
          GPU_shader_uniform_vector_int(
              shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
          break;
        case DRW_UNIFORM_FLOAT_COPY:
          BLI_assert(uni->arraysize == 1);
          if (uni->arraysize == 1) {
            GPU_shader_uniform_vector(
                shgroup->shader, uni->location, uni->length, uni->arraysize, uni->fvalue);
          }
          break;
        case DRW_UNIFORM_FLOAT:
          GPU_shader_uniform_vector(
              shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
          break;
        case DRW_UNIFORM_TEXTURE:
          GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location, false);
          break;
        case DRW_UNIFORM_TEXTURE_REF:
          GPU_texture_bind_ex(*uni->texture_ref, uni->sampler_state, uni->location, false);
          break;
        case DRW_UNIFORM_IMAGE:
          GPU_texture_image_bind(uni->texture, uni->location);
          break;
        case DRW_UNIFORM_IMAGE_REF:
          GPU_texture_image_bind(*uni->texture_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK:
          GPU_uniformbuf_bind(uni->block, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_REF:
          GPU_uniformbuf_bind(*uni->block_ref, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK:
          GPU_storagebuf_bind(uni->ssbo, uni->location);
          break;
        case DRW_UNIFORM_STORAGE_BLOCK_REF:
          GPU_storagebuf_bind(*uni->ssbo_ref, uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBMATS:
          state->obmats_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBINFOS:
          state->obinfos_loc = uni->location;
          GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], uni->location);
          break;
        case DRW_UNIFORM_BLOCK_OBATTRS:
          state->obattrs_loc = uni->location;
          state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
                                                               uni->uniform_attrs);
          DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
          break;
        case DRW_UNIFORM_RESOURCE_CHUNK:
          state->chunkid_loc = uni->location;
          GPU_shader_uniform_int(shgroup->shader, uni->location, 0);
          break;
        case DRW_UNIFORM_RESOURCE_ID:
          state->resourceid_loc = uni->location;
          break;
        case DRW_UNIFORM_TFEEDBACK_TARGET:
          BLI_assert(uni->pvalue && (*use_tfeedback == false));
          *use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
                                                                ((GPUVertBuf *)uni->pvalue));
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
          GPU_vertbuf_bind_as_ssbo(uni->vertbuf, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF:
          GPU_vertbuf_bind_as_ssbo(*uni->vertbuf_ref, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE:
          GPU_vertbuf_bind_as_texture(uni->vertbuf, uni->location);
          break;
        case DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF:
          GPU_vertbuf_bind_as_texture(*uni->vertbuf_ref, uni->location);
          break;
        /* Legacy/Fallback support. */
        case DRW_UNIFORM_BASE_INSTANCE:
          state->baseinst_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX:
          state->obmat_loc = uni->location;
          break;
        case DRW_UNIFORM_MODEL_MATRIX_INVERSE:
          state->obinv_loc = uni->location;
          break;
      }
    }
  }
  /* Ensure uniform arrays copied. */
  BLI_assert(array_index == 0);
  BLI_assert(array_uniform_loc == -1);
  UNUSED_VARS_NDEBUG(array_uniform_loc);
}
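
/* Illustrative note (not part of the original file): a mat4 "copy" uniform is
 * stored as four consecutive vec4 entries sharing one location. Because the
 * uniforms are walked in reverse order above, `mat4_stack` is filled back to
 * front and flushed with a single 16-float GPU_shader_uniform_vector() call
 * once `array_index` reaches 0. */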

BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
                                   DRWCommandsState *state,
                                   GPUBatch *batch,
                                   const DRWResourceHandle *handle)
{
  const bool is_instancing = (batch->inst[0] != NULL);
  int start = 0;
  int count = 1;
  int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
                            GPU_vertbuf_get_vertex_len(batch->verts[0]);
  /* HACK: get VBO data without actually drawing. */
  int *select_id = (void *)GPU_vertbuf_get_data(state->select_buf);

  /* Batching */
  if (!is_instancing) {
    /* FIXME: Meh, a bit nasty. */
    if (batch->prim_type == GPU_PRIM_TRIS) {
      count = 3;
    }
    else if (batch->prim_type == GPU_PRIM_LINES) {
      count = 2;
    }
  }

  while (start < tot) {
    GPU_select_load_id(select_id[start]);
    if (is_instancing) {
      draw_geometry_execute(shgroup, batch, 0, 0, start, count, state->baseinst_loc);
    }
    else {
      draw_geometry_execute(
          shgroup, batch, start, count, DRW_handle_id_get(handle), 0, state->baseinst_loc);
    }
    start += count;
  }
}
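
/* Worked example (not part of the original file): for a GPU_PRIM_TRIS batch
 * of 9 vertices, the loop above issues three draws of 3 vertices each,
 * loading select_id[0], select_id[3] and select_id[6] before them, so every
 * triangle gets its own picking ID. */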

typedef struct DRWCommandIterator {
  DRWCommandChunk *curr_chunk;
  int cmd_index;
} DRWCommandIterator;

static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
{
  iter->curr_chunk = shgroup->cmd.first;
  iter->cmd_index = 0;
}

static DRWCommand *draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType *cmd_type)
{
  if (iter->curr_chunk) {
    if (iter->cmd_index == iter->curr_chunk->command_len) {
      iter->curr_chunk = iter->curr_chunk->next;
      iter->cmd_index = 0;
    }
    if (iter->curr_chunk) {
      *cmd_type = command_type_get(iter->curr_chunk->command_type, iter->cmd_index);
      if (iter->cmd_index < iter->curr_chunk->command_used) {
        return iter->curr_chunk->commands + iter->cmd_index++;
      }
    }
  }
  return NULL;
}

static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
{
  /* Front face is not a resource but it is inside the resource handle. */
  bool neg_scale = DRW_handle_negative_scale_get(handle);
  if (neg_scale != state->neg_scale) {
    state->neg_scale = neg_scale;
    GPU_front_facing(neg_scale != DST.view_active->is_inverted);
  }

  int chunk = DRW_handle_chunk_get(handle);
  if (state->resource_chunk != chunk) {
    if (state->chunkid_loc != -1) {
      GPU_shader_uniform_int(DST.shader, state->chunkid_loc, chunk);
    }
    if (state->obmats_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
    }
    if (state->obinfos_loc != -1) {
      GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
      GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
    }
    if (state->obattrs_loc != -1) {
      DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
      DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
    }
    state->resource_chunk = chunk;
  }

  if (state->resourceid_loc != -1) {
    int id = DRW_handle_id_get(handle);
    if (state->resource_id != id) {
      GPU_shader_uniform_int(DST.shader, state->resourceid_loc, id);
      state->resource_id = id;
    }
  }
}
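
/* Illustrative note (not part of the original file): a DRWResourceHandle
 * packs the negative-scale bit, the chunk index and the per-chunk resource id
 * into a single 32-bit value (see the DRW_handle_*_get() accessors in
 * draw_manager.h), so rebinding only happens when the chunk or the winding
 * bit actually changes. */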

static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_indirect_call(shgroup, state);
  GPU_draw_list_submit(DST.draw_list);

  state->batch = NULL;
  state->inst_count = 0;
  state->base_inst = -1;
}

static void draw_call_single_do(DRWShadingGroup *shgroup,
                                DRWCommandsState *state,
                                GPUBatch *batch,
                                DRWResourceHandle handle,
                                int vert_first,
                                int vert_count,
                                int inst_first,
                                int inst_count,
                                bool do_base_instance)
{
  draw_call_batching_flush(shgroup, state);

  draw_call_resource_bind(state, &handle);

  /* TODO: This is legacy. Needs to be removed. */
  if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
    draw_legacy_matrix_update(shgroup, &handle, state->obmat_loc, state->obinv_loc);
  }

  if (G.f & G_FLAG_PICKSEL) {
    if (state->select_buf != NULL) {
      draw_select_buffer(shgroup, state, batch, &handle);
      return;
    }

    GPU_select_load_id(state->select_id);
  }

  draw_geometry_execute(shgroup,
                        batch,
                        vert_first,
                        vert_count,
                        do_base_instance ? DRW_handle_id_get(&handle) : inst_first,
                        inst_count,
                        state->baseinst_loc);
}

static void draw_call_batching_start(DRWCommandsState *state)
{
  state->neg_scale = false;
  state->resource_chunk = 0;
  state->resource_id = -1;
  state->base_inst = 0;
  state->inst_count = 0;
  state->batch = NULL;

  state->select_id = -1;
  state->select_buf = NULL;
}

/* NOTE: Does not support batches with instancing VBOs. */
static void draw_call_batching_do(DRWShadingGroup *shgroup,
                                  DRWCommandsState *state,
                                  DRWCommandDraw *call)
{
  /* If any condition requires to interrupt the merging. */
  bool neg_scale = DRW_handle_negative_scale_get(&call->handle);
  int chunk = DRW_handle_chunk_get(&call->handle);
  int id = DRW_handle_id_get(&call->handle);
  if ((state->neg_scale != neg_scale) ||  /* Need to change state. */
      (state->resource_chunk != chunk) || /* Need to change UBOs. */
      (state->batch != call->batch)       /* Need to change VAO. */
  ) {
    draw_call_batching_flush(shgroup, state);

    state->batch = call->batch;
    state->inst_count = 1;
    state->base_inst = id;

    draw_call_resource_bind(state, &call->handle);
  }
  /* Is the id consecutive? */
  else if (id != state->base_inst + state->inst_count) {
    /* We need to add a draw command for the pending instances. */
    draw_indirect_call(shgroup, state);
    state->inst_count = 1;
    state->base_inst = id;
  }
  /* We avoid a draw call by merging with the preceding
   * draw call using instancing. */
  else {
    state->inst_count++;
  }
}
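
/* Worked example (not part of the original file): three draws of the same
 * batch with consecutive resource ids 5, 6, 7 merge into one instanced call
 * with base_inst = 5 and inst_count = 3. A following draw with id 9 is not
 * consecutive, so the pending call is flushed and a new run starts at
 * base_inst = 9. */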

/* Flush remaining pending draw calls. */
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
{
  draw_call_batching_flush(shgroup, state);

  /* Reset state. */
  if (state->neg_scale) {
    GPU_front_facing(DST.view_active->is_inverted);
  }
  if (state->obmats_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
  }
  if (state->obinfos_loc != -1) {
    GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
  }
  if (state->obattrs_loc != -1) {
    DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
  }
}

static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
  BLI_assert(shgroup->shader);

  DRWCommandsState state = {
      .obmats_loc = -1,
      .obinfos_loc = -1,
      .obattrs_loc = -1,
      .baseinst_loc = -1,
      .chunkid_loc = -1,
      .resourceid_loc = -1,
      .obmat_loc = -1,
      .obinv_loc = -1,
      .obattrs_ubo = NULL,
      .drw_state_enabled = 0,
      .drw_state_disabled = 0,
  };

  const bool shader_changed = (DST.shader != shgroup->shader);
  bool use_tfeedback = false;

  if (shader_changed) {
    if (DST.shader) {
      GPU_shader_unbind();

      /* Unbinding can be costly. Skip in normal condition. */
      if (G.debug & G_DEBUG_GPU) {
        GPU_texture_unbind_all();
        GPU_uniformbuf_unbind_all();
        GPU_storagebuf_unbind_all();
      }
    }
    GPU_shader_bind(shgroup->shader);
    DST.shader = shgroup->shader;
    DST.batch = NULL;
  }

  draw_update_uniforms(shgroup, &state, &use_tfeedback);

  drw_state_set(pass_state);

  /* Rendering Calls */
  {
    DRWCommandIterator iter;
    DRWCommand *cmd;
    eDRWCommandType cmd_type;

    draw_command_iter_begin(&iter, shgroup);

    drw_state_validate();

    while ((cmd = draw_command_iter_step(&iter, &cmd_type))) {

      switch (cmd_type) {
        case DRW_CMD_DRWSTATE:
        case DRW_CMD_STENCIL:
          draw_call_batching_flush(shgroup, &state);
          break;
        case DRW_CMD_DRAW:
        case DRW_CMD_DRAW_PROCEDURAL:
        case DRW_CMD_DRAW_INSTANCE:
          if (draw_call_is_culled(&cmd->instance.handle, DST.view_active)) {
            continue;
          }
          break;
        default:
          break;
      }

      switch (cmd_type) {
        case DRW_CMD_CLEAR:
          GPU_framebuffer_clear(GPU_framebuffer_active_get(),
                                cmd->clear.clear_channels,
                                (float[4]){cmd->clear.r / 255.0f,
                                           cmd->clear.g / 255.0f,
                                           cmd->clear.b / 255.0f,
                                           cmd->clear.a / 255.0f},
                                cmd->clear.depth,
                                cmd->clear.stencil);
          break;
        case DRW_CMD_DRWSTATE:
          state.drw_state_enabled |= cmd->state.enable;
          state.drw_state_disabled |= cmd->state.disable;
          drw_state_set((pass_state & ~state.drw_state_disabled) | state.drw_state_enabled);
          break;
        case DRW_CMD_STENCIL:
          drw_stencil_state_set(cmd->stencil.write_mask, cmd->stencil.ref, cmd->stencil.comp_mask);
          break;
        case DRW_CMD_SELECTID:
          state.select_id = cmd->select_id.select_id;
          state.select_buf = cmd->select_id.select_buf;
          break;
        case DRW_CMD_DRAW:
          if (!USE_BATCHING || state.obmats_loc == -1 || (G.f & G_FLAG_PICKSEL) ||
              cmd->draw.batch->inst[0]) {
            draw_call_single_do(
                shgroup, &state, cmd->draw.batch, cmd->draw.handle, 0, 0, 0, 0, true);
          }
          else {
            draw_call_batching_do(shgroup, &state, &cmd->draw);
          }
          break;
        case DRW_CMD_DRAW_PROCEDURAL:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->procedural.batch,
                              cmd->procedural.handle,
                              0,
                              cmd->procedural.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INSTANCE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance.batch,
                              cmd->instance.handle,
                              0,
                              0,
                              0,
                              cmd->instance.inst_count,
                              cmd->instance.use_attrs == 0);
          break;
        case DRW_CMD_DRAW_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->range.batch,
                              cmd->range.handle,
                              cmd->range.vert_first,
                              cmd->range.vert_count,
                              0,
                              1,
                              true);
          break;
        case DRW_CMD_DRAW_INSTANCE_RANGE:
          draw_call_single_do(shgroup,
                              &state,
                              cmd->instance_range.batch,
                              cmd->instance_range.handle,
                              0,
                              0,
                              cmd->instance_range.inst_first,
                              cmd->instance_range.inst_count,
                              false);
          break;
        case DRW_CMD_COMPUTE:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute.groups_x_len,
                               cmd->compute.groups_y_len,
                               cmd->compute.groups_z_len);
          break;
        case DRW_CMD_COMPUTE_REF:
          GPU_compute_dispatch(shgroup->shader,
                               cmd->compute_ref.groups_ref[0],
                               cmd->compute_ref.groups_ref[1],
                               cmd->compute_ref.groups_ref[2]);
          break;
        case DRW_CMD_COMPUTE_INDIRECT:
          GPU_compute_dispatch_indirect(shgroup->shader, cmd->compute_indirect.indirect_buf);
          break;
        case DRW_CMD_BARRIER:
          GPU_memory_barrier(cmd->barrier.type);
          break;
      }
    }

    draw_call_batching_finish(shgroup, &state);
  }

  if (use_tfeedback) {
    GPU_shader_transform_feedback_disable(shgroup->shader);
  }
}

static void drw_update_view(const float viewport_size[2])
{
  ViewInfos *storage = &DST.view_active->storage;
  copy_v2_v2(storage->viewport_size, viewport_size);
  copy_v2_v2(storage->viewport_size_inverse, viewport_size);
  invert_v2(storage->viewport_size_inverse);

  /* TODO(fclem): update a big UBO and only bind ranges here. */
  GPU_uniformbuf_update(G_draw.view_ubo, &DST.view_active->storage);

  /* TODO: get rid of this. */
  DST.view_storage_cpy = DST.view_active->storage;

  draw_compute_culling(DST.view_active);
}

static void drw_draw_pass_ex(DRWPass *pass,
                             DRWShadingGroup *start_group,
                             DRWShadingGroup *end_group)
{
  if (pass->original) {
    start_group = pass->original->shgroups.first;
    end_group = pass->original->shgroups.last;
  }

  if (start_group == NULL) {
    return;
  }

  DST.shader = NULL;

  BLI_assert(DST.buffer_finish_called &&
             "DRW_render_instance_buffer_finish had not been called before drawing");

  float viewport[4];
  GPU_viewport_size_get_f(viewport);
  if (DST.view_previous != DST.view_active || DST.view_active->is_dirty ||
      !equals_v2v2(DST.view_active->storage.viewport_size, &viewport[2])) {
    drw_update_view(&viewport[2]);
    DST.view_active->is_dirty = false;
    DST.view_previous = DST.view_active;
  }

  /* GPU_framebuffer_clear calls can change the state outside the DRW module.
   * Force reset the affected states to avoid problems later. */
  drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);

  drw_state_set(pass->state);
  drw_state_validate();

  if (DST.view_active->is_inverted) {
    GPU_front_facing(true);
  }

  DRW_stats_query_start(pass->name);

  for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
    draw_shgroup(shgroup, pass->state);
    /* Break if upper limit. */
    if (shgroup == end_group) {
      break;
    }
  }

  if (DST.shader) {
    GPU_shader_unbind();
    DST.shader = NULL;
  }

  if (DST.batch) {
    DST.batch = NULL;
  }

  /* Fix T67342 for some reason. AMD Pro driver bug. */
  if ((DST.state & DRW_STATE_BLEND_CUSTOM) != 0 &&
      GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OFFICIAL)) {
    drw_state_set(DST.state & ~DRW_STATE_BLEND_CUSTOM);
  }

  /* HACK: Rasterized discard can affect clear commands which are not
   * part of a DRWPass (as of now). So disable rasterized discard here
   * if it has been enabled. */
  if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
    drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
  }

  /* Reset default. */
  if (DST.view_active->is_inverted) {
    GPU_front_facing(false);
  }

  DRW_stats_query_end();
}

void DRW_draw_pass(DRWPass *pass)
{
  for (; pass; pass = pass->next) {
    drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
  }
}

void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
  drw_draw_pass_ex(pass, start_group, end_group);
}
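
/* Illustrative sketch (not part of the original file, `pass`, `first` and
 * `last` are hypothetical): drawing only a contiguous sub-range of a pass's
 * shading groups. Both pointers must belong to `pass`. */
#if 0
DRW_draw_pass_subset(pass, first, last);
#endif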

/** \} */