Blender v3.3
draw_cache_impl_subdivision.cc
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2021 Blender Foundation. */
3 
4 #include "draw_subdivision.h"
5 
6 #include "DNA_mesh_types.h"
7 #include "DNA_object_types.h"
8 #include "DNA_scene_types.h"
9 
10 #include "BKE_editmesh.h"
11 #include "BKE_mesh.h"
12 #include "BKE_modifier.h"
13 #include "BKE_object.h"
14 #include "BKE_scene.h"
15 #include "BKE_subdiv.h"
16 #include "BKE_subdiv_eval.h"
17 #include "BKE_subdiv_foreach.h"
18 #include "BKE_subdiv_mesh.h"
19 #include "BKE_subdiv_modifier.h"
20 
21 #include "BLI_linklist.h"
22 
23 #include "BLI_string.h"
24 
25 #include "PIL_time.h"
26 
27 #include "DRW_engine.h"
28 #include "DRW_render.h"
29 
30 #include "GPU_capabilities.h"
31 #include "GPU_compute.h"
32 #include "GPU_index_buffer.h"
33 #include "GPU_state.h"
34 #include "GPU_vertex_buffer.h"
35 
36 #include "opensubdiv_capi.h"
37 #include "opensubdiv_capi_type.h"
41 
42 #include "draw_cache_extract.hh"
43 #include "draw_cache_impl.h"
44 #include "draw_cache_inline.h"
46 
59 
60 enum {
81 
83 };
84 
86 
87 static const char *get_shader_code(int shader_type)
88 {
89  switch (shader_type) {
93  }
96  }
97  case SHADER_BUFFER_LNOR: {
99  }
100  case SHADER_BUFFER_TRIS:
103  }
106  }
109  }
116  }
122  }
125  }
128  }
131  }
132  }
133  return nullptr;
134 }
135 
136 static const char *get_shader_name(int shader_type)
137 {
138  switch (shader_type) {
139  case SHADER_BUFFER_LINES: {
140  return "subdiv lines build";
141  }
143  return "subdiv lines loose build";
144  }
145  case SHADER_BUFFER_LNOR: {
146  return "subdiv lnor build";
147  }
148  case SHADER_BUFFER_EDGE_FAC: {
149  return "subdiv edge fac build";
150  }
151  case SHADER_BUFFER_TRIS:
153  return "subdiv tris";
154  }
156  return "subdiv normals accumulate";
157  }
159  return "subdiv normals finalize";
160  }
162  return "subdiv patch evaluation";
163  }
165  return "subdiv patch evaluation face-varying";
166  }
168  return "subdiv patch evaluation face dots";
169  }
171  return "subdiv patch evaluation face dots with normals";
172  }
174  return "subdiv patch evaluation orco";
175  }
177  return "subdiv custom data interp 1D";
178  }
180  return "subdiv custom data interp 2D";
181  }
183  return "subdiv custom data interp 3D";
184  }
186  return "subdiv custom data interp 4D";
187  }
189  return "subdiv sculpt data";
190  }
192  return "subdiv uv stretch angle";
193  }
195  return "subdiv uv stretch area";
196  }
197  }
198  return nullptr;
199 }
200 
201 static GPUShader *get_patch_evaluation_shader(int shader_type)
202 {
203  if (g_subdiv_shaders[shader_type] == nullptr) {
204  const char *compute_code = get_shader_code(shader_type);
205 
206  const char *defines = nullptr;
207  if (shader_type == SHADER_PATCH_EVALUATION_FVAR) {
208  defines =
209  "#define OSD_PATCH_BASIS_GLSL\n"
210  "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
211  "#define FVAR_EVALUATION\n";
212  }
213  else if (shader_type == SHADER_PATCH_EVALUATION_FACE_DOTS) {
214  defines =
215  "#define OSD_PATCH_BASIS_GLSL\n"
216  "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
217  "#define FDOTS_EVALUATION\n";
218  }
219  else if (shader_type == SHADER_PATCH_EVALUATION_FACE_DOTS_WITH_NORMALS) {
220  defines =
221  "#define OSD_PATCH_BASIS_GLSL\n"
222  "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
223  "#define FDOTS_EVALUATION\n"
224  "#define FDOTS_NORMALS\n";
225  }
226  else if (shader_type == SHADER_PATCH_EVALUATION_ORCO) {
227  defines =
228  "#define OSD_PATCH_BASIS_GLSL\n"
229  "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
230  "#define ORCO_EVALUATION\n";
231  }
232  else {
233  defines =
234  "#define OSD_PATCH_BASIS_GLSL\n"
235  "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n";
236  }
237 
238  /* Merge OpenSubdiv library code with our own library code. */
239  const char *patch_basis_source = openSubdiv_getGLSLPatchBasisSource();
240  const char *subdiv_lib_code = datatoc_common_subdiv_lib_glsl;
241  char *library_code = static_cast<char *>(
242  MEM_mallocN(strlen(patch_basis_source) + strlen(subdiv_lib_code) + 1,
243  "subdiv patch evaluation library code"));
244  library_code[0] = '\0';
245  strcat(library_code, patch_basis_source);
246  strcat(library_code, subdiv_lib_code);
247 
248  g_subdiv_shaders[shader_type] = GPU_shader_create_compute(
249  compute_code, library_code, defines, get_shader_name(shader_type));
250 
251  MEM_freeN(library_code);
252  }
253 
254  return g_subdiv_shaders[shader_type];
255 }
256 
257 static GPUShader *get_subdiv_shader(int shader_type, const char *defines)
258 {
259  if (ELEM(shader_type,
264  return get_patch_evaluation_shader(shader_type);
265  }
266  if (g_subdiv_shaders[shader_type] == nullptr) {
267  const char *compute_code = get_shader_code(shader_type);
268  g_subdiv_shaders[shader_type] = GPU_shader_create_compute(
269  compute_code, datatoc_common_subdiv_lib_glsl, defines, get_shader_name(shader_type));
270  }
271  return g_subdiv_shaders[shader_type];
272 }
273 
274 /* -------------------------------------------------------------------- */
279 {
280  static GPUVertFormat format = {0};
281  if (format.attr_len == 0) {
283  }
284  return &format;
285 }
286 
287 /* Vertex format for `OpenSubdiv::Osd::PatchArray`. */
289 {
290  static GPUVertFormat format = {0};
291  if (format.attr_len == 0) {
297  GPU_vertformat_attr_add(&format, "primitiveIdBase", GPU_COMP_I32, 1, GPU_FETCH_INT);
298  }
299  return &format;
300 }
301 
302 /* Vertex format used for the `PatchTable::PatchHandle`. */
304 {
305  static GPUVertFormat format = {0};
306  if (format.attr_len == 0) {
310  }
311  return &format;
312 }
313 
314 /* Vertex format used for the quad-tree nodes of the PatchMap. */
316 {
317  static GPUVertFormat format = {0};
318  if (format.attr_len == 0) {
320  }
321  return &format;
322 }
323 
324 /* Vertex format for `OpenSubdiv::Osd::PatchParam`, not really used, it is only for making sure
325  * that the #GPUVertBuf used to wrap the OpenSubdiv patch param buffer is valid. */
327 {
328  static GPUVertFormat format = {0};
329  if (format.attr_len == 0) {
331  }
332  return &format;
333 }
334 
335 /* Vertex format for the patches' vertices index buffer. */
337 {
338  static GPUVertFormat format = {0};
339  if (format.attr_len == 0) {
341  }
342  return &format;
343 }
344 
345 /* Vertex format for the OpenSubdiv vertex buffer. */
347 {
348  static GPUVertFormat format = {0};
349  if (format.attr_len == 0) {
350  /* We use 4 components for the vectors to account for padding in the compute shaders, where
351  * vec3 is promoted to vec4. */
353  }
354  return &format;
355 }
356 
357 struct CompressedPatchCoord {
358  int ptex_face_index;
359  /* UV coordinate encoded as u << 16 | v, where u and v are quantized to 16 bits. */
360  unsigned int encoded_uv;
361 };
362 
363 MINLINE CompressedPatchCoord make_patch_coord(int ptex_face_index, float u, float v)
364 {
365  CompressedPatchCoord patch_coord = {
366  ptex_face_index,
367  (static_cast<unsigned int>(u * 65535.0f) << 16) | static_cast<unsigned int>(v * 65535.0f),
368  };
369  return patch_coord;
370 }
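/* Illustrative sketch (added for this write-up, not part of the Blender source): inverting the
 * quantization done in #make_patch_coord. The helper name is made up. */
static inline void decompress_patch_coord_uv(const CompressedPatchCoord patch_coord,
                                             float *r_u,
                                             float *r_v)
{
  *r_u = (float)(patch_coord.encoded_uv >> 16) / 65535.0f;
  *r_v = (float)(patch_coord.encoded_uv & 0xFFFFu) / 65535.0f;
}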
371 
372 /* Vertex format used for the #CompressedPatchCoord. */
374 {
375  static GPUVertFormat format = {0};
376  if (format.attr_len == 0) {
377  /* WARNING! Adjust #CompressedPatchCoord accordingly. */
378  GPU_vertformat_attr_add(&format, "ptex_face_index", GPU_COMP_U32, 1, GPU_FETCH_INT);
380  }
381  return &format;
382 }
383 
385 {
386  static GPUVertFormat format;
387  if (format.attr_len == 0) {
389  }
390  return &format;
391 }
392 
394 {
395  static GPUVertFormat format = {0};
396  if (format.attr_len == 0) {
400  }
401  return &format;
402 }
403 
406 /* -------------------------------------------------------------------- */
411 {
412  GPUVertBuf *verts = (GPUVertBuf *)(buffer->data);
414 }
415 
416 static void *vertbuf_alloc(const OpenSubdiv_Buffer *interface, const uint len)
417 {
418  GPUVertBuf *verts = (GPUVertBuf *)(interface->data);
420  return GPU_vertbuf_get_data(verts);
421 }
422 
423 static void vertbuf_device_alloc(const OpenSubdiv_Buffer *interface, const uint len)
424 {
425  GPUVertBuf *verts = (GPUVertBuf *)(interface->data);
426  /* This assumes that GPU_USAGE_DEVICE_ONLY was used, which won't allocate host memory. */
427  // BLI_assert(GPU_vertbuf_get_usage(verts) == GPU_USAGE_DEVICE_ONLY);
429 }
430 
431 static void vertbuf_wrap_device_handle(const OpenSubdiv_Buffer *interface, uint64_t handle)
432 {
433  GPUVertBuf *verts = (GPUVertBuf *)(interface->data);
435 }
436 
437 static void vertbuf_update_data(const OpenSubdiv_Buffer *interface,
438  uint start,
439  uint len,
440  const void *data)
441 {
442  GPUVertBuf *verts = (GPUVertBuf *)(interface->data);
444 }
445 
446 static void opensubdiv_gpu_buffer_init(OpenSubdiv_Buffer *buffer_interface, GPUVertBuf *vertbuf)
447 {
448  buffer_interface->data = vertbuf;
449  buffer_interface->bind_gpu = vertbuf_bind_gpu;
450  buffer_interface->buffer_offset = 0;
452  buffer_interface->alloc = vertbuf_alloc;
453  buffer_interface->device_alloc = vertbuf_device_alloc;
454  buffer_interface->device_update = vertbuf_update_data;
455 }
456 
458 {
462  return buffer;
463 }
464 
467 // --------------------------------------------------------
468 
469 static uint tris_count_from_number_of_loops(const uint number_of_loops)
470 {
471  const uint32_t number_of_quads = number_of_loops / 4;
472  return number_of_quads * 2;
473 }
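/* Example (added note, not in the original source): a subdivision result with 4096 loops has
 * 4096 / 4 = 1024 quads and therefore 2048 triangles. */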
474 
475 /* -------------------------------------------------------------------- */
480  int32_t *vert_origindex,
481  uint num_loops,
482  uint loose_len)
483 {
485  GPU_vertbuf_data_alloc(buffer, num_loops + loose_len);
486 
487  int32_t *vbo_data = (int32_t *)GPU_vertbuf_get_data(buffer);
488  memcpy(vbo_data, vert_origindex, num_loops * sizeof(int32_t));
489 }
490 
491 GPUVertBuf *draw_subdiv_build_origindex_buffer(int *vert_origindex, uint num_loops)
492 {
493  GPUVertBuf *buffer = GPU_vertbuf_calloc();
494  draw_subdiv_init_origindex_buffer(buffer, vert_origindex, num_loops, 0);
495  return buffer;
496 }
497 
500 /* -------------------------------------------------------------------- */
504 static void draw_patch_map_build(DRWPatchMap *gpu_patch_map, Subdiv *subdiv)
505 {
506  GPUVertBuf *patch_map_handles = GPU_vertbuf_calloc();
508 
509  GPUVertBuf *patch_map_quadtree = GPU_vertbuf_calloc();
511 
512  OpenSubdiv_Buffer patch_map_handles_interface;
513  opensubdiv_gpu_buffer_init(&patch_map_handles_interface, patch_map_handles);
514 
515  OpenSubdiv_Buffer patch_map_quad_tree_interface;
516  opensubdiv_gpu_buffer_init(&patch_map_quad_tree_interface, patch_map_quadtree);
517 
518  int min_patch_face = 0;
519  int max_patch_face = 0;
520  int max_depth = 0;
521  int patches_are_triangular = 0;
522 
523  OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
524  evaluator->getPatchMap(evaluator,
525  &patch_map_handles_interface,
526  &patch_map_quad_tree_interface,
527  &min_patch_face,
528  &max_patch_face,
529  &max_depth,
530  &patches_are_triangular);
531 
532  gpu_patch_map->patch_map_handles = patch_map_handles;
533  gpu_patch_map->patch_map_quadtree = patch_map_quadtree;
534  gpu_patch_map->min_patch_face = min_patch_face;
535  gpu_patch_map->max_patch_face = max_patch_face;
536  gpu_patch_map->max_depth = max_depth;
537  gpu_patch_map->patches_are_triangular = patches_are_triangular;
538 }
539 
540 static void draw_patch_map_free(DRWPatchMap *gpu_patch_map)
541 {
542  GPU_VERTBUF_DISCARD_SAFE(gpu_patch_map->patch_map_handles);
543  GPU_VERTBUF_DISCARD_SAFE(gpu_patch_map->patch_map_quadtree);
544  gpu_patch_map->min_patch_face = 0;
545  gpu_patch_map->max_patch_face = 0;
546  gpu_patch_map->max_depth = 0;
547  gpu_patch_map->patches_are_triangular = 0;
548 }
549 
552 /* -------------------------------------------------------------------- */
557 {
558  return cache->subdiv && cache->subdiv->evaluator && cache->num_subdiv_loops != 0;
559 }
560 
562 {
564  MEM_SAFE_FREE(cache->mat_start);
565  MEM_SAFE_FREE(cache->mat_end);
566 }
567 
569 {
573 }
574 
576 {
588  cache->resolution = 0;
589  cache->num_subdiv_loops = 0;
590  cache->num_subdiv_edges = 0;
591  cache->num_subdiv_verts = 0;
592  cache->num_subdiv_triangles = 0;
593  cache->num_coarse_poly = 0;
594  cache->num_subdiv_quads = 0;
595  cache->may_have_loose_geom = false;
599  if (cache->ubo) {
600  GPU_uniformbuf_free(cache->ubo);
601  cache->ubo = nullptr;
602  }
605  cache->loose_geom.edge_len = 0;
606  cache->loose_geom.vert_len = 0;
607  cache->loose_geom.loop_len = 0;
608 }
609 
610 /* Flags used in #DRWSubdivCache.extra_coarse_face_data. The flags are packed in the upper bits of
611  * each uint (one per coarse face), #SUBDIV_COARSE_FACE_FLAG_OFFSET tells where they are in the
612  * packed bits. */
613 #define SUBDIV_COARSE_FACE_FLAG_SMOOTH 1u
614 #define SUBDIV_COARSE_FACE_FLAG_SELECT 2u
615 #define SUBDIV_COARSE_FACE_FLAG_ACTIVE 4u
616 #define SUBDIV_COARSE_FACE_FLAG_HIDDEN 8u
617 
618 #define SUBDIV_COARSE_FACE_FLAG_OFFSET 28u
619 
620 #define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK \
621  (SUBDIV_COARSE_FACE_FLAG_SMOOTH << SUBDIV_COARSE_FACE_FLAG_OFFSET)
622 #define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK \
623  (SUBDIV_COARSE_FACE_FLAG_SELECT << SUBDIV_COARSE_FACE_FLAG_OFFSET)
624 #define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK \
625  (SUBDIV_COARSE_FACE_FLAG_ACTIVE << SUBDIV_COARSE_FACE_FLAG_OFFSET)
626 #define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK \
627  (SUBDIV_COARSE_FACE_FLAG_HIDDEN << SUBDIV_COARSE_FACE_FLAG_OFFSET)
628 
629 #define SUBDIV_COARSE_FACE_LOOP_START_MASK \
630  ~((SUBDIV_COARSE_FACE_FLAG_SMOOTH | SUBDIV_COARSE_FACE_FLAG_SELECT | \
631  SUBDIV_COARSE_FACE_FLAG_ACTIVE | SUBDIV_COARSE_FACE_FLAG_HIDDEN) \
632  << SUBDIV_COARSE_FACE_FLAG_OFFSET)
633 
634 static uint32_t compute_coarse_face_flag(BMFace *f, BMFace *efa_act)
635 {
636  if (f == nullptr) {
637  /* May happen during mapped extraction. */
638  return 0;
639  }
640 
641  uint32_t flag = 0;
642  if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
643  flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
644  }
645  if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
646  flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
647  }
648  if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
649  flag |= SUBDIV_COARSE_FACE_FLAG_HIDDEN;
650  }
651  if (f == efa_act) {
652  flag |= SUBDIV_COARSE_FACE_FLAG_ACTIVE;
653  }
654  const int loopstart = BM_elem_index_get(f->l_first);
655  return (uint)(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
656 }
657 
659  BMFace *efa_act,
660  uint32_t *flags_data)
661 {
662  BMFace *f;
663  BMIter iter;
664 
665  BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
666  const int index = BM_elem_index_get(f);
667  flags_data[index] = compute_coarse_face_flag(f, efa_act);
668  }
669 }
670 
672 {
673  for (int i = 0; i < mesh->totpoly; i++) {
674  uint32_t flag = 0;
675  if ((mesh->mpoly[i].flag & ME_SMOOTH) != 0) {
676  flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
677  }
678  if ((mesh->mpoly[i].flag & ME_FACE_SEL) != 0) {
679  flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
680  }
681  if ((mesh->mpoly[i].flag & ME_HIDE) != 0) {
682  flag |= SUBDIV_COARSE_FACE_FLAG_HIDDEN;
683  }
684  flags_data[i] = (uint)(mesh->mpoly[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
685  }
686 }
687 
689  BMesh *bm,
690  MeshRenderData *mr,
691  uint32_t *flags_data)
692 {
693  if (bm == nullptr) {
695  return;
696  }
697 
698  for (int i = 0; i < mesh->totpoly; i++) {
699  BMFace *f = bm_original_face_get(mr, i);
700  flags_data[i] = compute_coarse_face_flag(f, mr->efa_act);
701  }
702 }
703 
705  Mesh *mesh,
706  MeshRenderData *mr)
707 {
708  if (cache->extra_coarse_face_data == nullptr) {
710  static GPUVertFormat format;
711  if (format.attr_len == 0) {
713  }
716  mr->extract_type == MR_EXTRACT_BMESH ? cache->bm->totface :
717  mesh->totpoly);
718  }
719 
721 
722  if (mr->extract_type == MR_EXTRACT_BMESH) {
723  draw_subdiv_cache_extra_coarse_face_data_bm(cache->bm, mr->efa_act, flags_data);
724  }
725  else if (mr->extract_type == MR_EXTRACT_MAPPED) {
727  }
728  else {
730  }
731 
732  /* Make sure updated data is re-uploaded. */
734 }
735 
737 {
738  DRWSubdivCache *subdiv_cache = mbc->subdiv_cache;
739  if (subdiv_cache == nullptr) {
740  subdiv_cache = static_cast<DRWSubdivCache *>(
741  MEM_callocN(sizeof(DRWSubdivCache), "DRWSubdivCache"));
742  }
743  mbc->subdiv_cache = subdiv_cache;
744  return subdiv_cache;
745 }
746 
748 {
749  const bool has_orco = CustomData_has_layer(&mesh->vdata, CD_ORCO);
750  if (has_orco && subdiv->evaluator && !subdiv->evaluator->hasVertexData(subdiv->evaluator)) {
751  /* If we suddenly have/need original coordinates, recreate the evaluator if the extra
752  * source was not created yet. The refiner also has to be recreated as refinement for source
753  * and vertex data is done only once. */
755  subdiv->evaluator = nullptr;
756 
757  if (subdiv->topology_refiner != nullptr) {
759  subdiv->topology_refiner = nullptr;
760  }
761  }
762 }
763 
766 /* -------------------------------------------------------------------- */
779  const Subdiv *subdiv;
781 
783 
784  /* Pointers into #DRWSubdivCache buffers for easier access during traversal. */
791 
792  /* Temporary buffers used during traversal. */
795 
796  /* #CD_ORIGINDEX layers from the mesh to directly look up during traversal the original-index
797  * from the base mesh for edit data so that we do not have to handle yet another GPU buffer and
798  * do this in the shaders. */
799  const int *v_origindex;
800  const int *e_origindex;
801 };
802 
803 static bool draw_subdiv_topology_info_cb(const SubdivForeachContext *foreach_context,
804  const int num_vertices,
805  const int num_edges,
806  const int num_loops,
807  const int num_polygons,
808  const int *subdiv_polygon_offset)
809 {
810  /* num_loops does not account for meshes with only loose geometry, which might be meshes
811  * used as custom bone shapes, so also check num_vertices. */
812  if (num_vertices == 0 && num_loops == 0) {
813  return false;
814  }
815 
816  DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
817  DRWSubdivCache *cache = ctx->cache;
818 
819  /* Set topology information only if we have loops. */
820  if (num_loops != 0) {
821  cache->num_subdiv_edges = (uint)num_edges;
822  cache->num_subdiv_loops = (uint)num_loops;
823  cache->num_subdiv_verts = (uint)num_vertices;
824  cache->num_subdiv_quads = (uint)num_polygons;
825  cache->subdiv_polygon_offset = static_cast<int *>(MEM_dupallocN(subdiv_polygon_offset));
826  }
827 
828  cache->may_have_loose_geom = num_vertices != 0 || num_edges != 0;
829 
830  /* Initialize cache buffers, prefer dynamic usage so we can reuse memory on the host even after
831  * it was sent to the device, since we may use the data while building other buffers on the CPU
832  * side. */
833  cache->patch_coords = GPU_vertbuf_calloc();
837 
842 
847 
852 
853  cache->subdiv_loop_subdiv_vert_index = static_cast<int *>(
854  MEM_mallocN(cache->num_subdiv_loops * sizeof(int), "subdiv_loop_subdiv_vert_index"));
855 
856  cache->subdiv_loop_subdiv_edge_index = static_cast<int *>(
857  MEM_mallocN(cache->num_subdiv_loops * sizeof(int), "subdiv_loop_subdiv_edge_index"));
858 
859  cache->subdiv_loop_poly_index = static_cast<int *>(
860  MEM_mallocN(cache->num_subdiv_loops * sizeof(int), "subdiv_loop_poly_index"));
861 
862  /* Initialize context pointers and temporary buffers. */
869 
870  ctx->v_origindex = static_cast<const int *>(
872 
873  ctx->e_origindex = static_cast<const int *>(
875 
876  if (cache->num_subdiv_verts) {
877  ctx->vert_origindex_map = static_cast<int *>(
878  MEM_mallocN(cache->num_subdiv_verts * sizeof(int), "subdiv_vert_origindex_map"));
879  for (int i = 0; i < num_vertices; i++) {
880  ctx->vert_origindex_map[i] = -1;
881  }
882  }
883 
884  if (cache->num_subdiv_edges) {
885  ctx->edge_origindex_map = static_cast<int *>(
886  MEM_mallocN(cache->num_subdiv_edges * sizeof(int), "subdiv_edge_origindex_map"));
887  for (int i = 0; i < num_edges; i++) {
888  ctx->edge_origindex_map[i] = -1;
889  }
890  }
891 
892  return true;
893 }
894 
895 static void draw_subdiv_vertex_corner_cb(const SubdivForeachContext *foreach_context,
896  void *UNUSED(tls),
897  const int UNUSED(ptex_face_index),
898  const float UNUSED(u),
899  const float UNUSED(v),
900  const int coarse_vertex_index,
901  const int UNUSED(coarse_poly_index),
902  const int UNUSED(coarse_corner),
903  const int subdiv_vertex_index)
904 {
905  BLI_assert(coarse_vertex_index != ORIGINDEX_NONE);
906  DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
907  ctx->vert_origindex_map[subdiv_vertex_index] = coarse_vertex_index;
908 }
909 
910 static void draw_subdiv_vertex_edge_cb(const SubdivForeachContext *UNUSED(foreach_context),
911  void *UNUSED(tls_v),
912  const int UNUSED(ptex_face_index),
913  const float UNUSED(u),
914  const float UNUSED(v),
915  const int UNUSED(coarse_edge_index),
916  const int UNUSED(coarse_poly_index),
917  const int UNUSED(coarse_corner),
918  const int UNUSED(subdiv_vertex_index))
919 {
920  /* Required if SubdivForeachContext.vertex_corner is also set. */
921 }
922 
923 static void draw_subdiv_edge_cb(const SubdivForeachContext *foreach_context,
924  void *UNUSED(tls),
925  const int coarse_edge_index,
926  const int subdiv_edge_index,
927  const bool UNUSED(is_loose),
928  const int UNUSED(subdiv_v1),
929  const int UNUSED(subdiv_v2))
930 {
931  DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
932 
933  if (!ctx->edge_origindex_map) {
934  return;
935  }
936 
937  int coarse_index = coarse_edge_index;
938 
939  if (coarse_index != -1) {
940  if (ctx->e_origindex) {
941  coarse_index = ctx->e_origindex[coarse_index];
942  }
943  }
944 
945  ctx->edge_origindex_map[subdiv_edge_index] = coarse_index;
946 }
947 
948 static void draw_subdiv_loop_cb(const SubdivForeachContext *foreach_context,
949  void *UNUSED(tls_v),
950  const int ptex_face_index,
951  const float u,
952  const float v,
953  const int UNUSED(coarse_loop_index),
954  const int coarse_poly_index,
955  const int UNUSED(coarse_corner),
956  const int subdiv_loop_index,
957  const int subdiv_vertex_index,
958  const int subdiv_edge_index)
959 {
960  DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
961  ctx->patch_coords[subdiv_loop_index] = make_patch_coord(ptex_face_index, u, v);
962 
963  int coarse_vertex_index = ctx->vert_origindex_map[subdiv_vertex_index];
964 
965  ctx->subdiv_loop_subdiv_vert_index[subdiv_loop_index] = subdiv_vertex_index;
966  ctx->subdiv_loop_subdiv_edge_index[subdiv_loop_index] = subdiv_edge_index;
967  ctx->subdiv_loop_poly_index[subdiv_loop_index] = coarse_poly_index;
968  ctx->subdiv_loop_vert_index[subdiv_loop_index] = coarse_vertex_index;
969 }
970 
972 {
973  memset(foreach_context, 0, sizeof(*foreach_context));
974  foreach_context->topology_info = draw_subdiv_topology_info_cb;
975  foreach_context->loop = draw_subdiv_loop_cb;
976  foreach_context->edge = draw_subdiv_edge_cb;
977  foreach_context->vertex_corner = draw_subdiv_vertex_corner_cb;
978  foreach_context->vertex_edge = draw_subdiv_vertex_edge_cb;
979 }
980 
981 static void do_subdiv_traversal(DRWCacheBuildingContext *cache_building_context, Subdiv *subdiv)
982 {
983  SubdivForeachContext foreach_context;
984  draw_subdiv_foreach_callbacks(&foreach_context);
985  foreach_context.user_data = cache_building_context;
986 
987  BKE_subdiv_foreach_subdiv_geometry(subdiv,
988  &foreach_context,
989  cache_building_context->settings,
990  cache_building_context->coarse_mesh);
991 
992  /* Now that traversal is done, we can set up the right original indices for the
993  * subdiv-loop-to-coarse-edge map.
994  */
995  for (int i = 0; i < cache_building_context->cache->num_subdiv_loops; i++) {
996  cache_building_context->subdiv_loop_edge_index[i] =
997  cache_building_context
998  ->edge_origindex_map[cache_building_context->subdiv_loop_subdiv_edge_index[i]];
999  }
1000 }
1001 
1003 {
1007  return verts;
1008 }
1009 
1010 /* Build maps to hold enough information to tell which face is adjacent to which vertex; those will
1011  * be used for computing normals if limit surfaces are unavailable. */
1013 {
1014  /* +1 so that we do not require a special case for the last vertex; this extra offset will
1015  * contain the total number of adjacent faces. */
1017  get_origindex_format(), cache->num_subdiv_verts + 1);
1018 
1019  int *vertex_offsets = (int *)GPU_vertbuf_get_data(cache->subdiv_vertex_face_adjacency_offsets);
1020  memset(vertex_offsets, 0, sizeof(int) * (cache->num_subdiv_verts + 1));
1021 
1022  for (int i = 0; i < cache->num_subdiv_loops; i++) {
1023  vertex_offsets[cache->subdiv_loop_subdiv_vert_index[i]]++;
1024  }
1025 
1026  int ofs = vertex_offsets[0];
1027  vertex_offsets[0] = 0;
1028  for (uint i = 1; i < cache->num_subdiv_verts + 1; i++) {
1029  int tmp = vertex_offsets[i];
1030  vertex_offsets[i] = ofs;
1031  ofs += tmp;
1032  }
1033 
1035  cache->num_subdiv_loops);
1036  int *adjacent_faces = (int *)GPU_vertbuf_get_data(cache->subdiv_vertex_face_adjacency);
1037  int *tmp_set_faces = static_cast<int *>(
1038  MEM_callocN(sizeof(int) * cache->num_subdiv_verts, "tmp subdiv vertex offset"));
1039 
1040  for (int i = 0; i < cache->num_subdiv_loops / 4; i++) {
1041  for (int j = 0; j < 4; j++) {
1042  const int subdiv_vertex = cache->subdiv_loop_subdiv_vert_index[i * 4 + j];
1043  int first_face_offset = vertex_offsets[subdiv_vertex] + tmp_set_faces[subdiv_vertex];
1044  adjacent_faces[first_face_offset] = i;
1045  tmp_set_faces[subdiv_vertex] += 1;
1046  }
1047  }
1048 
1049  MEM_freeN(tmp_set_faces);
1050 }
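/* Illustrative sketch (added for this write-up, not part of the Blender source): with the two
 * buffers built above, the faces adjacent to a subdivision vertex `v` can be walked with the
 * usual offset-table pattern. The helper is hypothetical and CPU-side only. */
static inline void foreach_adjacent_face_example(const int *vertex_offsets,
                                                 const int *adjacent_faces,
                                                 const int v)
{
  for (int i = vertex_offsets[v]; i < vertex_offsets[v + 1]; i++) {
    const int face_index = adjacent_faces[i];
    (void)face_index; /* A real consumer would e.g. accumulate this face's normal. */
  }
}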
1051 
1053  Subdiv *subdiv,
1054  Mesh *mesh_eval,
1055  const SubsurfRuntimeData *runtime_data)
1056 {
1057  SubdivToMeshSettings to_mesh_settings;
1058  to_mesh_settings.resolution = runtime_data->resolution;
1059  to_mesh_settings.use_optimal_display = false;
1060 
1061  if (cache->resolution != to_mesh_settings.resolution) {
1062  /* Resolution changed, we need to rebuild, free any existing cached data. */
1063  draw_subdiv_cache_free(cache);
1064  }
1065 
1066  /* If the resolution of the cache matches the requested one, check whether the patch
1067  * coordinates were already generated. Those coordinates are specific to the resolution, so
1068  * they should be null either after initialization, or after freeing if the resolution (or some
1069  * other subdivision setting) changed.
1070  */
1071  if (cache->patch_coords != nullptr) {
1072  return true;
1073  }
1074 
1075  DRWCacheBuildingContext cache_building_context;
1076  memset(&cache_building_context, 0, sizeof(DRWCacheBuildingContext));
1077  cache_building_context.coarse_mesh = mesh_eval;
1078  cache_building_context.settings = &to_mesh_settings;
1079  cache_building_context.cache = cache;
1080 
1081  do_subdiv_traversal(&cache_building_context, subdiv);
1082  if (cache->num_subdiv_loops == 0 && cache->num_subdiv_verts == 0 &&
1083  !cache->may_have_loose_geom) {
1084  /* Either the traversal failed, or we have an empty mesh, either way we cannot go any further.
1085  * The subdiv_polygon_offset cannot then be reliably stored in the cache, so free it directly.
1086  */
1088  return false;
1089  }
1090 
1091  /* Only build polygon related data if we have polygons. */
1092  if (cache->num_subdiv_loops != 0) {
1093  /* Build buffers for the PatchMap. */
1094  draw_patch_map_build(&cache->gpu_patch_map, subdiv);
1095 
1097 
1098  /* Build patch coordinates for all the face dots. */
1100  mesh_eval->totpoly);
1101  CompressedPatchCoord *blender_fdots_patch_coords = (CompressedPatchCoord *)
1103  for (int i = 0; i < mesh_eval->totpoly; i++) {
1104  const int ptex_face_index = cache->face_ptex_offset[i];
1105  if (mesh_eval->mpoly[i].totloop == 4) {
1106  /* For quads, the center coordinate of the coarse face has `u = v = 0.5`. */
1107  blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 0.5f, 0.5f);
1108  }
1109  else {
1110  /* For N-gons, since they are split into quads from the center, and since the center is
1111  * chosen to be the top right corner of each quad, the center coordinate of the coarse face
1112  * is any one of those top right corners with `u = v = 1.0`. */
1113  blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 1.0f, 1.0f);
1114  }
1115  }
1116 
1118  cache->subdiv_polygon_offset, mesh_eval->totpoly);
1119 
1121  mesh_eval->totpoly + 1);
1122 
1124  }
1125 
1126  cache->resolution = to_mesh_settings.resolution;
1127  cache->num_coarse_poly = mesh_eval->totpoly;
1128 
1129  /* To avoid floating point precision issues when evaluating patches at patch boundaries,
1130  * ensure that all loops sharing a vertex use the same patch coordinate. Otherwise, precision
1131  * differences could make the mesh non-watertight, leading to shadowing artifacts (see T97877). */
1132  blender::Vector<int> first_loop_index(cache->num_subdiv_verts, -1);
1133 
1134  /* Save coordinates for corners, as attributes may vary for each loop connected to the same
1135  * vertex. */
1137  cache_building_context.patch_coords,
1138  sizeof(CompressedPatchCoord) * cache->num_subdiv_loops);
1139 
1140  for (int i = 0; i < cache->num_subdiv_loops; i++) {
1141  const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
1142  if (first_loop_index[vertex] != -1) {
1143  continue;
1144  }
1145  first_loop_index[vertex] = i;
1146  }
1147 
1148  for (int i = 0; i < cache->num_subdiv_loops; i++) {
1149  const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
1150  cache_building_context.patch_coords[i] =
1151  cache_building_context.patch_coords[first_loop_index[vertex]];
1152  }
1153 
1154  /* Cleanup. */
1155  MEM_SAFE_FREE(cache_building_context.vert_origindex_map);
1156  MEM_SAFE_FREE(cache_building_context.edge_origindex_map);
1157 
1158  return true;
1159 }
1160 
1163 /* -------------------------------------------------------------------- */
1170  /* Offsets in the buffers data where the source and destination data start. */
1173 
1174  /* Parameters for the DRWPatchMap. */
1179 
1180  /* Coarse topology information. */
1183 
1184  /* Refined topology information. */
1186 
1187  /* Subdivision settings: this is an int in C but a bool in the GLSL code. Since bools have the
1188  * same size as ints in GLSL, use int in C to ensure that the size of the structure is what GLSL
1189  * expects. */
1191 
1192  /* The sculpt mask data layer may be null. */
1194 
1195  /* Masks for the extra coarse face data. */
1201 
1202  /* Number of elements to process in the compute shader (can be the coarse quad count, or the
1203  * final vertex count, depending on which compute pass we do). This is used to early-out in case
1204  * of out-of-bounds accesses, as compute dispatches are of fixed size. */
1206 
1209  int _pad3;
1210 };
1211 
1212 static_assert((sizeof(DRWSubdivUboStorage) % 16) == 0,
1213  "DRWSubdivUboStorage is not padded to a multiple of the size of vec4");
1214 
1216  DRWSubdivUboStorage *ubo,
1217  const int src_offset,
1218  const int dst_offset,
1219  const uint total_dispatch_size,
1220  const bool has_sculpt_mask)
1221 {
1222  ubo->src_offset = src_offset;
1223  ubo->dst_offset = dst_offset;
1226  ubo->max_depth = cache->gpu_patch_map.max_depth;
1228  ubo->coarse_poly_count = cache->num_coarse_poly;
1229  ubo->optimal_display = cache->optimal_display;
1230  ubo->num_subdiv_loops = cache->num_subdiv_loops;
1231  ubo->edge_loose_offset = cache->num_subdiv_loops * 2;
1232  ubo->has_sculpt_mask = has_sculpt_mask;
1238  ubo->total_dispatch_size = total_dispatch_size;
1239  ubo->is_edit_mode = cache->is_edit_mode;
1240  ubo->use_hide = cache->use_hide;
1241 }
1242 
1244  GPUShader *shader,
1245  const int src_offset,
1246  const int dst_offset,
1247  const uint total_dispatch_size,
1248  const bool has_sculpt_mask = false)
1249 {
1250  DRWSubdivUboStorage storage;
1252  cache, &storage, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask);
1253 
1254  if (!cache->ubo) {
1255  const_cast<DRWSubdivCache *>(cache)->ubo = GPU_uniformbuf_create_ex(
1256  sizeof(DRWSubdivUboStorage), &storage, "DRWSubdivUboStorage");
1257  }
1258 
1259  GPU_uniformbuf_update(cache->ubo, &storage);
1260 
1261  const int binding = GPU_shader_get_uniform_block_binding(shader, "shader_data");
1262  GPU_uniformbuf_bind(cache->ubo, binding);
1263 }
1264 
1267 // --------------------------------------------------------
1268 
1269 #define SUBDIV_LOCAL_WORK_GROUP_SIZE 64
1270 static uint get_dispatch_size(uint elements)
1271 {
1272  return divide_ceil_u(elements, SUBDIV_LOCAL_WORK_GROUP_SIZE);
1273 }
1274 
1281  GPUShader *shader,
1282  const int src_offset,
1283  const int dst_offset,
1284  uint total_dispatch_size,
1285  const bool has_sculpt_mask = false)
1286 {
1287  const uint max_res_x = static_cast<uint>(GPU_max_work_group_count(0));
1288 
1289  const uint dispatch_size = get_dispatch_size(total_dispatch_size);
1290  uint dispatch_rx = dispatch_size;
1291  uint dispatch_ry = 1u;
1292  if (dispatch_rx > max_res_x) {
1293  /* Since there are some limitations with regard to the maximum work group size (could be as
1294  * low as 64k elements per call), we split the number of elements into a "2d" number, with the
1295  * final index being computed as `res_x + res_y * max_work_group_size`. Even with a maximum
1296  * work group size of 64k, that still leaves us with roughly `64k * 64k = 4` billion elements
1297  * total, which should be enough. If not, we could also use the 3rd dimension. */
1298  /* TODO(fclem): We could dispatch fewer groups if we compute the prime factorization and
1299  * get the smallest rect fitting the requirements. */
1300  dispatch_rx = dispatch_ry = ceilf(sqrtf(dispatch_size));
1301  /* Avoid a completely empty dispatch line caused by rounding. */
1302  if ((dispatch_rx * (dispatch_ry - 1)) >= dispatch_size) {
1303  dispatch_ry -= 1;
1304  }
1305  }
1306 
1307  /* X and Y dimensions may have different limits so the above computation may not be right, but
1308  * even with the standard 64k minimum on all dimensions we still have a lot of room. Therefore,
1309  * we presume it all fits. */
1310  BLI_assert(dispatch_ry < static_cast<uint>(GPU_max_work_group_count(1)));
1311 
1313  cache, shader, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask);
1314 
1315  GPU_compute_dispatch(shader, dispatch_rx, dispatch_ry, 1);
1316 }
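/* Worked example (added for clarity, not in the original source): with a maximum work group
 * count of 65535 on X and 10,000,000 elements, get_dispatch_size() yields 10,000,000 / 64 =
 * 156,250 groups, which exceeds the limit. The split then picks rx = ry = ceil(sqrt(156250)) =
 * 396, and since 396 * 395 = 156,420 already covers the requested groups, the guard trims the
 * grid to 396 x 395. The shader relies on total_dispatch_size to ignore the surplus threads. */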
1317 
1319  GPUVertBuf *pos_nor,
1320  GPUVertBuf *orco)
1321 {
1323  /* Happens on meshes with only loose geometry. */
1324  return;
1325  }
1326 
1327  Subdiv *subdiv = cache->subdiv;
1328  OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1329 
1330  OpenSubdiv_Buffer src_buffer_interface;
1331  GPUVertBuf *src_buffer = create_buffer_and_interface(&src_buffer_interface,
1333  evaluator->wrapSrcBuffer(evaluator, &src_buffer_interface);
1334 
1335  GPUVertBuf *src_extra_buffer = nullptr;
1336  if (orco) {
1337  OpenSubdiv_Buffer src_extra_buffer_interface;
1338  src_extra_buffer = create_buffer_and_interface(&src_extra_buffer_interface,
1340  evaluator->wrapSrcVertexDataBuffer(evaluator, &src_extra_buffer_interface);
1341  }
1342 
1343  OpenSubdiv_Buffer patch_arrays_buffer_interface;
1344  GPUVertBuf *patch_arrays_buffer = create_buffer_and_interface(&patch_arrays_buffer_interface,
1346  evaluator->fillPatchArraysBuffer(evaluator, &patch_arrays_buffer_interface);
1347 
1348  OpenSubdiv_Buffer patch_index_buffer_interface;
1349  GPUVertBuf *patch_index_buffer = create_buffer_and_interface(&patch_index_buffer_interface,
1351  evaluator->wrapPatchIndexBuffer(evaluator, &patch_index_buffer_interface);
1352 
1353  OpenSubdiv_Buffer patch_param_buffer_interface;
1354  GPUVertBuf *patch_param_buffer = create_buffer_and_interface(&patch_param_buffer_interface,
1356  evaluator->wrapPatchParamBuffer(evaluator, &patch_param_buffer_interface);
1357 
1360  GPU_shader_bind(shader);
1361 
1362  int binding_point = 0;
1363  GPU_vertbuf_bind_as_ssbo(src_buffer, binding_point++);
1364  GPU_vertbuf_bind_as_ssbo(cache->gpu_patch_map.patch_map_handles, binding_point++);
1366  GPU_vertbuf_bind_as_ssbo(cache->patch_coords, binding_point++);
1367  GPU_vertbuf_bind_as_ssbo(cache->verts_orig_index, binding_point++);
1368  GPU_vertbuf_bind_as_ssbo(patch_arrays_buffer, binding_point++);
1369  GPU_vertbuf_bind_as_ssbo(patch_index_buffer, binding_point++);
1370  GPU_vertbuf_bind_as_ssbo(patch_param_buffer, binding_point++);
1371  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1372  if (orco) {
1373  GPU_vertbuf_bind_as_ssbo(src_extra_buffer, binding_point++);
1374  GPU_vertbuf_bind_as_ssbo(orco, binding_point++);
1375  }
1376  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1377 
1378  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1379 
1380  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1381  * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1382  * needed. */
1384 
1385  /* Cleanup. */
1387 
1388  GPU_vertbuf_discard(patch_index_buffer);
1389  GPU_vertbuf_discard(patch_param_buffer);
1390  GPU_vertbuf_discard(patch_arrays_buffer);
1391  GPU_vertbuf_discard(src_buffer);
1392  GPU_VERTBUF_DISCARD_SAFE(src_extra_buffer);
1393 }
1394 
1396  GPUVertBuf *uvs,
1397  const int face_varying_channel,
1398  const int dst_offset)
1399 {
1401  /* Happens on meshes with only loose geometry. */
1402  return;
1403  }
1404 
1405  Subdiv *subdiv = cache->subdiv;
1406  OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1407 
1408  OpenSubdiv_Buffer src_buffer_interface;
1409  GPUVertBuf *src_buffer = create_buffer_and_interface(&src_buffer_interface, get_uvs_format());
1410  evaluator->wrapFVarSrcBuffer(evaluator, face_varying_channel, &src_buffer_interface);
1411 
1412  OpenSubdiv_Buffer patch_arrays_buffer_interface;
1413  GPUVertBuf *patch_arrays_buffer = create_buffer_and_interface(&patch_arrays_buffer_interface,
1415  evaluator->fillFVarPatchArraysBuffer(
1416  evaluator, face_varying_channel, &patch_arrays_buffer_interface);
1417 
1418  OpenSubdiv_Buffer patch_index_buffer_interface;
1419  GPUVertBuf *patch_index_buffer = create_buffer_and_interface(&patch_index_buffer_interface,
1421  evaluator->wrapFVarPatchIndexBuffer(
1422  evaluator, face_varying_channel, &patch_index_buffer_interface);
1423 
1424  OpenSubdiv_Buffer patch_param_buffer_interface;
1425  GPUVertBuf *patch_param_buffer = create_buffer_and_interface(&patch_param_buffer_interface,
1427  evaluator->wrapFVarPatchParamBuffer(
1428  evaluator, face_varying_channel, &patch_param_buffer_interface);
1429 
1431  GPU_shader_bind(shader);
1432 
1433  int binding_point = 0;
1434  GPU_vertbuf_bind_as_ssbo(src_buffer, binding_point++);
1435  GPU_vertbuf_bind_as_ssbo(cache->gpu_patch_map.patch_map_handles, binding_point++);
1437  GPU_vertbuf_bind_as_ssbo(cache->corner_patch_coords, binding_point++);
1438  GPU_vertbuf_bind_as_ssbo(cache->verts_orig_index, binding_point++);
1439  GPU_vertbuf_bind_as_ssbo(patch_arrays_buffer, binding_point++);
1440  GPU_vertbuf_bind_as_ssbo(patch_index_buffer, binding_point++);
1441  GPU_vertbuf_bind_as_ssbo(patch_param_buffer, binding_point++);
1442  GPU_vertbuf_bind_as_ssbo(uvs, binding_point++);
1443  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1444 
1445  /* The buffer offset has the stride baked in (which is 2, as we have UVs), so remove the stride
1446  * by dividing by 2. */
1447  const int src_offset = src_buffer_interface.buffer_offset / 2;
1448  drw_subdiv_compute_dispatch(cache, shader, src_offset, dst_offset, cache->num_subdiv_quads);
1449 
1450  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1451  * Since it may also be used for computing UV stretches, we also need a barrier on the shader
1452  * storage. */
1454 
1455  /* Cleanup. */
1457 
1458  GPU_vertbuf_discard(patch_index_buffer);
1459  GPU_vertbuf_discard(patch_param_buffer);
1460  GPU_vertbuf_discard(patch_arrays_buffer);
1461  GPU_vertbuf_discard(src_buffer);
1462 }
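/* Example (added for clarity, not in the original source): if the evaluator reports a
 * buffer_offset of, say, 2048 floats for this face-varying channel, the UV data of the channel
 * starts at vec2 index 2048 / 2 = 1024, which is the src_offset handed to the compute shader. */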
1463 
1465  GPUVertBuf *src_data,
1466  GPUVertBuf *dst_data,
1467  int dimensions,
1468  int dst_offset,
1469  bool compress_to_u16)
1470 {
1471  GPUShader *shader = nullptr;
1472 
1474  /* Happens on meshes with only loose geometry. */
1475  return;
1476  }
1477 
1478  if (dimensions == 1) {
1480  "#define SUBDIV_POLYGON_OFFSET\n"
1481  "#define DIMENSIONS 1\n");
1482  }
1483  else if (dimensions == 2) {
1485  "#define SUBDIV_POLYGON_OFFSET\n"
1486  "#define DIMENSIONS 2\n");
1487  }
1488  else if (dimensions == 3) {
1490  "#define SUBDIV_POLYGON_OFFSET\n"
1491  "#define DIMENSIONS 3\n");
1492  }
1493  else if (dimensions == 4) {
1494  if (compress_to_u16) {
1496  "#define SUBDIV_POLYGON_OFFSET\n"
1497  "#define DIMENSIONS 4\n"
1498  "#define GPU_FETCH_U16_TO_FLOAT\n");
1499  }
1500  else {
1502  "#define SUBDIV_POLYGON_OFFSET\n"
1503  "#define DIMENSIONS 4\n");
1504  }
1505  }
1506  else {
1507  /* Crash if dimensions are not supported. */
1508  }
1509 
1510  GPU_shader_bind(shader);
1511 
1512  int binding_point = 0;
1513  /* subdiv_polygon_offset is always at binding point 0 for each shader using it. */
1514  GPU_vertbuf_bind_as_ssbo(cache->subdiv_polygon_offset_buffer, binding_point++);
1515  GPU_vertbuf_bind_as_ssbo(src_data, binding_point++);
1516  GPU_vertbuf_bind_as_ssbo(cache->face_ptex_offset_buffer, binding_point++);
1517  GPU_vertbuf_bind_as_ssbo(cache->corner_patch_coords, binding_point++);
1518  GPU_vertbuf_bind_as_ssbo(cache->extra_coarse_face_data, binding_point++);
1519  GPU_vertbuf_bind_as_ssbo(dst_data, binding_point++);
1520  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1521 
1522  drw_subdiv_compute_dispatch(cache, shader, 0, dst_offset, cache->num_subdiv_quads);
1523 
1524  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. Put
1525  * a barrier on the shader storage as we may use the result in another compute shader. */
1527 
1528  /* Cleanup. */
1530 }
1531 
1533  GPUVertBuf *mask_vbo,
1534  GPUVertBuf *face_set_vbo,
1535  GPUVertBuf *sculpt_data)
1536 {
1538  GPU_shader_bind(shader);
1539 
1540  /* Mask VBO is always at binding point 0. */
1541  if (mask_vbo) {
1542  GPU_vertbuf_bind_as_ssbo(mask_vbo, 0);
1543  }
1544 
1545  int binding_point = 1;
1546  GPU_vertbuf_bind_as_ssbo(face_set_vbo, binding_point++);
1547  GPU_vertbuf_bind_as_ssbo(sculpt_data, binding_point++);
1548  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1549 
1550  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads, mask_vbo != nullptr);
1551 
1552  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1554 
1555  /* Cleanup. */
1557 }
1558 
1560  GPUVertBuf *pos_nor,
1561  GPUVertBuf *face_adjacency_offsets,
1562  GPUVertBuf *face_adjacency_lists,
1563  GPUVertBuf *vertex_loop_map,
1564  GPUVertBuf *vertex_normals)
1565 {
1567  GPU_shader_bind(shader);
1568 
1569  int binding_point = 0;
1570 
1571  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1572  GPU_vertbuf_bind_as_ssbo(face_adjacency_offsets, binding_point++);
1573  GPU_vertbuf_bind_as_ssbo(face_adjacency_lists, binding_point++);
1574  GPU_vertbuf_bind_as_ssbo(vertex_loop_map, binding_point++);
1575  GPU_vertbuf_bind_as_ssbo(vertex_normals, binding_point++);
1576  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1577 
1578  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_verts);
1579 
1580  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1581  * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1582  * needed. */
1584 
1585  /* Cleanup. */
1587 }
1588 
1590  GPUVertBuf *vertex_normals,
1591  GPUVertBuf *subdiv_loop_subdiv_vert_index,
1592  GPUVertBuf *pos_nor)
1593 {
1595  GPU_shader_bind(shader);
1596 
1597  int binding_point = 0;
1598  GPU_vertbuf_bind_as_ssbo(vertex_normals, binding_point++);
1599  GPU_vertbuf_bind_as_ssbo(subdiv_loop_subdiv_vert_index, binding_point++);
1600  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1601  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1602 
1603  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1604 
1605  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1606  * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1607  * needed. */
1609 
1610  /* Cleanup. */
1612 }
1613 
1615  GPUVertBuf *src_custom_normals,
1616  GPUVertBuf *pos_nor)
1617 {
1618  GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_NORMALS_FINALIZE, "#define CUSTOM_NORMALS");
1619  GPU_shader_bind(shader);
1620 
1621  int binding_point = 0;
1622  GPU_vertbuf_bind_as_ssbo(src_custom_normals, binding_point++);
1623  /* outputPosNor is bound at index 2 in the base shader. */
1624  binding_point = 2;
1625  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1626  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1627 
1628  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1629 
1630  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1631  * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1632  * needed. */
1634 
1635  /* Cleanup. */
1637 }
1638 
1640  GPUIndexBuf *subdiv_tris,
1641  const int material_count)
1642 {
1644  /* Happens on meshes with only loose geometry. */
1645  return;
1646  }
1647 
1648  const bool do_single_material = material_count <= 1;
1649 
1650  const char *defines = "#define SUBDIV_POLYGON_OFFSET\n";
1651  if (do_single_material) {
1652  defines =
1653  "#define SUBDIV_POLYGON_OFFSET\n"
1654  "#define SINGLE_MATERIAL\n";
1655  }
1656 
1657  GPUShader *shader = get_subdiv_shader(
1658  do_single_material ? SHADER_BUFFER_TRIS : SHADER_BUFFER_TRIS_MULTIPLE_MATERIALS, defines);
1659  GPU_shader_bind(shader);
1660 
1661  int binding_point = 0;
1662 
1663  /* subdiv_polygon_offset is always at binding point 0 for each shader using it. */
1664  GPU_vertbuf_bind_as_ssbo(cache->subdiv_polygon_offset_buffer, binding_point++);
1665  GPU_vertbuf_bind_as_ssbo(cache->extra_coarse_face_data, binding_point++);
1666 
1667  /* Outputs */
1668  GPU_indexbuf_bind_as_ssbo(subdiv_tris, binding_point++);
1669 
1670  if (!do_single_material) {
1671  GPU_vertbuf_bind_as_ssbo(cache->polygon_mat_offset, binding_point++);
1672  }
1673 
1674  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1675 
1676  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1677 
1678  /* This generates an index buffer, so we need to put a barrier on the element array. */
1680 
1681  /* Cleanup. */
1683 }
1684 
1686  GPUVertBuf *fdots_pos,
1687  GPUVertBuf *fdots_nor,
1688  GPUIndexBuf *fdots_indices)
1689 {
1691  /* Happens on meshes with only loose geometry. */
1692  return;
1693  }
1694 
1695  Subdiv *subdiv = cache->subdiv;
1696  OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1697 
1698  OpenSubdiv_Buffer src_buffer_interface;
1699  GPUVertBuf *src_buffer = create_buffer_and_interface(&src_buffer_interface,
1701  evaluator->wrapSrcBuffer(evaluator, &src_buffer_interface);
1702 
1703  OpenSubdiv_Buffer patch_arrays_buffer_interface;
1704  GPUVertBuf *patch_arrays_buffer = create_buffer_and_interface(&patch_arrays_buffer_interface,
1706  opensubdiv_gpu_buffer_init(&patch_arrays_buffer_interface, patch_arrays_buffer);
1707  evaluator->fillPatchArraysBuffer(evaluator, &patch_arrays_buffer_interface);
1708 
1709  OpenSubdiv_Buffer patch_index_buffer_interface;
1710  GPUVertBuf *patch_index_buffer = create_buffer_and_interface(&patch_index_buffer_interface,
1712  evaluator->wrapPatchIndexBuffer(evaluator, &patch_index_buffer_interface);
1713 
1714  OpenSubdiv_Buffer patch_param_buffer_interface;
1715  GPUVertBuf *patch_param_buffer = create_buffer_and_interface(&patch_param_buffer_interface,
1717  evaluator->wrapPatchParamBuffer(evaluator, &patch_param_buffer_interface);
1718 
1722  GPU_shader_bind(shader);
1723 
1724  int binding_point = 0;
1725  GPU_vertbuf_bind_as_ssbo(src_buffer, binding_point++);
1726  GPU_vertbuf_bind_as_ssbo(cache->gpu_patch_map.patch_map_handles, binding_point++);
1728  GPU_vertbuf_bind_as_ssbo(cache->fdots_patch_coords, binding_point++);
1729  GPU_vertbuf_bind_as_ssbo(cache->verts_orig_index, binding_point++);
1730  GPU_vertbuf_bind_as_ssbo(patch_arrays_buffer, binding_point++);
1731  GPU_vertbuf_bind_as_ssbo(patch_index_buffer, binding_point++);
1732  GPU_vertbuf_bind_as_ssbo(patch_param_buffer, binding_point++);
1733  GPU_vertbuf_bind_as_ssbo(fdots_pos, binding_point++);
1734  /* F-dots normals may not be requested, still reserve the binding point. */
1735  if (fdots_nor) {
1736  GPU_vertbuf_bind_as_ssbo(fdots_nor, binding_point);
1737  }
1738  binding_point++;
1739  GPU_indexbuf_bind_as_ssbo(fdots_indices, binding_point++);
1740  GPU_vertbuf_bind_as_ssbo(cache->extra_coarse_face_data, binding_point++);
1741  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1742 
1743  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_coarse_poly);
1744 
1745  /* This generates two vertex buffers and an index buffer, so we need to put a barrier on the
1746  * vertex attributes and element arrays. */
1748 
1749  /* Cleanup. */
1751 
1752  GPU_vertbuf_discard(patch_index_buffer);
1753  GPU_vertbuf_discard(patch_param_buffer);
1754  GPU_vertbuf_discard(patch_arrays_buffer);
1755  GPU_vertbuf_discard(src_buffer);
1756 }
1757 
1759 {
1760  GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_LINES, "#define SUBDIV_POLYGON_OFFSET\n");
1761  GPU_shader_bind(shader);
1762 
1763  int binding_point = 0;
1764  GPU_vertbuf_bind_as_ssbo(cache->subdiv_polygon_offset_buffer, binding_point++);
1765  GPU_vertbuf_bind_as_ssbo(cache->edges_orig_index, binding_point++);
1766  GPU_vertbuf_bind_as_ssbo(cache->extra_coarse_face_data, binding_point++);
1767  GPU_indexbuf_bind_as_ssbo(lines_indices, binding_point++);
1768  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1769 
1770  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1771 
1772  /* This generates an index buffer, so we need to put a barrier on the element array. */
1774 
1775  /* Cleanup. */
1777 }
1778 
1780  GPUIndexBuf *lines_indices,
1781  GPUVertBuf *lines_flags,
1782  uint num_loose_edges)
1783 {
1784  GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_LINES_LOOSE, "#define LINES_LOOSE\n");
1785  GPU_shader_bind(shader);
1786 
1787  GPU_indexbuf_bind_as_ssbo(lines_indices, 3);
1788  GPU_vertbuf_bind_as_ssbo(lines_flags, 4);
1789 
1790  drw_subdiv_compute_dispatch(cache, shader, 0, 0, num_loose_edges);
1791 
1792  /* This generates an index buffer, so we need to put a barrier on the element array. */
1794 
1795  /* Cleanup. */
1797 }
1798 
1800  GPUVertBuf *pos_nor,
1801  GPUVertBuf *edge_idx,
1802  GPUVertBuf *edge_fac)
1803 {
1804  /* No separate shader for the AMD driver case as we assume that the GPU will not change during
1805  * the execution of the program. */
1806  const char *defines = GPU_crappy_amd_driver() ? "#define GPU_AMD_DRIVER_BYTE_BUG\n" : nullptr;
1808  GPU_shader_bind(shader);
1809 
1810  int binding_point = 0;
1811  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1812  GPU_vertbuf_bind_as_ssbo(edge_idx, binding_point++);
1813  GPU_vertbuf_bind_as_ssbo(edge_fac, binding_point++);
1814  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1815 
1816  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1817 
1818  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1820 
1821  /* Cleanup. */
1823 }
1824 
1826  GPUVertBuf *pos_nor,
1827  GPUVertBuf *lnor)
1828 {
1830  /* Happens on meshes with only loose geometry. */
1831  return;
1832  }
1833 
1834  GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_LNOR, "#define SUBDIV_POLYGON_OFFSET\n");
1835  GPU_shader_bind(shader);
1836 
1837  int binding_point = 0;
1838  /* Inputs */
1839  /* subdiv_polygon_offset is always at binding point 0 for each shader using it. */
1840  GPU_vertbuf_bind_as_ssbo(cache->subdiv_polygon_offset_buffer, binding_point++);
1841  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1842  GPU_vertbuf_bind_as_ssbo(cache->extra_coarse_face_data, binding_point++);
1843  GPU_vertbuf_bind_as_ssbo(cache->verts_orig_index, binding_point++);
1844 
1845  /* Outputs */
1846  GPU_vertbuf_bind_as_ssbo(lnor, binding_point++);
1847  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1848 
1849  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1850 
1851  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1853 
1854  /* Cleanup. */
1856 }
1857 
1859  GPUVertBuf *coarse_data,
1860  GPUVertBuf *subdiv_data)
1861 {
1863  "#define SUBDIV_POLYGON_OFFSET\n");
1864  GPU_shader_bind(shader);
1865 
1866  int binding_point = 0;
1867  /* Inputs */
1868  /* subdiv_polygon_offset is always at binding point 0 for each shader using it. */
1869  GPU_vertbuf_bind_as_ssbo(cache->subdiv_polygon_offset_buffer, binding_point++);
1870  GPU_vertbuf_bind_as_ssbo(coarse_data, binding_point++);
1871 
1872  /* Outputs */
1873  GPU_vertbuf_bind_as_ssbo(subdiv_data, binding_point++);
1874  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1875 
1876  drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
1877 
1878  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1880 
1881  /* Cleanup. */
1883 }
1884 
1886  GPUVertBuf *pos_nor,
1887  GPUVertBuf *uvs,
1888  int uvs_offset,
1889  GPUVertBuf *stretch_angles)
1890 {
1892  GPU_shader_bind(shader);
1893 
1894  int binding_point = 0;
1895  /* Inputs */
1896  GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1897  GPU_vertbuf_bind_as_ssbo(uvs, binding_point++);
1898 
1899  /* Outputs */
1900  GPU_vertbuf_bind_as_ssbo(stretch_angles, binding_point++);
1901  BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1902 
1903  drw_subdiv_compute_dispatch(cache, shader, uvs_offset, 0, cache->num_subdiv_quads);
1904 
1905  /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1907 
1908  /* Cleanup. */
1910 }
1911 
1912 /* -------------------------------------------------------------------- */
1913 
1948  Mesh *mesh_eval,
1949  uint mat_len)
1950 {
1952 
1953  const int number_of_quads = cache->num_subdiv_loops / 4;
1954 
1955  if (mat_len == 1) {
1956  cache->mat_start = static_cast<int *>(MEM_callocN(sizeof(int), "subdiv mat_end"));
1957  cache->mat_end = static_cast<int *>(MEM_callocN(sizeof(int), "subdiv mat_end"));
1958  cache->mat_start[0] = 0;
1959  cache->mat_end[0] = number_of_quads;
1960  return;
1961  }
1962 
1963  /* Count number of subdivided polygons for each material. */
1964  int *mat_start = static_cast<int *>(MEM_callocN(sizeof(int) * mat_len, "subdiv mat_start"));
1965  int *subdiv_polygon_offset = cache->subdiv_polygon_offset;
1966 
1967  /* TODO: parallel_reduce? */
1968  for (int i = 0; i < mesh_eval->totpoly; i++) {
1969  const MPoly *mpoly = &mesh_eval->mpoly[i];
1970  const int next_offset = (i == mesh_eval->totpoly - 1) ? number_of_quads :
1971  subdiv_polygon_offset[i + 1];
1972  const int quad_count = next_offset - subdiv_polygon_offset[i];
1973  const int mat_index = mpoly->mat_nr;
1974  mat_start[mat_index] += quad_count;
1975  }
1976 
1977  /* Accumulate offsets. */
1978  int ofs = mat_start[0];
1979  mat_start[0] = 0;
1980  for (uint i = 1; i < mat_len; i++) {
1981  int tmp = mat_start[i];
1982  mat_start[i] = ofs;
1983  ofs += tmp;
1984  }
1985 
1986  /* Compute per polygon offsets. */
1987  int *mat_end = static_cast<int *>(MEM_dupallocN(mat_start));
1988  int *per_polygon_mat_offset = static_cast<int *>(
1989  MEM_mallocN(sizeof(int) * mesh_eval->totpoly, "per_polygon_mat_offset"));
1990 
1991  for (int i = 0; i < mesh_eval->totpoly; i++) {
1992  const MPoly *mpoly = &mesh_eval->mpoly[i];
1993  const int mat_index = mpoly->mat_nr;
1994  const int single_material_index = subdiv_polygon_offset[i];
1995  const int material_offset = mat_end[mat_index];
1996  const int next_offset = (i == mesh_eval->totpoly - 1) ? number_of_quads :
1997  subdiv_polygon_offset[i + 1];
1998  const int quad_count = next_offset - subdiv_polygon_offset[i];
1999  mat_end[mat_index] += quad_count;
2000 
2001  per_polygon_mat_offset[i] = material_offset - single_material_index;
2002  }
2003 
2004  cache->polygon_mat_offset = draw_subdiv_build_origindex_buffer(per_polygon_mat_offset,
2005  mesh_eval->totpoly);
2006  cache->mat_start = mat_start;
2007  cache->mat_end = mat_end;
2008 
2009  MEM_freeN(per_polygon_mat_offset);
2010 }
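/* Worked example (added for clarity, not in the original source): for a mesh with two materials
 * where material 0 owns 6 subdivided quads and material 1 owns 4, the counting pass gives
 * mat_start = {6, 4}; the accumulation turns it into the prefix sums {0, 6}; mat_end starts as a
 * copy of that and ends up as {6, 10} after the per-polygon pass. per_polygon_mat_offset then
 * stores, for each coarse polygon, the shift from its subdiv_polygon_offset position to its
 * position in the material-sorted index buffer. */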
2011 
2013  Mesh *mesh,
2014  MeshBatchCache *batch_cache,
2015  MeshBufferCache *mbc,
2016  const bool is_editmode,
2017  const bool is_paint_mode,
2018  const bool is_mode_active,
2019  const float obmat[4][4],
2020  const bool do_final,
2021  const bool do_uvedit,
2022  const bool do_cage,
2023  const ToolSettings *ts,
2024  const bool use_hide,
2025  OpenSubdiv_EvaluatorCache *evaluator_cache)
2026 {
2028  BLI_assert(runtime_data && runtime_data->has_gpu_subdiv);
2029 
2030  if (runtime_data->settings.level == 0) {
2031  return false;
2032  }
2033 
2034  Mesh *mesh_eval = mesh;
2035  BMesh *bm = nullptr;
2036  if (mesh->edit_mesh) {
2037  mesh_eval = BKE_object_get_editmesh_eval_final(ob);
2038  bm = mesh->edit_mesh->bm;
2039  }
2040 
2041  Subdiv *subdiv = BKE_subsurf_modifier_subdiv_descriptor_ensure(runtime_data, mesh_eval, true);
2042  if (!subdiv) {
2043  return false;
2044  }
2045 
2046  draw_subdiv_invalidate_evaluator_for_orco(subdiv, mesh_eval);
2047 
2048  if (!BKE_subdiv_eval_begin_from_mesh(
2049  subdiv, mesh_eval, nullptr, SUBDIV_EVALUATOR_TYPE_GPU, evaluator_cache)) {
2050  /* This could happen in two situations:
2051  * - OpenSubdiv is disabled.
2052  * - Something totally bad happened, and OpenSubdiv rejected our
2053  * topology.
2054  * Either way, we can't safely continue. However, we still have to handle potential loose
2055  * geometry, which is done separately. */
2056  if (mesh_eval->totpoly) {
2057  return false;
2058  }
2059  }
2060 
2061  DRWSubdivCache *draw_cache = mesh_batch_cache_ensure_subdiv_cache(batch_cache);
2062  if (!draw_subdiv_build_cache(draw_cache, subdiv, mesh_eval, runtime_data)) {
2063  return false;
2064  }
2065 
2066  /* Edges which do not come from coarse edges should not be drawn in edit cage mode. */
2067  const bool optimal_display = runtime_data->use_optimal_display || (is_editmode && !do_cage);
2068 
2069  draw_cache->bm = bm;
2070  draw_cache->mesh = mesh_eval;
2071  draw_cache->subdiv = subdiv;
2072  draw_cache->optimal_display = optimal_display;
2073  draw_cache->num_subdiv_triangles = tris_count_from_number_of_loops(draw_cache->num_subdiv_loops);
2074 
2075  /* Copy topology information for stats display. */
2076  runtime_data->stats_totvert = draw_cache->num_subdiv_verts;
2077  runtime_data->stats_totedge = draw_cache->num_subdiv_edges;
2078  runtime_data->stats_totpoly = draw_cache->num_subdiv_quads;
2079  runtime_data->stats_totloop = draw_cache->num_subdiv_loops;
2080 
2081  draw_cache->use_custom_loop_normals = (runtime_data->use_loop_normals) &&
2082  (mesh_eval->flag & ME_AUTOSMOOTH) &&
2083  CustomData_has_layer(&mesh_eval->ldata,
2084  CD_CUSTOMLOOPNORMAL);
2085 
2086  if (DRW_ibo_requested(mbc->buff.ibo.tris)) {
2087  draw_subdiv_cache_ensure_mat_offsets(draw_cache, mesh_eval, batch_cache->mat_len);
2088  }
2089 
2091  ob, mesh, is_editmode, is_paint_mode, is_mode_active, obmat, do_final, do_uvedit, ts);
2092  mr->use_hide = use_hide;
2093  draw_cache->use_hide = use_hide;
2094 
2095  /* Used for setting loop normals flags. Mapped extraction is only used during edit mode.
2096  * See comments in #extract_lnor_iter_poly_mesh.
2097  */
2098  draw_cache->is_edit_mode = mr->edit_bmesh != nullptr;
2099 
2100  draw_subdiv_cache_update_extra_coarse_face_data(draw_cache, mesh_eval, mr);
2101 
2102  blender::draw::mesh_buffer_cache_create_requested_subdiv(batch_cache, mbc, draw_cache, mr);
2103 
2104  mesh_render_data_free(mr);
2105 
2106  return true;
2107 }
2108 
2109 void DRW_subdivide_loose_geom(DRWSubdivCache *subdiv_cache, MeshBufferCache *cache)
2110 {
2111  const int coarse_loose_vert_len = cache->loose_geom.vert_len;
2112  const int coarse_loose_edge_len = cache->loose_geom.edge_len;
2113 
2114  if (coarse_loose_vert_len == 0 && coarse_loose_edge_len == 0) {
2115  /* Nothing to do. */
2116  return;
2117  }
2118 
2119  if (subdiv_cache->loose_geom.edges || subdiv_cache->loose_geom.verts) {
2120  /* Already processed. */
2121  return;
2122  }
2123 
2124  const Mesh *coarse_mesh = subdiv_cache->mesh;
2125  const bool is_simple = subdiv_cache->subdiv->settings.is_simple;
2126  const int resolution = subdiv_cache->resolution;
2127  const int resolution_1 = resolution - 1;
2128  const float inv_resolution_1 = 1.0f / (float)resolution_1;
2129  const int num_subdiv_vertices_per_coarse_edge = resolution - 2;
2130 
2131  const int num_subdivided_edge = coarse_loose_edge_len *
2132  (num_subdiv_vertices_per_coarse_edge + 1);
2133 
2134  /* Each subdivided edge stores data for both of its vertices. Although this duplicates memory
2135  * (and work), it keeps the overall logic simple here and in the buffer extractors, which store
2136  * duplicated values in their buffers anyway. */
2137  const int num_subdivided_verts = num_subdivided_edge * 2;
2138 
2139  DRWSubdivLooseEdge *loose_subd_edges = static_cast<DRWSubdivLooseEdge *>(
2140  MEM_callocN(sizeof(DRWSubdivLooseEdge) * num_subdivided_edge, "DRWSubdivLooseEdge"));
2141 
2142  DRWSubdivLooseVertex *loose_subd_verts = static_cast<DRWSubdivLooseVertex *>(
2143  MEM_callocN(sizeof(DRWSubdivLooseVertex) * (num_subdivided_verts + coarse_loose_vert_len),
2144  "DRWSubdivLooseEdge"));
2145 
2146  int subd_edge_offset = 0;
2147  int subd_vert_offset = 0;
2148 
2149  /* Subdivide each loose coarse edge. */
2150  for (int i = 0; i < coarse_loose_edge_len; i++) {
2151  const int coarse_edge_index = cache->loose_geom.edges[i];
2152  const MEdge *coarse_edge = &coarse_mesh->medge[cache->loose_geom.edges[i]];
2153 
2154  /* Perform interpolation of each vertex. */
2155  for (int i = 0; i < resolution - 1; i++, subd_edge_offset++) {
2156  DRWSubdivLooseEdge &subd_edge = loose_subd_edges[subd_edge_offset];
2157  subd_edge.coarse_edge_index = coarse_edge_index;
2158 
2159  /* First vert. */
2160  DRWSubdivLooseVertex &subd_v1 = loose_subd_verts[subd_vert_offset];
2161  subd_v1.coarse_vertex_index = (i == 0) ? coarse_edge->v1 : -1u;
2162  const float u1 = i * inv_resolution_1;
2163  BKE_subdiv_mesh_interpolate_position_on_edge(
2164  coarse_mesh, coarse_edge, is_simple, u1, subd_v1.co);
2165 
2166  subd_edge.loose_subdiv_v1_index = subd_vert_offset++;
2167 
2168  /* Second vert. */
2169  DRWSubdivLooseVertex &subd_v2 = loose_subd_verts[subd_vert_offset];
2170  subd_v2.coarse_vertex_index = ((i + 1) == resolution - 1) ? coarse_edge->v2 : -1u;
2171  const float u2 = (i + 1) * inv_resolution_1;
2172  BKE_subdiv_mesh_interpolate_position_on_edge(
2173  coarse_mesh, coarse_edge, is_simple, u2, subd_v2.co);
2174 
2175  subd_edge.loose_subdiv_v2_index = subd_vert_offset++;
2176  }
2177  }
2178 
2179  /* Copy the remaining loose_verts. */
2180  for (int i = 0; i < coarse_loose_vert_len; i++) {
2181  const int coarse_vertex_index = cache->loose_geom.verts[i];
2182  const MVert &coarse_vertex = coarse_mesh->mvert[coarse_vertex_index];
2183 
2184  DRWSubdivLooseVertex &subd_v = loose_subd_verts[subd_vert_offset++];
2185  subd_v.coarse_vertex_index = coarse_vertex_index;
2186  copy_v3_v3(subd_v.co, coarse_vertex.co);
2187  }
2188 
2189  subdiv_cache->loose_geom.edges = loose_subd_edges;
2190  subdiv_cache->loose_geom.verts = loose_subd_verts;
2191  subdiv_cache->loose_geom.edge_len = num_subdivided_edge;
2192  subdiv_cache->loose_geom.vert_len = coarse_loose_vert_len;
2193  subdiv_cache->loose_geom.loop_len = num_subdivided_edge * 2 + coarse_loose_vert_len;
2194 }
2195 
2196 blender::Span<DRWSubdivLooseEdge> draw_subdiv_cache_get_loose_edges(const DRWSubdivCache *cache)
2197 {
2198  return {cache->loose_geom.edges, static_cast<int64_t>(cache->loose_geom.edge_len)};
2199 }
2200 
2201 blender::Span<DRWSubdivLooseVertex> draw_subdiv_cache_get_loose_verts(const DRWSubdivCache *cache)
2202 {
2203  return {cache->loose_geom.verts + cache->loose_geom.edge_len * 2,
2204  static_cast<int64_t>(cache->loose_geom.vert_len)};
2205 }
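A hypothetical consumer of the two accessors above could walk the subdivided loose edges and the trailing loose vertices as sketched below (illustrative only: print_loose_geom is not part of this file and assumes <cstdio>; the field names are the ones filled in by DRW_subdivide_loose_geom above):

static void print_loose_geom(const DRWSubdivCache *cache)
{
  blender::Span<DRWSubdivLooseEdge> loose_edges = draw_subdiv_cache_get_loose_edges(cache);
  blender::Span<DRWSubdivLooseVertex> loose_verts = draw_subdiv_cache_get_loose_verts(cache);

  for (const DRWSubdivLooseEdge &edge : loose_edges) {
    /* Each subdivided edge references its own pair of vertices, duplicated on purpose so
     * extractors can emit them without extra indirection. */
    const DRWSubdivLooseVertex &v1 = cache->loose_geom.verts[edge.loose_subdiv_v1_index];
    const DRWSubdivLooseVertex &v2 = cache->loose_geom.verts[edge.loose_subdiv_v2_index];
    printf("loose edge: (%f, %f, %f) -> (%f, %f, %f)\n",
           v1.co[0], v1.co[1], v1.co[2], v2.co[0], v2.co[1], v2.co[2]);
  }

  /* The vertex span only covers the loose vertices appended after all edge vertices. */
  for (const DRWSubdivLooseVertex &vert : loose_verts) {
    printf("loose vertex %u: (%f, %f, %f)\n",
           vert.coarse_vertex_index, vert.co[0], vert.co[1], vert.co[2]);
  }
}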
2206 
2207 static OpenSubdiv_EvaluatorCache *g_evaluator_cache = nullptr;
2208 
2209 void DRW_create_subdivision(Object *ob,
2210  Mesh *mesh,
2211  MeshBatchCache *batch_cache,
2212  MeshBufferCache *mbc,
2213  const bool is_editmode,
2214  const bool is_paint_mode,
2215  const bool is_mode_active,
2216  const float obmat[4][4],
2217  const bool do_final,
2218  const bool do_uvedit,
2219  const bool do_cage,
2220  const ToolSettings *ts,
2221  const bool use_hide)
2222 {
2223  if (g_evaluator_cache == nullptr) {
2224  g_evaluator_cache = openSubdiv_createEvaluatorCache(OPENSUBDIV_EVALUATOR_GPU);
2225  }
2226 
2227 #undef TIME_SUBDIV
2228 
2229 #ifdef TIME_SUBDIV
2230  const double begin_time = PIL_check_seconds_timer();
2231 #endif
2232 
2233  if (!draw_subdiv_create_requested_buffers(ob,
2234  mesh,
2235  batch_cache,
2236  mbc,
2237  is_editmode,
2238  is_paint_mode,
2239  is_mode_active,
2240  obmat,
2241  do_final,
2242  do_uvedit,
2243  do_cage,
2244  ts,
2245  use_hide,
2246  g_evaluator_cache)) {
2247  return;
2248  }
2249 
2250 #ifdef TIME_SUBDIV
2251  const double end_time = PIL_check_seconds_timer();
2252  fprintf(stderr, "Time to update subdivision: %f\n", end_time - begin_time);
2253  fprintf(stderr, "Maximum FPS: %f\n", 1.0 / (end_time - begin_time));
2254 #endif
2255 }
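The timing code above is compiled out by the #undef TIME_SUBDIV near the top of the function; flipping the toggle re-enables the PIL_check_seconds_timer() measurements:

/* Replace the "#undef TIME_SUBDIV" above with a define to print update times to stderr. */
#define TIME_SUBDIV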
2256 
2257 void DRW_subdiv_free()
2258 {
2259  for (int i = 0; i < NUM_SHADERS; ++i) {
2260  GPU_shader_free(g_subdiv_shaders[i]);
2261  }
2262 
2263  DRW_cache_free_old_subdiv();
2264 
2265  if (g_evaluator_cache) {
2266  openSubdiv_deleteEvaluatorCache(g_evaluator_cache);
2267  g_evaluator_cache = nullptr;
2268  }
2269 }
2270 
2271 static LinkNode *gpu_subdiv_free_queue = nullptr;
2272 static ThreadMutex gpu_subdiv_queue_mutex = BLI_MUTEX_INITIALIZER;
2273 
2274 void DRW_subdiv_cache_free(Subdiv *subdiv)
2275 {
2276  BLI_mutex_lock(&gpu_subdiv_queue_mutex);
2277  BLI_linklist_prepend(&gpu_subdiv_free_queue, subdiv);
2278  BLI_mutex_unlock(&gpu_subdiv_queue_mutex);
2279 }
2280 
2281 void DRW_cache_free_old_subdiv()
2282 {
2283  if (gpu_subdiv_free_queue == nullptr) {
2284  return;
2285  }
2286 
2287  BLI_mutex_lock(&gpu_subdiv_queue_mutex);
2288 
2289  while (gpu_subdiv_free_queue != nullptr) {
2290  Subdiv *subdiv = static_cast<Subdiv *>(BLI_linklist_pop(&gpu_subdiv_free_queue));
2291  /* Set the type to CPU so that we do actually free the cache. */
2292  subdiv->evaluator->type = OPENSUBDIV_EVALUATOR_CPU;
2293  BKE_subdiv_free(subdiv);
2294  }
2295 
2296  BLI_mutex_unlock(&gpu_subdiv_queue_mutex);
2297 }
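The two functions above implement a small deferred-free protocol: a Subdiv that was evaluated on the GPU owns GPU buffers, so code that releases it only queues it, and the draw code drains the queue while a GPU context is bound. A minimal sketch of the intended call sites (the example_* functions are hypothetical; only DRW_subdiv_cache_free and DRW_cache_free_old_subdiv are real):

/* Hypothetical: called from whatever code releases the mesh or modifier data,
 * possibly outside the draw thread. Only appends to the mutex-protected queue. */
static void example_release_gpu_subdiv(Subdiv *subdiv)
{
  DRW_subdiv_cache_free(subdiv);
}

/* Hypothetical: called by the draw manager while a GPU context is bound, so the
 * queued evaluators can actually release their GPU resources. */
static void example_draw_manager_housekeeping(void)
{
  DRW_cache_free_old_subdiv();
}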