switch (shader_type) {
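/* get_shader_name(): human-readable debug names, one per shader_type, used when compiling the
 * subdivision compute shaders. */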
switch (shader_type) {
    return "subdiv lines build";
    return "subdiv lines loose build";
    return "subdiv lnor build";
    return "subdiv edge fac build";
    return "subdiv tris";
    return "subdiv normals accumulate";
    return "subdiv normals finalize";
    return "subdiv patch evaluation";
    return "subdiv patch evaluation face-varying";
    return "subdiv patch evaluation face dots";
    return "subdiv patch evaluation face dots with normals";
    return "subdiv patch evaluation orco";
    return "subdiv custom data interp 1D";
    return "subdiv custom data interp 2D";
    return "subdiv custom data interp 3D";
    return "subdiv custom data interp 4D";
    return "subdiv sculpt data";
    return "subdiv uv stretch angle";
    return "subdiv uv stretch area";
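/* get_patch_evaluation_shader(): the defines select which OpenSubdiv patch evaluation variant
 * is compiled (face-varying data, face dots, face dots with normals, orco, or the default
 * vertex evaluation). */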
const char *defines = nullptr;
if (shader_type == SHADER_PATCH_EVALUATION_FVAR) {
  defines =
      "#define OSD_PATCH_BASIS_GLSL\n"
      "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
      "#define FVAR_EVALUATION\n";
}
else if (shader_type == SHADER_PATCH_EVALUATION_FACE_DOTS) {
  defines =
      "#define OSD_PATCH_BASIS_GLSL\n"
      "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
      "#define FDOTS_EVALUATION\n";
}
else if (shader_type == SHADER_PATCH_EVALUATION_FACE_DOTS_WITH_NORMALS) {
  defines =
      "#define OSD_PATCH_BASIS_GLSL\n"
      "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
      "#define FDOTS_EVALUATION\n"
      "#define FDOTS_NORMALS\n";
}
else if (shader_type == SHADER_PATCH_EVALUATION_ORCO) {
  defines =
      "#define OSD_PATCH_BASIS_GLSL\n"
      "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
      "#define ORCO_EVALUATION\n";
}
else {
  defines =
      "#define OSD_PATCH_BASIS_GLSL\n"
      "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n";
}
char *library_code = static_cast<char *>(
    MEM_mallocN(strlen(patch_basis_source) + strlen(subdiv_lib_code) + 1,
                "subdiv patch evaluation library code"));
library_code[0] = '\0';
strcat(library_code, patch_basis_source);
strcat(library_code, subdiv_lib_code);
if (ELEM(shader_type,
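/* The repeated `format.attr_len == 0` checks below are the usual lazy-initialization idiom for
 * static GPUVertFormat singletons: the format is built only on first use. A minimal sketch of
 * the pattern, assuming GPU_vertex_format.h; the function name, attribute name and type here
 * are illustrative placeholders, not taken from this file. */
static GPUVertFormat *get_example_format()
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "data", GPU_COMP_I32, 1, GPU_FETCH_INT);
  }
  return &format;
}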
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
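/* make_patch_coord(): the parametric (u, v) coordinates are quantized to 16 bits each and
 * packed into a single 32-bit value, u in the high half and v in the low half. */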
(static_cast<unsigned int>(u * 65535.0f) << 16) | static_cast<unsigned int>(v * 65535.0f),
if (format.attr_len == 0) {
if (format.attr_len == 0) {
if (format.attr_len == 0) {
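/* opensubdiv_gpu_buffer_init() exposes a GPUVertBuf to OpenSubdiv through the OpenSubdiv_Buffer
 * callbacks (alloc, device_alloc, device_update, wrap_device_handle, bind_gpu), and stores the
 * vertex buffer in the interface's `data` pointer. */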
buffer_interface->data = vertbuf;
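/* Each subdivided quad is rendered as two triangles, hence `loops / 4` quads times 2. */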
const uint32_t number_of_quads = number_of_loops / 4;
return number_of_quads * 2;
memcpy(vbo_data, vert_origindex, num_loops * sizeof(int32_t));
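/* draw_patch_map_build(): the evaluator's getPatchMap() fills GPU buffers with the patch map
 * handles and quadtree, and reports the min/max patch face, maximum depth and whether patches
 * are triangular, so the compute shaders can look up patches directly. */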
int min_patch_face = 0;
int max_patch_face = 0;
int patches_are_triangular = 0;

    &patch_map_handles_interface,
    &patch_map_quad_tree_interface,
    &patches_are_triangular);
cache->ubo = nullptr;
#define SUBDIV_COARSE_FACE_FLAG_SMOOTH 1u
#define SUBDIV_COARSE_FACE_FLAG_SELECT 2u
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE 4u
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN 8u

#define SUBDIV_COARSE_FACE_FLAG_OFFSET 28u

#define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK \
  (SUBDIV_COARSE_FACE_FLAG_SMOOTH << SUBDIV_COARSE_FACE_FLAG_OFFSET)
#define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK \
  (SUBDIV_COARSE_FACE_FLAG_SELECT << SUBDIV_COARSE_FACE_FLAG_OFFSET)
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK \
  (SUBDIV_COARSE_FACE_FLAG_ACTIVE << SUBDIV_COARSE_FACE_FLAG_OFFSET)
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK \
  (SUBDIV_COARSE_FACE_FLAG_HIDDEN << SUBDIV_COARSE_FACE_FLAG_OFFSET)

#define SUBDIV_COARSE_FACE_LOOP_START_MASK \
  ~((SUBDIV_COARSE_FACE_FLAG_SMOOTH | SUBDIV_COARSE_FACE_FLAG_SELECT | \
     SUBDIV_COARSE_FACE_FLAG_ACTIVE | SUBDIV_COARSE_FACE_FLAG_HIDDEN) \
    << SUBDIV_COARSE_FACE_FLAG_OFFSET)
if (format.attr_len == 0) {
if (subdiv_cache == nullptr) {
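/* draw_subdiv_topology_info_cb(): SubdivForeachContext callback that receives the subdivided
 * vertex/edge/loop/polygon counts before traversal and is used to size the cache arrays; the
 * traversal is skipped when there are no vertices and no loops. */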
const int num_vertices,
const int num_polygons,
const int *subdiv_polygon_offset)

if (num_vertices == 0 && num_loops == 0) {

if (num_loops != 0) {
for (int i = 0; i < num_vertices; i++) {

for (int i = 0; i < num_edges; i++) {
const int UNUSED(ptex_face_index),
const int coarse_vertex_index,
const int UNUSED(coarse_poly_index),
const int UNUSED(coarse_corner),
const int subdiv_vertex_index)

const int UNUSED(ptex_face_index),
const int UNUSED(coarse_edge_index),
const int UNUSED(coarse_poly_index),
const int UNUSED(coarse_corner),
const int UNUSED(subdiv_vertex_index))

const int coarse_edge_index,
const int subdiv_edge_index,
const bool UNUSED(is_loose),
const int UNUSED(subdiv_v1),
const int UNUSED(subdiv_v2))

int coarse_index = coarse_edge_index;
if (coarse_index != -1) {

const int ptex_face_index,
const int UNUSED(coarse_loop_index),
const int coarse_poly_index,
const int UNUSED(coarse_corner),
const int subdiv_loop_index,
const int subdiv_vertex_index,
const int subdiv_edge_index)
memset(foreach_context, 0, sizeof(*foreach_context));

foreach_context.user_data = cache_building_context;

cache_building_context
int ofs = vertex_offsets[0];
vertex_offsets[0] = 0;
  int tmp = vertex_offsets[i];
  vertex_offsets[i] = ofs;

int *tmp_set_faces = static_cast<int *>(

  for (int j = 0; j < 4; j++) {
    int first_face_offset = vertex_offsets[subdiv_vertex] + tmp_set_faces[subdiv_vertex];
    adjacent_faces[first_face_offset] = i;
    tmp_set_faces[subdiv_vertex] += 1;
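/* build_vertex_face_adjacency_maps() builds a CSR-style vertex-to-quad adjacency list:
 * per-vertex counts are turned into prefix-sum offsets, then every quad is scattered into
 * adjacent_faces[] using a per-vertex fill cursor. A standalone sketch of the same pattern;
 * the names and the plain calloc/free are illustrative, not from this file: */
static void build_adjacency_example(const int *quad_verts,
                                    const int num_quads,
                                    const int num_verts,
                                    int *offsets /* num_verts + 1, zero-initialized */,
                                    int *adjacent_faces /* num_quads * 4 */)
{
  /* Count how many quads touch each vertex. */
  for (int i = 0; i < num_quads * 4; i++) {
    offsets[quad_verts[i] + 1] += 1;
  }
  /* Exclusive prefix sum: offsets[v] is where vertex v's quad list starts. */
  for (int v = 0; v < num_verts; v++) {
    offsets[v + 1] += offsets[v];
  }
  /* Scatter each quad into the list of its four vertices. */
  int *cursor = static_cast<int *>(calloc(num_verts, sizeof(int)));
  for (int i = 0; i < num_quads; i++) {
    for (int j = 0; j < 4; j++) {
      const int v = quad_verts[i * 4 + j];
      adjacent_faces[offsets[v] + cursor[v]] = i;
      cursor[v] += 1;
    }
  }
  free(cursor);
}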
cache_building_context.settings = &to_mesh_settings;
cache_building_context.cache = cache;
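/* Face-dot patch coordinates: a quad face has a single ptex face and its center is at
 * (0.5, 0.5); an n-gon is split into one ptex sub-face per corner, and the face center lies at
 * (1.0, 1.0) of each sub-face. */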
for (int i = 0; i < mesh_eval->totpoly; i++) {

  blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 0.5f, 0.5f);

  blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 1.0f, 1.0f);

if (first_loop_index[vertex] != -1) {

first_loop_index[vertex] = i;

    cache_building_context.patch_coords[first_loop_index[vertex]];
1213 "DRWSubdivUboStorage is not padded to a multiple of the size of vec4");
const int src_offset,
const int dst_offset,
const uint total_dispatch_size,
const bool has_sculpt_mask)

const int src_offset,
const int dst_offset,
const uint total_dispatch_size,
const bool has_sculpt_mask = false)

cache, &storage, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask);
#define SUBDIV_LOCAL_WORK_GROUP_SIZE 64
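/* The compute shaders use one thread per element with SUBDIV_LOCAL_WORK_GROUP_SIZE threads per
 * work group; get_dispatch_size() then presumably rounds the element count up to whole groups,
 * e.g. (illustrative sketch, not the file's implementation): */
static uint get_dispatch_size_example(const uint elements)
{
  return divide_ceil_u(elements, SUBDIV_LOCAL_WORK_GROUP_SIZE);
}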
const int src_offset,
const int dst_offset,
uint total_dispatch_size,
const bool has_sculpt_mask = false)

uint dispatch_rx = dispatch_size;
uint dispatch_ry = 1u;
if (dispatch_rx > max_res_x) {
  dispatch_rx = dispatch_ry = ceilf(sqrtf(dispatch_size));
  if ((dispatch_rx * (dispatch_ry - 1)) >= dispatch_size) {

cache, shader, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask);
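/* When the 1D group count exceeds the GPU's maximum work group count in X, the dispatch is
 * reshaped into a roughly square rx * ry grid (shrinking ry when a whole row would be unused);
 * total_dispatch_size is passed to the shader so out-of-range invocations can be discarded. */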
int binding_point = 0;

const int face_varying_channel,
const int dst_offset)

evaluator->wrapFVarSrcBuffer(evaluator, face_varying_channel, &src_buffer_interface);
    evaluator, face_varying_channel, &patch_arrays_buffer_interface);
    evaluator, face_varying_channel, &patch_index_buffer_interface);
    evaluator, face_varying_channel, &patch_param_buffer_interface);

int binding_point = 0;

const int src_offset = src_buffer_interface.buffer_offset / 2;
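/* draw_subdiv_extract_uvs(): the face-varying source buffer offset reported by OpenSubdiv
 * includes the 2-float UV stride, so it is divided by 2 to index whole UVs. */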
bool compress_to_u16)

if (dimensions == 1) {
    "#define SUBDIV_POLYGON_OFFSET\n"
    "#define DIMENSIONS 1\n");
else if (dimensions == 2) {
    "#define SUBDIV_POLYGON_OFFSET\n"
    "#define DIMENSIONS 2\n");
else if (dimensions == 3) {
    "#define SUBDIV_POLYGON_OFFSET\n"
    "#define DIMENSIONS 3\n");
else if (dimensions == 4) {
  if (compress_to_u16) {
      "#define SUBDIV_POLYGON_OFFSET\n"
      "#define DIMENSIONS 4\n"
      "#define GPU_FETCH_U16_TO_FLOAT\n");
      "#define SUBDIV_POLYGON_OFFSET\n"
      "#define DIMENSIONS 4\n");
int binding_point = 0;
int binding_point = 1;
int binding_point = 0;
int binding_point = 0;
int binding_point = 0;
const int material_count)

const bool do_single_material = material_count <= 1;
const char *defines = "#define SUBDIV_POLYGON_OFFSET\n";
if (do_single_material) {
  defines =
      "#define SUBDIV_POLYGON_OFFSET\n"
      "#define SINGLE_MATERIAL\n";
}

int binding_point = 0;

if (!do_single_material) {
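/* draw_subdiv_build_tris_buffer(): with a single material the triangle index buffer is emitted
 * in quad order; with multiple materials the SINGLE_MATERIAL define is dropped and the
 * per-polygon material offsets computed in draw_subdiv_cache_ensure_mat_offsets() re-sort the
 * triangles into per-material ranges. */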
int binding_point = 0;
int binding_point = 0;

uint num_loose_edges)

int binding_point = 0;
int binding_point = 0;

    "#define SUBDIV_POLYGON_OFFSET\n");
int binding_point = 0;
int binding_point = 0;
cache->mat_end[0] = number_of_quads;

int *mat_start = static_cast<int *>(MEM_callocN(sizeof(int) * mat_len, "subdiv mat_start"));

for (int i = 0; i < mesh_eval->totpoly; i++) {
  const int next_offset = (i == mesh_eval->totpoly - 1) ? number_of_quads :
                                                          subdiv_polygon_offset[i + 1];
  const int quad_count = next_offset - subdiv_polygon_offset[i];
  const int mat_index = mpoly->mat_nr;
  mat_start[mat_index] += quad_count;

int ofs = mat_start[0];

for (uint i = 1; i < mat_len; i++) {
  int tmp = mat_start[i];

int *mat_end = static_cast<int *>(MEM_dupallocN(mat_start));
int *per_polygon_mat_offset = static_cast<int *>(

for (int i = 0; i < mesh_eval->totpoly; i++) {
  const int mat_index = mpoly->mat_nr;
  const int single_material_index = subdiv_polygon_offset[i];
  const int material_offset = mat_end[mat_index];
  const int next_offset = (i == mesh_eval->totpoly - 1) ? number_of_quads :
                                                          subdiv_polygon_offset[i + 1];
  const int quad_count = next_offset - subdiv_polygon_offset[i];
  mat_end[mat_index] += quad_count;

  per_polygon_mat_offset[i] = material_offset - single_material_index;
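/* Worked example with assumed numbers: polygons P0 (material 0, 2 quads), P1 (material 1,
 * 3 quads), P2 (material 0, 1 quad). Natural quad order gives offsets P0 = 0, P1 = 2, P2 = 5;
 * material-sorted order gives mat_start = {0, 3}. Then:
 *   per_polygon_mat_offset[P0] = 0 - 0 =  0
 *   per_polygon_mat_offset[P1] = 3 - 2 =  1
 *   per_polygon_mat_offset[P2] = 2 - 5 = -3
 * so a quad's material-sorted position is its natural position plus its polygon's offset. */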
const bool is_editmode,
const bool is_paint_mode,
const bool is_mode_active,
const float obmat[4][4],
const bool do_final,
const bool do_uvedit,
const bool use_hide,

const bool optimal_display = runtime_data->use_optimal_display || (is_editmode && !do_cage);

draw_cache->bm = bm;
draw_cache->mesh = mesh_eval;
draw_cache->subdiv = subdiv;

    ob, mesh, is_editmode, is_paint_mode, is_mode_active, obmat, do_final, do_uvedit, ts);
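/* DRW_subdivide_loose_geom(): loose edges and vertices are handled separately from the GPU
 * patch evaluation and are subdivided on the CPU here; each loose coarse edge yields
 * (resolution - 1) segments whose endpoints are interpolated with
 * BKE_subdiv_mesh_interpolate_position_on_edge(). */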
if (coarse_loose_vert_len == 0 && coarse_loose_edge_len == 0) {

const Mesh *coarse_mesh = subdiv_cache->mesh;
const int resolution = subdiv_cache->resolution;
const int resolution_1 = resolution - 1;
const float inv_resolution_1 = 1.0f / (float)resolution_1;
const int num_subdiv_vertices_per_coarse_edge = resolution - 2;

const int num_subdivided_edge = coarse_loose_edge_len *
                                (num_subdiv_vertices_per_coarse_edge + 1);

const int num_subdivided_verts = num_subdivided_edge * 2;

                                         "DRWSubdivLooseEdge"));

int subd_edge_offset = 0;
int subd_vert_offset = 0;

for (int i = 0; i < coarse_loose_edge_len; i++) {
  for (int i = 0; i < resolution - 1; i++, subd_edge_offset++) {
    const float u1 = i * inv_resolution_1;
    BKE_subdiv_mesh_interpolate_position_on_edge(
        coarse_mesh, coarse_edge, is_simple, u1, subd_v1.co);

    const float u2 = (i + 1) * inv_resolution_1;
    BKE_subdiv_mesh_interpolate_position_on_edge(
        coarse_mesh, coarse_edge, is_simple, u2, subd_v2.co);

for (int i = 0; i < coarse_loose_vert_len; i++) {
  const MVert &coarse_vertex = coarse_mesh->mvert[coarse_vertex_index];
const bool is_editmode,
const bool is_paint_mode,
const bool is_mode_active,
const float obmat[4][4],
const bool do_final,
const bool do_uvedit,
const bool use_hide)
fprintf(stderr, "Time to update subdivision: %f\n", end_time - begin_time);
fprintf(stderr, "Maximum FPS: %f\n", 1.0 / (end_time - begin_time));