34 #define DEBUG_UBO_BINDING
204 write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);
340 return (culling->mask & view->culling_mask) != 0;
355 const float (*frustum_planes)[4],
359 if (bsphere->radius < 0.0f) {
365 float radius_sum = bsphere->radius + frustum_bsphere->radius;
366 if (center_dist_sq > square_f(radius_sum)) {
374 for (int p = 0; p < 6; p++) {
376 if (dist < -bsphere->radius) {
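The fragments from lines 355-376 outline a two-stage sphere-vs-frustum rejection. A minimal hedged sketch of that shape, assuming the BoundSphere center/radius layout and only the helpers listed further below (len_squared_v3v3, square_f, plane_point_side_v3); not the exact upstream body:

static bool culling_sphere_test_sketch(const BoundSphere *frustum_bsphere,
                                       const float (*frustum_planes)[4],
                                       const BoundSphere *bsphere)
{
  /* A negative radius marks the object as never culled. */
  if (bsphere->radius < 0.0f) {
    return true;
  }
  /* Coarse pass: reject if the two bounding spheres cannot overlap at all. */
  const float center_dist_sq = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
  const float radius_sum = bsphere->radius + frustum_bsphere->radius;
  if (center_dist_sq > square_f(radius_sum)) {
    return false;
  }
  /* Fine pass: reject if the sphere lies entirely behind any of the 6 frustum planes. */
  for (int p = 0; p < 6; p++) {
    const float dist = plane_point_side_v3(frustum_planes[p], bsphere->center);
    if (dist < -bsphere->radius) {
      return false;
    }
  }
  return true;
}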
386 for (int p = 0; p < 6; p++) {
388 for (int v = 0; v < 8; v++) {
407 for (int c = 0; c < 8; c++) {
439 for (int i = 6; i--;) {
440 float frustum_plane_local[4], bb_near[3], bb_far[3];
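Lines 386-440 apply the same per-plane rejection to box corners; the min/max variant only looks at the near and far corners for each plane. A hedged single-plane step, assuming just aabb_get_near_far_from_plane and plane_point_side_v3 from the symbol list below (the object-space plane transform done upstream is omitted):

/* Returns false when the whole AABB lies behind `plane`, i.e. it can be culled. */
static bool aabb_vs_plane_sketch(const float plane[4],
                                 const float bbmin[3],
                                 const float bbmax[3])
{
  float bb_near[3], bb_far[3];
  /* Pick the AABB corners closest to / farthest from the plane along its normal. */
  aabb_get_near_far_from_plane(plane, bbmin, bbmax, bb_near, bb_far);
  /* If even the farthest corner is behind the plane, every corner is. */
  return plane_point_side_v3(plane, bb_far) >= 0.0f;
}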
455 *corners = view->frustum_corners;
461 memcpy(planes, view->frustum_planes, sizeof(float[6][4]));
470 if (!view->is_dirty) {
485 #ifdef DRW_DEBUG_CULLING
486 if (G.debug_value != 0) {
498 if (view->visibility_fn) {
506 view->is_dirty = false;
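Lines 470-506 hint at a lazy culling update: skip when the view is not dirty, test every stored culling state against the frustum, let an optional callback override the result, then clear the dirty flag. A hedged sketch under assumed DRWCullingState fields (bsphere, mask, user_data), an assumed view->frustum_bsphere member, and an assumed callback signature:

static void compute_culling_sketch(DRWView *view, BLI_memblock *cullstates)
{
  if (!view->is_dirty) {
    return; /* Culling results from the previous update are still valid. */
  }

  BLI_memblock_iter iter;
  BLI_memblock_iternew(cullstates, &iter);
  DRWCullingState *cull;
  while ((cull = BLI_memblock_iterstep(&iter))) {
    bool culled = !draw_culling_sphere_test(
        &view->frustum_bsphere, view->frustum_planes, &cull->bsphere);

    if (view->visibility_fn) {
      /* Let the caller veto or force visibility. */
      culled = !view->visibility_fn(!culled, cull->user_data);
    }
    /* Set or clear this view's bit in the per-object culling mask. */
    SET_FLAG_FROM_TEST(cull->mask, culled, view->culling_mask);
  }

  view->is_dirty = false;
}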
522 if (obmat_loc != -1) {
525 if (obinv_loc != -1) {
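Lines 522-525 are the legacy per-call matrix upload: only taken when the shader still exposes plain model / model-inverse uniform locations. A hedged sketch, with DRWObjectMatrix and its model/modelinverse fields as assumed names:

static void legacy_matrix_update_sketch(GPUShader *shader,
                                        struct BLI_memblock *obmats,
                                        const DRWResourceHandle *handle,
                                        int obmat_loc,
                                        int obinv_loc)
{
  /* Fetch the per-resource matrices stored for this handle. */
  DRWObjectMatrix *ob_mats = DRW_memblock_elem_from_handle(obmats, handle);
  if (obmat_loc != -1) {
    GPU_shader_uniform_vector(shader, obmat_loc, 16, 1, (float *)ob_mats->model);
  }
  if (obinv_loc != -1) {
    GPU_shader_uniform_vector(shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
  }
}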
546 inst_count = max_ii(0, inst_count);
548 if (baseinst_loc != -1) {
565 if (state->inst_count == 0) {
568 if (state->baseinst_loc == -1) {
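Lines 565-568 gate the flush of a merged instance range: nothing pending means no draw, and the multi-draw list is only used when no base-instance uniform is required. A hedged sketch; the draw_list parameter and the state->batch field stand in for what upstream keeps in global draw state:

static void indirect_call_flush_sketch(DRWShadingGroup *shgroup,
                                       DRWCommandsState *state,
                                       GPUDrawList *draw_list,
                                       GPUShader *shader)
{
  if (state->inst_count == 0) {
    return; /* Nothing batched since the last flush. */
  }
  if (state->baseinst_loc == -1) {
    /* Multi-draw path: queue the instance range, submitted later in one call. */
    GPU_batch_set_shader(state->batch, shader);
    GPU_draw_list_append(draw_list, state->batch, state->base_inst, state->inst_count);
  }
  else {
    /* Fallback: a single instanced draw with an explicit base instance. */
    draw_geometry_execute(
        shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
  }
}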
587 #define MAX_UNIFORM_STACK_SIZE 64
590 int array_uniform_loc = -1;
592 float mat4_stack[4 * 4];
596 DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used - 1;
598 for (int i = 0; i < unichunk->uniform_used; i++, uni--) {
605 if (array_uniform_loc == -1) {
612 array_index -= uni->length;
613 memcpy(&mat4_stack[array_index], uni->fvalue, sizeof(float) * uni->length);
615 if (array_index <= 0) {
617 array_uniform_loc = -1;
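Lines 587-617 rebuild a mat4 uniform that was recorded as several small float chunks sharing one location: chunks are copied into a local stack from the end (iteration is newest-first) and uploaded in one call once index 0 is reached. A hedged sketch of that per-chunk step; uni->location is an assumed field, length/fvalue follow the fragments above:

static void uniform_mat4_chunk_step_sketch(GPUShader *shader,
                                           const DRWUniform *uni,
                                           int *array_uniform_loc,
                                           int *array_index,
                                           float mat4_stack[4 * 4])
{
  if (*array_uniform_loc == -1) {
    /* First chunk seen for this matrix: start a new accumulation. */
    *array_uniform_loc = uni->location;
    *array_index = 4 * 4;
  }
  /* Chunks arrive back-to-front, so fill the stack from the end. */
  *array_index -= uni->length;
  memcpy(&mat4_stack[*array_index], uni->fvalue, sizeof(float) * uni->length);

  if (*array_index <= 0) {
    /* Whole matrix gathered: push the 16 floats in a single call. */
    GPU_shader_uniform_vector(shader, *array_uniform_loc, 16, 1, mat4_stack);
    *array_uniform_loc = -1;
  }
}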
732 const bool is_instancing = (batch->inst[0] != NULL);
741 if (!is_instancing) {
751 while (start < tot) {
796 if (neg_scale != state->neg_scale) {
797 state->neg_scale = neg_scale;
802 if (state->resource_chunk != chunk) {
803 if (state->chunkid_loc != -1) {
806 if (state->obmats_loc != -1) {
810 if (state->obinfos_loc != -1) {
814 if (state->obattrs_loc != -1) {
818 state->resource_chunk = chunk;
821 if (state->resourceid_loc != -1) {
823 if (state->resource_id != id) {
836 state->inst_count = 0;
837 state->base_inst = -1;
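Lines 802-818 switch per-chunk resources when a call lives in a different resource chunk. A hedged sketch of that branch, limited to the chunk-id uniform and the sparse per-object attribute UBO (the obmats/obinfos UBO rebinds are left out); the locations come straight from the DRWCommandsState fields listed below:

static void resource_chunk_rebind_sketch(GPUShader *shader, DRWCommandsState *state, int chunk)
{
  if (state->resource_chunk != chunk) {
    if (state->chunkid_loc != -1) {
      /* Tell the shader which chunk the resource indices now refer to. */
      GPU_shader_uniform_int(shader, state->chunkid_loc, chunk);
    }
    if (state->obattrs_loc != -1) {
      /* Move the sparse per-object attribute UBO window to the new chunk. */
      DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
      DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
    }
    state->resource_chunk = chunk;
  }
}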
848 bool do_base_instance)
855 if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
874 state->baseinst_loc);
879 state->neg_scale = false;
880 state->resource_chunk = 0;
881 state->resource_id = -1;
882 state->base_inst = 0;
883 state->inst_count = 0;
886 state->select_id = -1;
899 if ((state->neg_scale != neg_scale) ||
900 (state->resource_chunk != chunk) ||
906 state->inst_count = 1;
912 else if (id != state->base_inst + state->inst_count) {
915 state->inst_count = 1;
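Lines 899-915 are the core batching decision: calls that keep the same GPU state and have consecutive resource IDs grow one pending instanced range; any discontinuity flushes first. A hedged, simplified sketch (the upstream condition also compares the batch and a few more state fields):

static void call_batching_step_sketch(DRWShadingGroup *shgroup,
                                      DRWCommandsState *state,
                                      const DRWResourceHandle *handle)
{
  const bool neg_scale = DRW_handle_negative_scale_get(handle) != 0;
  const int chunk = DRW_handle_chunk_get(handle);
  const int id = DRW_handle_id_get(handle);

  if ((state->neg_scale != neg_scale) || (state->resource_chunk != chunk)) {
    /* GPU-state discontinuity: emit the pending range before restarting. */
    draw_call_batching_flush(shgroup, state);
    state->base_inst = id;
    state->inst_count = 1;
  }
  else if (id != state->base_inst + state->inst_count) {
    /* Resource IDs stopped being contiguous: emit and restart the range. */
    draw_call_batching_flush(shgroup, state);
    state->base_inst = id;
    state->inst_count = 1;
  }
  else {
    /* Contiguous ID: just grow the pending instanced range. */
    state->inst_count++;
  }
}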
931 if (state->neg_scale) {
934 if (state->obmats_loc != -1) {
937 if (state->obinfos_loc != -1) {
940 if (state->obattrs_loc != -1) {
955 .resourceid_loc = -1,
959 .drw_state_enabled = 0,
960 .drw_state_disabled = 0,
964 bool use_tfeedback = false;
966 if (shader_changed) {
1018 (float[4]){cmd->clear.r / 255.0f,
1019 cmd->clear.g / 255.0f,
1020 cmd->clear.b / 255.0f,
1021 cmd->clear.a / 255.0f},
1115 if (use_tfeedback) {
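Lines 964 and 1115 bracket transform feedback around a shading group's draws: capture is enabled after the shader is bound when a feedback target exists, and disabled once the group's commands are done. A hedged sketch, with tfeedback_target as an assumed stand-in for the group's DRW_UNIFORM_TFEEDBACK_TARGET uniform:

static void tfeedback_bracket_sketch(GPUShader *shader, GPUVertBuf *tfeedback_target)
{
  bool use_tfeedback = false;
  if (tfeedback_target != NULL) {
    /* Start capturing transformed vertices into the target buffer. */
    use_tfeedback = GPU_shader_transform_feedback_enable(shader, tfeedback_target);
  }

  /* ... issue the shading group's draw commands here ... */

  if (use_tfeedback) {
    GPU_shader_transform_feedback_disable(shader);
  }
}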
1145 if (start_group == NULL) {
1152 "DRW_render_instance_buffer_finish had not been called before drawing");
1179 if (shgroup == end_group) {
1216 for (; pass; pass = pass->next) {
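Line 1216 is the outer pass loop: DRW_draw_pass() walks the linked chain of passes and draws each one's full shading-group range. A hedged sketch; the shgroups.first/.last member names are assumed from the DRWPass symbols listed below:

void DRW_draw_pass_sketch(DRWPass *pass)
{
  for (; pass; pass = pass->next) {
    /* Draw the whole range of shading groups owned by this pass. */
    drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
  }
}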
MINLINE int max_ii(int a, int b)
MINLINE float square_f(float a)
MINLINE float plane_point_side_v3(const float plane[4], const float co[3])
void aabb_get_near_far_from_plane(const float plane_no[3], const float bbmin[3], const float bbmax[3], float bb_near[3], float bb_afar[3])
void mul_v4_m4v4(float r[4], const float M[4][4], const float v[4])
void transpose_m4_m4(float R[4][4], const float M[4][4])
MINLINE float len_squared_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE bool equals_v2v2(const float v1[2], const float v2[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void invert_v2(float r[2])
void BLI_memblock_iternew(BLI_memblock *mblk, BLI_memblock_iter *iter) ATTR_NONNULL()
void * BLI_memblock_iterstep(BLI_memblock_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
#define UNUSED_VARS_NDEBUG(...)
#define SET_FLAG_FROM_TEST(value, test, flag)
#define DRW_STATE_WRITE_STENCIL_ENABLED
@ DRW_STATE_STENCIL_EQUAL
@ DRW_STATE_BLEND_BACKGROUND
@ DRW_STATE_STENCIL_ALWAYS
@ DRW_STATE_IN_FRONT_SELECT
@ DRW_STATE_DEPTH_GREATER_EQUAL
@ DRW_STATE_WRITE_STENCIL_SHADOW_FAIL
@ DRW_STATE_PROGRAM_POINT_SIZE
@ DRW_STATE_SHADOW_OFFSET
@ DRW_STATE_BLEND_ADD_FULL
@ DRW_STATE_BLEND_ALPHA_UNDER_PREMUL
@ DRW_STATE_DEPTH_LESS_EQUAL
@ DRW_STATE_WRITE_STENCIL_SHADOW_PASS
@ DRW_STATE_FIRST_VERTEX_CONVENTION
@ DRW_STATE_STENCIL_NEQUAL
@ DRW_STATE_BLEND_ALPHA_PREMUL
@ DRW_STATE_DEPTH_GREATER
@ DRW_STATE_WRITE_STENCIL
#define DRW_STATE_RASTERIZER_ENABLED
#define DRW_STATE_STENCIL_TEST_ENABLED
#define DRW_STATE_DEPTH_TEST_ENABLED
#define DRW_STATE_BLEND_ENABLED
#define DRW_STATE_DEFAULT
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count)
void GPU_compute_dispatch_indirect(GPUShader *shader, GPUStorageBuf *indirect_buf)
void GPU_compute_dispatch(GPUShader *shader, uint groups_x_len, uint groups_y_len, uint groups_z_len)
void GPU_draw_list_append(GPUDrawList *list, GPUBatch *batch, int i_first, int i_count)
void GPU_draw_list_submit(GPUDrawList *list)
GPUFrameBuffer * GPU_framebuffer_active_get(void)
bool GPU_select_load_id(unsigned int id)
void GPU_shader_unbind(void)
void GPU_shader_uniform_vector_int(GPUShader *shader, int location, int length, int arraysize, const int *value)
void GPU_shader_uniform_vector(GPUShader *shader, int location, int length, int arraysize, const float *value)
bool GPU_shader_transform_feedback_enable(GPUShader *shader, struct GPUVertBuf *vertbuf)
void GPU_shader_uniform_int(GPUShader *shader, int location, int value)
void GPU_shader_transform_feedback_disable(GPUShader *shader)
void GPU_shader_bind(GPUShader *shader)
void GPU_memory_barrier(eGPUBarrier barrier)
void GPU_program_point_size(bool enable)
@ GPU_BLEND_ADDITIVE_PREMULT
@ GPU_BLEND_ALPHA_UNDER_PREMUL
@ GPU_BLEND_ALPHA_PREMULT
void GPU_line_width(float width)
void GPU_line_smooth(bool enable)
void GPU_stencil_write_mask_set(uint write_mask)
void GPU_depth_range(float near, float far)
void GPU_stencil_reference_set(uint reference)
@ GPU_STENCIL_OP_COUNT_DEPTH_FAIL
@ GPU_STENCIL_OP_COUNT_DEPTH_PASS
void GPU_stencil_compare_mask_set(uint compare_mask)
void GPU_front_facing(bool invert)
void GPU_point_size(float size)
bool GPU_depth_mask_get(void)
@ GPU_DEPTH_GREATER_EQUAL
eGPUDepthTest GPU_depth_test_get(void)
void GPU_state_set(eGPUWriteMask write_mask, eGPUBlend blend, eGPUFaceCullTest culling_test, eGPUDepthTest depth_test, eGPUStencilTest stencil_test, eGPUStencilOp stencil_op, eGPUProvokingVertex provoking_vert)
void GPU_viewport_size_get_f(float coords[4])
void GPU_clip_distances(int distances_enabled)
void GPU_provoking_vertex(eGPUProvokingVertex vert)
void GPU_shadow_offset(bool enable)
void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot)
void GPU_storagebuf_unbind_all(void)
void GPU_texture_unbind_all(void)
void GPU_texture_image_bind(GPUTexture *tex, int unit)
void GPU_texture_bind_ex(GPUTexture *tex, eGPUSamplerState state, int unit, bool set_number)
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
struct GPUVertBuf GPUVertBuf
void GPU_vertbuf_bind_as_ssbo(struct GPUVertBuf *verts, int binding)
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
void GPU_vertbuf_bind_as_texture(struct GPUVertBuf *verts, int binding)
ATTR_WARN_UNUSED_RESULT const BMVert * v
void DRW_debug_sphere(const float center[3], const float radius, const float color[4])
DRWSparseUniformBuf * DRW_uniform_attrs_pool_find_ubo(GHash *table, struct GPUUniformAttrList *key)
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
@ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF
@ DRW_UNIFORM_BLOCK_OBINFOS
@ DRW_UNIFORM_TFEEDBACK_TARGET
@ DRW_UNIFORM_TEXTURE_REF
@ DRW_UNIFORM_MODEL_MATRIX
@ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE
@ DRW_UNIFORM_MODEL_MATRIX_INVERSE
@ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF
@ DRW_UNIFORM_BASE_INSTANCE
@ DRW_UNIFORM_BLOCK_OBMATS
@ DRW_UNIFORM_RESOURCE_ID
@ DRW_UNIFORM_STORAGE_BLOCK_REF
@ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE
@ DRW_UNIFORM_RESOURCE_CHUNK
@ DRW_UNIFORM_BLOCK_OBATTRS
@ DRW_UNIFORM_STORAGE_BLOCK
BLI_INLINE uint32_t DRW_handle_chunk_get(const DRWResourceHandle *handle)
eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
BLI_INLINE void * DRW_memblock_elem_from_handle(struct BLI_memblock *memblock, const DRWResourceHandle *handle)
@ DRW_CMD_COMPUTE_INDIRECT
@ DRW_CMD_DRAW_INSTANCE_RANGE
@ DRW_CMD_DRAW_PROCEDURAL
BLI_INLINE uint32_t DRW_handle_negative_scale_get(const DRWResourceHandle *handle)
BLI_INLINE uint32_t DRW_handle_id_get(const DRWResourceHandle *handle)
uint32_t DRWResourceHandle
BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup, DRWResourceHandle *handle, float obmat_loc, float obinv_loc)
static void draw_call_single_do(DRWShadingGroup *shgroup, DRWCommandsState *state, GPUBatch *batch, DRWResourceHandle handle, int vert_first, int vert_count, int inst_first, int inst_count, bool do_base_instance)
bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
static void drw_update_view(const float viewport_size[2])
static DRWCommand * draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType *cmd_type)
static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere, const float(*frustum_planes)[4], const BoundSphere *bsphere)
static void drw_state_validate(void)
static void draw_update_uniforms(DRWShadingGroup *shgroup, DRWCommandsState *state, bool *use_tfeedback)
bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup, GPUBatch *geom, int vert_first, int vert_count, int inst_first, int inst_count, int baseinst_loc)
static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup, DRWCommandsState *state, GPUBatch *batch, const DRWResourceHandle *handle)
void DRW_draw_pass(DRWPass *pass)
static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
void drw_state_set(DRWState state)
void DRW_view_set_active(const DRWView *view)
void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
static void draw_call_batching_do(DRWShadingGroup *shgroup, DRWCommandsState *state, DRWCommandDraw *call)
static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners)
static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
const DRWView * DRW_view_get_active(void)
void DRW_state_reset(void)
struct DRWCommandsState DRWCommandsState
static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
void DRW_state_reset_ex(DRWState state)
static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
void DRW_state_lock(DRWState state)
bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
static bool draw_culling_box_test(const float(*frustum_planes)[4], const BoundBox *bbox)
bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
void DRW_select_load_id(uint id)
struct DRWCommandIterator DRWCommandIterator
static void draw_compute_culling(DRWView *view)
static void draw_call_batching_start(DRWCommandsState *state)
void DRW_stats_query_end(void)
void DRW_stats_query_start(const char *name)
void GPU_framebuffer_clear(GPUFrameBuffer *gpu_fb, eGPUFrameBufferBits buffers, const float clear_col[4], float clear_depth, uint clear_stencil)
struct DRWCommandChunk * next
eGPUFrameBufferBits clear_channels
GPUStorageBuf * indirect_buf
DRWCommandChunk * curr_chunk
DRWSparseUniformBuf * obattrs_ubo
DRWState drw_state_disabled
DRWState drw_state_enabled
struct GHash * obattrs_ubo_pool
struct GPUUniformBuf ** obinfos_ubo
struct BLI_memblock * obmats
struct GPUUniformBuf ** matrices_ubo
struct BLI_memblock * cullstates
ViewInfos view_storage_cpy
bool buffer_finish_called
struct DRWPass::@312 shgroups
struct DRWCommandChunk * first
struct DRWUniformChunk * uniforms
struct DRWShadingGroup::@306 cmd
struct GPUUniformBuf * view_ubo
float2 viewport_size_inverse
static int blend(const Tex *tex, const float texvec[3], TexResult *texres)
DRWCommandComputeIndirect compute_indirect
DRWCommandSetStencil stencil
DRWCommandComputeRef compute_ref
DRWCommandDrawInstance instance
DRWCommandDrawRange range
DRWCommandSetMutableState state
DRWCommandCompute compute
DRWCommandBarrier barrier
DRWCommandDrawInstanceRange instance_range
DRWCommandDrawProcedural procedural
DRWCommandSetSelectID select_id