uint gpumat_array_len)
cache->mat_len = gpumat_array_len;
for (int i = 1; i < cache->mat_len; i++) {
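The excerpt above is fragmentary; the three statements come from different points in the same function. A minimal sketch of the pattern they belong to, assuming a cache struct that keeps one triangle batch per material slot (the struct layout and the shaded_triangles name are illustrative, not taken from the excerpt):

#include "BLI_sys_types.h" /* uint */
#include "GPU_batch.h"
#include "MEM_guardedalloc.h"

/* Pared-down cache: one GPUBatch pointer per material slot (illustrative). */
typedef struct ExampleBatchCache {
  GPUBatch **shaded_triangles;
  int mat_len;
} ExampleBatchCache;

static void example_cache_alloc_material_slots(ExampleBatchCache *cache, uint gpumat_array_len)
{
  cache->mat_len = gpumat_array_len;
  cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len,
                                        __func__);
  /* Slot 0 would receive the shared surface batch; the remaining slots start empty. */
  for (int i = 1; i < cache->mat_len; i++) {
    cache->shaded_triangles[i] = NULL;
  }
}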
BKE_MBALL_BATCH_DIRTY_ALL
bool BKE_mball_is_basis(const struct Object *ob)
MINLINE int max_ii(int a, int b)
Object is a sort of wrapper for general info.
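A short sketch of how the BKE-level references above fit together: only the basis metaball object carries the evaluated surface, and edits invalidate the GPU cache through the dirty flag. The BKE_mball_batch_cache_dirty_tag() call is an assumption about where BKE_MBALL_BATCH_DIRTY_ALL is consumed; it is not part of the list above.

#include <stdbool.h>
#include "BKE_mball.h" /* BKE_mball_is_basis(); BKE_mball_batch_cache_dirty_tag() is assumed */

/* Batch getters typically bail out for every object of a metaball family
 * except the basis one, since only the basis owns the evaluated geometry. */
static bool example_object_owns_mball_geometry(const struct Object *ob)
{
  return BKE_mball_is_basis(ob);
}

/* After an edit, mark all GPU batches of the metaball as needing a rebuild. */
static void example_tag_batches_dirty(struct MetaBall *mb)
{
  BKE_mball_batch_cache_dirty_tag(mb, BKE_MBALL_BATCH_DIRTY_ALL);
}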
#define GPU_batch_create(prim, verts, elem)
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo)
#define GPU_BATCH_DISCARD_SAFE(batch)
GPUBatch * GPU_batch_create_ex(GPUPrimType prim, GPUVertBuf *vert, GPUIndexBuf *elem, eGPUBatchFlag owns_flag)
struct GPUIndexBuf GPUIndexBuf
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
GPUIndexBuf * GPU_indexbuf_calloc(void)
struct GPUVertBuf GPUVertBuf
GPUVertBuf * GPU_vertbuf_calloc(void)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
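A sketch of the batch lifecycle implied by the GPU declarations above: a batch wraps a vertex buffer plus an optional index buffer, can take ownership of them through the owns_flag argument, and the *_DISCARD_SAFE macros free a resource and reset the pointer to NULL in one step. The GPU_BATCH_OWNS_VBO and GPU_BATCH_OWNS_INDEX flag names are assumptions based on GPU_batch.h.

#include "GPU_batch.h"

/* Wrap already-filled buffers into a triangle batch that owns them. */
static GPUBatch *example_make_triangle_batch(GPUVertBuf *vbo, GPUIndexBuf *ibo)
{
  return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, ibo,
                             GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}

/* NULL-safe teardown: discards the batch (and the buffers it owns) and
 * resets the caller's pointer. */
static void example_discard_batch(GPUBatch **batch_p)
{
  GPU_BATCH_DISCARD_SAFE(*batch_p);
}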
Guarded memory (de)allocation.
void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold)
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo)
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo)
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo)
void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo, const struct Scene *scene)
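A sketch combining the display-list builders with the buffer and batch API above, assuming the builders initialize the vertex formats of the buffers they fill (their declarations are taken to live in the draw module's internal draw_cache_impl.h header):

#include "DNA_listBase.h"
#include "DNA_scene_types.h"
#include "GPU_batch.h"
#include "draw_cache_impl.h" /* DRW_displist_* builders (assumed header name) */

static GPUBatch *example_surface_batch_from_displist(ListBase *lb, const Scene *scene)
{
  GPUVertBuf *vbo = GPU_vertbuf_calloc();
  GPUIndexBuf *ibo = GPU_indexbuf_calloc();

  /* Positions and normals for every display-list vertex, then one triangle
   * index list covering the whole surface. */
  DRW_displist_vertbuf_create_pos_and_nor(lb, vbo, scene);
  DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);

  /* The batch owns both buffers, so a single discard frees everything. */
  return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, ibo,
                             GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}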
const DRWContextState * DRW_context_state_get(void)
void *(* MEM_callocN)(size_t len, const char *str)
void *(* MEM_mallocN)(size_t len, const char *str)
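A sketch of where the inputs above typically come from inside the draw manager: the global draw context provides the scene, and the guarded allocators tag every block with a name for the leak tracker. The scene member of DRWContextState and the DRW_render.h header are assumptions.

#include "DRW_render.h"       /* DRW_context_state_get() (assumed header) */
#include "GPU_batch.h"
#include "MEM_guardedalloc.h"

static void example_context_and_alloc(int mat_len)
{
  const DRWContextState *draw_ctx = DRW_context_state_get();
  const struct Scene *scene = draw_ctx->scene; /* member name assumed */
  (void)scene;

  /* MEM_callocN() zero-fills, MEM_mallocN() does not; both record the tag
   * string for leak reports and are released with MEM_freeN(). */
  GPUBatch **batches = MEM_callocN(sizeof(*batches) * mat_len, __func__);
  MEM_freeN(batches);
}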
struct CurveCache * curve_cache
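Finally, the curve_cache reference above is where the evaluated metaball geometry lives. A sketch of fetching the display list that the DRW_displist_* builders consume, assuming the runtime.curve_cache->disp layout of the current Object and CurveCache definitions:

#include "BKE_displist.h"     /* CurveCache (assumed to expose a disp ListBase) */
#include "DNA_object_types.h"

/* Returns the evaluated display list, or NULL when the metaball has not been
 * evaluated yet. The runtime.curve_cache path is an assumption. */
static ListBase *example_mball_displist_get(Object *ob)
{
  if (ob->runtime.curve_cache == NULL) {
    return NULL;
  }
  return &ob->runtime.curve_cache->disp;
}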