Blender V3.3: draw_cache_impl_lattice.c
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2017 Blender Foundation. All rights reserved. */

#include "MEM_guardedalloc.h"

#include "BLI_math_vector.h"
#include "BLI_utildefines.h"

#include "DNA_curve_types.h"
#include "DNA_lattice_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_userdef_types.h"

#include "BKE_colorband.h"
#include "BKE_deform.h"
#include "BKE_lattice.h"

#include "GPU_batch.h"

#include "draw_cache_impl.h" /* own include */

#define SELECT 1

static void lattice_batch_cache_clear(Lattice *lt);

/* ---------------------------------------------------------------------- */
/* Lattice Interface, direct access to basic data. */

static int vert_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return u * v * w;
}

static int edge_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return (((((u - 1) * v) + ((v - 1) * u)) * w) + ((w - 1) * (u * v)));
}
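/* Where the edge count comes from: each of the `w` UV cross-sections
 * contributes `(u - 1) * v` edges along U plus `(v - 1) * u` edges along V,
 * and the `w - 1` gaps between consecutive cross-sections each add `u * v`
 * edges along W. For the default 3x3x3 lattice:
 * ((2 * 3) + (2 * 3)) * 3 + 2 * (3 * 3) = 36 + 18 = 54 edges. */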
static int lattice_render_verts_len_get(Lattice *lt)
{
  if (lt->editlatt) {
    lt = lt->editlatt->latt;
  }

  const int u = lt->pntsu;
  const int v = lt->pntsv;
  const int w = lt->pntsw;

  if ((lt->flag & LT_OUTSIDE) == 0) {
    return vert_len_calc(u, v, w);
  }

  /* TODO: remove internal coords. */
  return vert_len_calc(u, v, w);
}

static int lattice_render_edges_len_get(Lattice *lt)
{
  if (lt->editlatt) {
    lt = lt->editlatt->latt;
  }

  const int u = lt->pntsu;
  const int v = lt->pntsv;
  const int w = lt->pntsw;

  if ((lt->flag & LT_OUTSIDE) == 0) {
    return edge_len_calc(u, v, w);
  }

  /* TODO: remove internal coords. */
  return edge_len_calc(u, v, w);
}
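/* Note: while LT_OUTSIDE is set, both counts above still include the interior
 * points (see the TODOs), so they are upper bounds; the index buffer code
 * further down accounts for this by only asserting
 * `edge_len_real <= edge_len` in that case. */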
/* ---------------------------------------------------------------------- */
/* Lattice Interface, indirect, partially cached access to complex data. */

typedef struct LatticeRenderData {
  int types;

  int vert_len;
  int edge_len;

  struct {
    int u_len, v_len, w_len;
  } dims;
  bool show_only_outside;

  struct EditLatt *edit_latt;
  BPoint *bp;

  int actbp;

  const struct MDeformVert *dvert;
} LatticeRenderData;

enum {
  LR_DATATYPE_VERT = 1 << 0,
  LR_DATATYPE_EDGE = 1 << 1,
  LR_DATATYPE_OVERLAY = 1 << 2,
};

static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types)
{
  LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
  rdata->types = types;

  if (lt->editlatt) {
    EditLatt *editlatt = lt->editlatt;
    lt = editlatt->latt;

    rdata->edit_latt = editlatt;

    rdata->dvert = lt->dvert;

    if (types & (LR_DATATYPE_VERT)) {
      rdata->vert_len = lattice_render_verts_len_get(lt);
    }
    if (types & (LR_DATATYPE_EDGE)) {
      rdata->edge_len = lattice_render_edges_len_get(lt);
    }
    if (types & LR_DATATYPE_OVERLAY) {
      rdata->actbp = lt->actbp;
    }
  }
  else {
    rdata->dvert = NULL;

    if (types & (LR_DATATYPE_VERT)) {
      rdata->vert_len = lattice_render_verts_len_get(lt);
    }
    if (types & (LR_DATATYPE_EDGE)) {
      rdata->edge_len = lattice_render_edges_len_get(lt);
      /* No edge data. */
    }
  }

  rdata->bp = lt->def;

  rdata->dims.u_len = lt->pntsu;
  rdata->dims.v_len = lt->pntsv;
  rdata->dims.w_len = lt->pntsw;

  rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
  rdata->actbp = lt->actbp;

  return rdata;
}

static void lattice_render_data_free(LatticeRenderData *rdata)
{
#if 0
  if (rdata->loose_verts) {
    MEM_freeN(rdata->loose_verts);
  }
#endif
  MEM_freeN(rdata);
}

static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
{
  BLI_assert(rdata->types & LR_DATATYPE_VERT);
  return rdata->vert_len;
}

static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
{
  BLI_assert(rdata->types & LR_DATATYPE_EDGE);
  return rdata->edge_len;
}

static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata,
                                                     const int vert_idx)
{
  BLI_assert(rdata->types & LR_DATATYPE_VERT);
  return &rdata->bp[vert_idx];
}

/* ---------------------------------------------------------------------- */
/* Lattice GPUBatch Cache */

typedef struct LatticeBatchCache {
  GPUVertBuf *pos;
  GPUIndexBuf *edges;

  GPUBatch *all_verts;
  GPUBatch *all_edges;

  GPUBatch *overlay_verts;

  /* settings to determine if cache is invalid */
  bool is_dirty;

  struct {
    int u_len, v_len, w_len;
  } dims;
  bool show_only_outside;

  bool is_editmode;
} LatticeBatchCache;

/* GPUBatch cache management. */

static bool lattice_batch_cache_valid(Lattice *lt)
{
  LatticeBatchCache *cache = lt->batch_cache;

  if (cache == NULL) {
    return false;
  }

  if (cache->is_editmode != (lt->editlatt != NULL)) {
    return false;
  }

  if (cache->is_dirty) {
    return false;
  }

  if ((cache->dims.u_len != lt->pntsu) || (cache->dims.v_len != lt->pntsv) ||
      (cache->dims.w_len != lt->pntsw) ||
      (cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0))) {
    return false;
  }

  return true;
}
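/* A cache is reused only if the lattice stayed in the same mode, was not
 * dirty-tagged, and kept the same point counts and LT_OUTSIDE setting;
 * otherwise DRW_lattice_batch_cache_validate() below clears and
 * re-initializes it, and the batches are rebuilt lazily on next request. */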
static void lattice_batch_cache_init(Lattice *lt)
{
  LatticeBatchCache *cache = lt->batch_cache;

  if (!cache) {
    cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  cache->dims.u_len = lt->pntsu;
  cache->dims.v_len = lt->pntsv;
  cache->dims.w_len = lt->pntsw;
  cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;

  cache->is_editmode = lt->editlatt != NULL;

  cache->is_dirty = false;
}

void DRW_lattice_batch_cache_validate(Lattice *lt)
{
  if (!lattice_batch_cache_valid(lt)) {
    lattice_batch_cache_clear(lt);
    lattice_batch_cache_init(lt);
  }
}

static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
{
  return lt->batch_cache;
}

void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
{
  LatticeBatchCache *cache = lt->batch_cache;
  if (cache == NULL) {
    return;
  }
  switch (mode) {
    case BKE_LATTICE_BATCH_DIRTY_ALL:
      cache->is_dirty = true;
      break;
    case BKE_LATTICE_BATCH_DIRTY_SELECT:
      /* TODO: Separate Flag VBO. */
      GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
      break;
    default:
      BLI_assert(0);
  }
}

static void lattice_batch_cache_clear(Lattice *lt)
{
  LatticeBatchCache *cache = lt->batch_cache;
  if (!cache) {
    return;
  }

  GPU_BATCH_DISCARD_SAFE(cache->all_verts);
  GPU_BATCH_DISCARD_SAFE(cache->all_edges);
  GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);

  GPU_VERTBUF_DISCARD_SAFE(cache->pos);
  GPU_INDEXBUF_DISCARD_SAFE(cache->edges);
}

void DRW_lattice_batch_cache_free(Lattice *lt)
{
  lattice_batch_cache_clear(lt);
  MEM_SAFE_FREE(lt->batch_cache);
}
/* GPUBatch cache usage. */
static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata,
                                               LatticeBatchCache *cache,
                                               bool use_weight,
                                               const int actdef)
{
  BLI_assert(rdata->types & LR_DATATYPE_VERT);

  if (cache->pos == NULL) {
    GPUVertFormat format = {0};
    struct {
      uint pos, col;
    } attr_id;

    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

    if (use_weight) {
      attr_id.col = GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
    }

    const int vert_len = lattice_render_data_verts_len_get(rdata);

    cache->pos = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(cache->pos, vert_len);
    for (int i = 0; i < vert_len; i++) {
      const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
      GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);

      if (use_weight) {
        /* Sentinel written when no deform group is active. */
        const float no_active_weight = 666.0f;
        float weight = (actdef > -1) ? BKE_defvert_find_weight(rdata->dvert + i, actdef) :
                                       no_active_weight;
        GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, &weight);
      }
    }
  }

  return cache->pos;
}
static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata,
                                                  LatticeBatchCache *cache)
{
  BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));

  if (cache->edges == NULL) {
    const int vert_len = lattice_render_data_verts_len_get(rdata);
    const int edge_len = lattice_render_data_edges_len_get(rdata);
    int edge_len_real = 0;

    GPUIndexBufBuilder elb;
    GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);

#define LATT_INDEX(u, v, w) ((((w)*rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
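/* Lattice points are stored with U varying fastest, then V, then W, so the
 * macro flattens (u, v, w) to ((w * v_len) + v) * u_len + u. In a 3x3x3
 * lattice, for example, (u=1, v=2, w=0) maps to (0 * 3 + 2) * 3 + 1 = 7. */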
    for (int w = 0; w < rdata->dims.w_len; w++) {
      int wxt = (ELEM(w, 0, rdata->dims.w_len - 1));
      for (int v = 0; v < rdata->dims.v_len; v++) {
        int vxt = (ELEM(v, 0, rdata->dims.v_len - 1));
        for (int u = 0; u < rdata->dims.u_len; u++) {
          int uxt = (ELEM(u, 0, rdata->dims.u_len - 1));

          if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
            GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
            BLI_assert(edge_len_real <= edge_len);
            edge_len_real++;
          }
          if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
            GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
            BLI_assert(edge_len_real <= edge_len);
            edge_len_real++;
          }
          if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
            GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
            BLI_assert(edge_len_real <= edge_len);
            edge_len_real++;
          }
        }
      }
    }

#undef LATT_INDEX

    if (rdata->show_only_outside) {
      BLI_assert(edge_len_real <= edge_len);
    }
    else {
      BLI_assert(edge_len_real == edge_len);
    }

    cache->edges = GPU_indexbuf_build(&elb);
  }

  return cache->edges;
}
static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
{
  /* Since LR_DATATYPE_OVERLAY is slow to generate, generate them all at once */
  int options = LR_DATATYPE_VERT | LR_DATATYPE_OVERLAY;

  LatticeBatchCache *cache = lattice_batch_cache_get(lt);
  LatticeRenderData *rdata = lattice_render_data_create(lt, options);

  if (cache->overlay_verts == NULL) {
    static GPUVertFormat format = {0};
    static struct {
      uint pos, data;
    } attr_id;
    if (format.attr_len == 0) {
      /* initialize vertex format */
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
      attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
    }

    const int vert_len = lattice_render_data_verts_len_get(rdata);

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, vert_len);
    for (int i = 0; i < vert_len; i++) {
      const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);

      char vflag = 0;
      if (bp->f1 & SELECT) {
        if (i == rdata->actbp) {
          vflag |= VFLAG_VERT_ACTIVE;
        }
        else {
          vflag |= VFLAG_VERT_SELECTED;
        }
      }

      GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
      GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
    }

    cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }

  lattice_render_data_free(rdata);
}

GPUBatch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
{
  LatticeBatchCache *cache = lattice_batch_cache_get(lt);

  if (cache->all_edges == NULL) {
    /* create batch from Lattice */
    LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);

    cache->all_edges = GPU_batch_create(
        GPU_PRIM_LINES,
        lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
        lattice_batch_cache_get_edges(rdata, cache));

    lattice_render_data_free(rdata);
  }

  return cache->all_edges;
}

GPUBatch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
{
  LatticeBatchCache *cache = lattice_batch_cache_get(lt);

  if (cache->all_verts == NULL) {
    LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);

    cache->all_verts = GPU_batch_create(
        GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);

    lattice_render_data_free(rdata);
  }

  return cache->all_verts;
}

GPUBatch *DRW_lattice_batch_cache_get_edit_verts(Lattice *lt)
{
  LatticeBatchCache *cache = lattice_batch_cache_get(lt);

  if (cache->overlay_verts == NULL) {
    lattice_batch_cache_create_overlay_batches(lt);
  }

  return cache->overlay_verts;
}
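/* Illustrative call pattern for a draw engine consuming this cache (a sketch,
 * not part of this file; `use_weight` and `actdef` stand for the caller's
 * weight-display settings):
 *
 *   DRW_lattice_batch_cache_validate(lt);
 *   GPUBatch *wire = DRW_lattice_batch_cache_get_all_edges(lt, use_weight, actdef);
 *   GPUBatch *points = (lt->editlatt != NULL) ?
 *                          DRW_lattice_batch_cache_get_edit_verts(lt) :
 *                          DRW_lattice_batch_cache_get_all_verts(lt);
 */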