/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2017 Blender Foundation. All rights reserved. */

#include <cstring>

#include "MEM_guardedalloc.h"

#include "BLI_listbase.h"
#include "BLI_math_base.h"
#include "BLI_math_vec_types.hh"
#include "BLI_math_vector.h"
#include "BLI_math_vector.hh"
#include "BLI_span.hh"
#include "BLI_utildefines.h"

#include "DNA_curves_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"

#include "BKE_curves.hh"
#include "BKE_geometry_set.hh"

#include "GPU_batch.h"
#include "GPU_material.h"
#include "GPU_texture.h"

#include "DRW_render.h"

#include "draw_attributes.h"
#include "draw_cache_impl.h" /* own include */
#include "draw_cache_inline.h"
#include "draw_curves_private.h" /* own include */
#include "draw_shader.h"

using blender::ColorGeometry4f;
using blender::float3;
using blender::IndexRange;
using blender::MutableSpan;
using blender::Span;

/* ---------------------------------------------------------------------- */
/* Curves GPUBatch Cache */

struct CurvesBatchCache {
  CurvesEvalCache curves_cache;

  GPUBatch *edit_points;

  /* Whether the cache is invalid. */
  bool is_dirty;

  /* Protects the `attr_used` and `attr_used_over_time` request lists, which are merged through
   * `drw_attributes_merge`. */
  ThreadMutex render_mutex;
};

static bool curves_batch_cache_valid(const Curves &curves)
{
  const CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
  return (cache && cache->is_dirty == false);
}

static void curves_batch_cache_init(Curves &curves)
{
  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);

  if (!cache) {
    cache = MEM_cnew<CurvesBatchCache>(__func__);
    curves.batch_cache = cache;
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  BLI_mutex_init(&cache->render_mutex);

  cache->is_dirty = false;
}

static void curves_discard_attributes(CurvesEvalCache &curves_cache)
{
  for (const int i : IndexRange(GPU_MAX_ATTR)) {
    GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_attributes_buf[i]);
    DRW_TEXTURE_FREE_SAFE(curves_cache.proc_attributes_tex[i]);
  }

  for (const int i : IndexRange(MAX_HAIR_SUBDIV)) {
    for (const int j : IndexRange(GPU_MAX_ATTR)) {
      GPU_VERTBUF_DISCARD_SAFE(curves_cache.final[i].attributes_buf[j]);
      DRW_TEXTURE_FREE_SAFE(curves_cache.final[i].attributes_tex[j]);
    }

    drw_attributes_clear(&curves_cache.final[i].attr_used);
  }
}

static void curves_batch_cache_clear_data(CurvesEvalCache &curves_cache)
{
  /* TODO: more granular update tagging. */
  GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_point_buf);
  GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_length_buf);
  DRW_TEXTURE_FREE_SAFE(curves_cache.point_tex);
  DRW_TEXTURE_FREE_SAFE(curves_cache.length_tex);

  GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_strand_buf);
  GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_strand_seg_buf);
  DRW_TEXTURE_FREE_SAFE(curves_cache.strand_tex);
  DRW_TEXTURE_FREE_SAFE(curves_cache.strand_seg_tex);

  for (const int i : IndexRange(MAX_HAIR_SUBDIV)) {
    GPU_VERTBUF_DISCARD_SAFE(curves_cache.final[i].proc_buf);
    DRW_TEXTURE_FREE_SAFE(curves_cache.final[i].proc_tex);
    for (const int j : IndexRange(MAX_THICKRES)) {
      GPU_BATCH_DISCARD_SAFE(curves_cache.final[i].proc_hairs[j]);
    }
  }

  curves_discard_attributes(curves_cache);
}

static void curves_batch_cache_clear(Curves &curves)
{
  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
  if (!cache) {
    return;
  }

  curves_batch_cache_clear_data(cache->curves_cache);

  GPU_BATCH_DISCARD_SAFE(cache->edit_points);
}

void DRW_curves_batch_cache_validate(Curves *curves)
{
  if (!curves_batch_cache_valid(*curves)) {
    curves_batch_cache_clear(*curves);
    curves_batch_cache_init(*curves);
  }
}

static CurvesBatchCache &curves_batch_cache_get(Curves &curves)
{
  DRW_curves_batch_cache_validate(&curves);
  return *static_cast<CurvesBatchCache *>(curves.batch_cache);
}

void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
{
  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
  if (cache == nullptr) {
    return;
  }
  switch (mode) {
    case BKE_CURVES_BATCH_DIRTY_ALL:
      cache->is_dirty = true;
      break;
    default:
      BLI_assert_unreachable();
  }
}

void DRW_curves_batch_cache_free(Curves *curves)
{
  curves_batch_cache_clear(*curves);
  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
  BLI_mutex_end(&cache->render_mutex);
  MEM_SAFE_FREE(curves->batch_cache);
}

void DRW_curves_batch_cache_free_old(Curves *curves, int ctime)
{
  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
  if (cache == nullptr) {
    return;
  }

  bool do_discard = false;

  for (const int i : IndexRange(MAX_HAIR_SUBDIV)) {
    CurvesEvalFinalCache &final_cache = cache->curves_cache.final[i];

    if (drw_attributes_overlap(&final_cache.attr_used_over_time, &final_cache.attr_used)) {
      final_cache.last_attr_matching_time = ctime;
    }

    if (ctime - final_cache.last_attr_matching_time > U.vbotimeout) {
      do_discard = true;
    }

    drw_attributes_clear(&final_cache.attr_used_over_time);
  }

  if (do_discard) {
    curves_discard_attributes(cache->curves_cache);
  }
}

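/* Cache the element counts used to size the GPU buffers: `strands_len` is the number of curves,
 * `point_len` the number of control points, and `elems_len` adds one extra entry per curve
 * (e.g. for a primitive restart index). Only refreshed when the point buffer was freed. */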
static void ensure_seg_pt_count(const Curves &curves, CurvesEvalCache &curves_cache)
{
  if (curves_cache.proc_point_buf != nullptr) {
    return;
  }

  curves_cache.strands_len = curves.geometry.curve_num;
  curves_cache.elems_len = curves.geometry.point_num + curves.geometry.curve_num;
  curves_cache.point_len = curves.geometry.point_num;
}

struct PositionAndParameter {
  float3 position;
  float parameter;
};

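/* For every control point, write its position and its normalized arc length along the curve
 * (0 at the root, 1 at the tip) into `posTime_data`, and the curve's total length into
 * `hairLength_data`. */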
static void curves_batch_cache_fill_segments_proc_pos(
    const Curves &curves_id,
    MutableSpan<PositionAndParameter> posTime_data,
    MutableSpan<float> hairLength_data)
{
  /* TODO: use hair radius layer if available. */
  const int curve_num = curves_id.geometry.curve_num;
  const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
      curves_id.geometry);
  Span<float3> positions = curves.positions();

  for (const int i_curve : IndexRange(curve_num)) {
    const IndexRange points = curves.points_for_curve(i_curve);

    Span<float3> curve_positions = positions.slice(points);
    MutableSpan<PositionAndParameter> curve_posTime_data = posTime_data.slice(points);

    float total_len = 0.0f;
    for (const int i_point : curve_positions.index_range()) {
      if (i_point > 0) {
        total_len += blender::math::distance(curve_positions[i_point - 1],
                                             curve_positions[i_point]);
      }
      curve_posTime_data[i_point].position = curve_positions[i_point];
      curve_posTime_data[i_point].parameter = total_len;
    }
    hairLength_data[i_curve] = total_len;

    /* Divide by the total length to get a [0-1] parameter along the curve. */
    if (total_len > 0.0f) {
      const float factor = 1.0f / total_len;
      for (const int i_point : curve_positions.index_range()) {
        curve_posTime_data[i_point].parameter *= factor;
      }
    }
  }
}

static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
                                                     CurvesEvalCache &cache,
                                                     GPUMaterial *gpu_material)
{
  if (cache.proc_point_buf == nullptr || DRW_vbo_requested(cache.proc_point_buf)) {
    /* Initialize vertex format. */
    GPUVertFormat format = {0};
    GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

    cache.proc_point_buf = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(cache.proc_point_buf, cache.point_len);

    MutableSpan posTime_data{
        reinterpret_cast<PositionAndParameter *>(GPU_vertbuf_get_data(cache.proc_point_buf)),
        cache.point_len};

    GPUVertFormat length_format = {0};
    GPU_vertformat_attr_add(&length_format, "hairLength", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);

    cache.proc_length_buf = GPU_vertbuf_create_with_format(&length_format);
    GPU_vertbuf_data_alloc(cache.proc_length_buf, cache.strands_len);

    MutableSpan hairLength_data{
        reinterpret_cast<float *>(GPU_vertbuf_get_data(cache.proc_length_buf)), cache.strands_len};

    curves_batch_cache_fill_segments_proc_pos(curves, posTime_data, hairLength_data);

    /* Create vbo immediately to bind to texture buffer. */
    GPU_vertbuf_use(cache.proc_point_buf);
    cache.point_tex = GPU_texture_create_from_vertbuf("hair_point", cache.proc_point_buf);
  }

  if (gpu_material && cache.proc_length_buf != nullptr && cache.length_tex) {
    ListBase gpu_attrs = GPU_material_attributes(gpu_material);
    LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &gpu_attrs) {
      if (attr->type == CD_HAIRLENGTH) {
        GPU_vertbuf_use(cache.proc_length_buf);
        cache.length_tex = GPU_texture_create_from_vertbuf("hair_length", cache.proc_length_buf);
        break;
      }
    }
  }
}

void drw_curves_get_attribute_sampler_name(const char *layer_name, char r_sampler_name[32])
{
  char attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
  GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
  /* Attributes use auto-name. */
  BLI_snprintf(r_sampler_name, 32, "a%s", attr_safe_name);
}

static void curves_batch_cache_ensure_procedural_final_attr(CurvesEvalCache &cache,
                                                            const GPUVertFormat *format,
                                                            const int subdiv,
                                                            const int index,
                                                            const char *name)
{
  CurvesEvalFinalCache &final_cache = cache.final[subdiv];
  final_cache.attributes_buf[index] = GPU_vertbuf_create_with_format_ex(format,
                                                                        GPU_USAGE_DEVICE_ONLY);

  /* Create a destination buffer for the transform feedback. Sized for points, not line
   * segments. */
  GPU_vertbuf_data_alloc(final_cache.attributes_buf[index],
                         final_cache.strands_res * cache.strands_len);

  /* Create vbo immediately to bind to texture buffer. */
  GPU_vertbuf_use(final_cache.attributes_buf[index]);

  final_cache.attributes_tex[index] = GPU_texture_create_from_vertbuf(
      name, final_cache.attributes_buf[index]);
}

static void curves_batch_ensure_attribute(const Curves &curves,
                                          CurvesEvalCache &cache,
                                          const DRW_AttributeRequest &request,
                                          const int subdiv,
                                          const int index)
{
  GPU_VERTBUF_DISCARD_SAFE(cache.proc_attributes_buf[index]);
  DRW_TEXTURE_FREE_SAFE(cache.proc_attributes_tex[index]);

  char sampler_name[32];
  drw_curves_get_attribute_sampler_name(request.attribute_name, sampler_name);

  GPUVertFormat format = {0};
  GPU_vertformat_deinterleave(&format);
  /* All attributes use vec4, see comment below. */
  GPU_vertformat_attr_add(&format, sampler_name, GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

  cache.proc_attributes_buf[index] = GPU_vertbuf_create_with_format(&format);
  GPUVertBuf *attr_vbo = cache.proc_attributes_buf[index];

  GPU_vertbuf_data_alloc(attr_vbo,
                         request.domain == ATTR_DOMAIN_POINT ? curves.geometry.point_num :
                                                               curves.geometry.curve_num);

  const blender::bke::AttributeAccessor attributes =
      blender::bke::CurvesGeometry::wrap(curves.geometry).attributes();

  /* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
   * by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
   * the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
   * similar texture state swizzle to map the attribute correctly as for volume attributes, so we
   * can control the conversion ourselves. */
  const blender::VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
      request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});

  MutableSpan<ColorGeometry4f> vbo_span{
      static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(attr_vbo)),
      attributes.domain_size(request.domain)};

  attribute.materialize(vbo_span);

  GPU_vertbuf_use(attr_vbo);
  cache.proc_attributes_tex[index] = GPU_texture_create_from_vertbuf(sampler_name, attr_vbo);

  /* Existing final data may have been for a different attribute (with a different name or
   * domain), free the data. */
  GPU_VERTBUF_DISCARD_SAFE(cache.final[subdiv].attributes_buf[index]);
  DRW_TEXTURE_FREE_SAFE(cache.final[subdiv].attributes_tex[index]);

  /* Ensure final data for points. */
  if (request.domain == ATTR_DOMAIN_POINT) {
    curves_batch_cache_ensure_procedural_final_attr(cache, &format, subdiv, index, sampler_name);
  }
}

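/* For every curve, write the index of its first point into the `uint` data buffer and its
 * segment count (points - 1) into the `ushort` segment buffer. */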
static void curves_batch_cache_fill_strands_data(const Curves &curves_id,
                                                 GPUVertBufRaw &data_step,
                                                 GPUVertBufRaw &seg_step)
{
  const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
      curves_id.geometry);

  for (const int i : IndexRange(curves.curves_num())) {
    const IndexRange curve_range = curves.points_for_curve(i);

    *(uint *)GPU_vertbuf_raw_step(&data_step) = curve_range.start();
    *(ushort *)GPU_vertbuf_raw_step(&seg_step) = curve_range.size() - 1;
  }
}

static void curves_batch_cache_ensure_procedural_strand_data(Curves &curves,
                                                             CurvesEvalCache &cache)
{
  GPUVertBufRaw data_step, seg_step;

  GPUVertFormat format_data = {0};
  uint data_id = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);

  GPUVertFormat format_seg = {0};
  uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);

  /* Curve Data. */
  cache.proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
  GPU_vertbuf_data_alloc(cache.proc_strand_buf, cache.strands_len);
  GPU_vertbuf_attr_get_raw_data(cache.proc_strand_buf, data_id, &data_step);

  cache.proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
  GPU_vertbuf_data_alloc(cache.proc_strand_seg_buf, cache.strands_len);
  GPU_vertbuf_attr_get_raw_data(cache.proc_strand_seg_buf, seg_id, &seg_step);

  curves_batch_cache_fill_strands_data(curves, data_step, seg_step);

  /* Create vbo immediately to bind to texture buffer. */
  GPU_vertbuf_use(cache.proc_strand_buf);
  cache.strand_tex = GPU_texture_create_from_vertbuf("curves_strand", cache.proc_strand_buf);

  GPU_vertbuf_use(cache.proc_strand_seg_buf);
  cache.strand_seg_tex = GPU_texture_create_from_vertbuf("curves_strand_seg",
                                                         cache.proc_strand_seg_buf);
}

static void curves_batch_cache_ensure_procedural_final_points(CurvesEvalCache &cache, int subdiv)
{
  /* Same format as point_tex. */
  GPUVertFormat format = {0};
  GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

  cache.final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(&format,
                                                                   GPU_USAGE_DEVICE_ONLY);

  /* Create a destination buffer for the transform feedback. Sized for points, not line
   * segments. */
  GPU_vertbuf_data_alloc(cache.final[subdiv].proc_buf,
                         cache.final[subdiv].strands_res * cache.strands_len);

  /* Create vbo immediately to bind to texture buffer. */
  GPU_vertbuf_use(cache.final[subdiv].proc_buf);

  cache.final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("hair_proc",
                                                                 cache.final[subdiv].proc_buf);
}

static void curves_batch_cache_fill_segments_indices(const Curves &curves,
                                                     const int res,
                                                     GPUIndexBufBuilder &elb)
{
  const int curves_num = curves.geometry.curve_num;

  uint curr_point = 0;

  for ([[maybe_unused]] const int i : IndexRange(curves_num)) {
    for (int k = 0; k < res; k++) {
      GPU_indexbuf_add_generic_vert(&elb, curr_point++);
    }
    GPU_indexbuf_add_primitive_restart(&elb);
  }
}

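/* Build one strip per curve: `strands_res * thickness_res` vertices followed by a primitive
 * restart index, i.e. `(verts_per_curve + 1) * strands_len` entries in total. A line strip is
 * used when `thickness_res` is 1, a triangle strip otherwise. */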
static void curves_batch_cache_ensure_procedural_indices(Curves &curves,
                                                         CurvesEvalCache &cache,
                                                         const int thickness_res,
                                                         const int subdiv)
{
  BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */

  if (cache.final[subdiv].proc_hairs[thickness_res - 1] != nullptr) {
    return;
  }

  int verts_per_curve = cache.final[subdiv].strands_res * thickness_res;
  /* +1 for primitive restart. */
  int element_count = (verts_per_curve + 1) * cache.strands_len;
  GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;

  static GPUVertFormat format = {0};
  GPU_vertformat_clear(&format);

  /* Initialize vertex format. */
  GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);

  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(vbo, 1);

  GPUIndexBufBuilder elb;
  GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count);

  curves_batch_cache_fill_segments_indices(curves, verts_per_curve, elb);

  cache.final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
      prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}

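/* Gather the attributes requested by the GPU material, (re)create any attribute buffer that is
 * missing, and return whether a new transform feedback evaluation is needed (only point domain
 * attributes go through the subdivision pass). */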
static bool curves_ensure_attributes(const Curves &curves,
                                     CurvesBatchCache &cache,
                                     GPUMaterial *gpu_material,
                                     int subdiv)
{
  ThreadMutex *render_mutex = &cache.render_mutex;
  const CustomData *cd_curve = &curves.geometry.curve_data;
  const CustomData *cd_point = &curves.geometry.point_data;
  CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];

  if (gpu_material) {
    DRW_Attributes attrs_needed;
    drw_attributes_clear(&attrs_needed);
    ListBase gpu_attrs = GPU_material_attributes(gpu_material);
    LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
      const char *name = gpu_attr->name;

      int layer_index;
      eCustomDataType type;
      eAttrDomain domain;
      if (drw_custom_data_match_attribute(cd_curve, name, &layer_index, &type)) {
        domain = ATTR_DOMAIN_CURVE;
      }
      else if (drw_custom_data_match_attribute(cd_point, name, &layer_index, &type)) {
        domain = ATTR_DOMAIN_POINT;
      }
      else {
        continue;
      }

      drw_attributes_add_request(&attrs_needed, name, type, layer_index, domain);
    }

    if (!drw_attributes_overlap(&final_cache.attr_used, &attrs_needed)) {
      /* Some new attributes have been added, free all and start over. */
      for (const int i : IndexRange(GPU_MAX_ATTR)) {
        GPU_VERTBUF_DISCARD_SAFE(cache.curves_cache.proc_attributes_buf[i]);
        DRW_TEXTURE_FREE_SAFE(cache.curves_cache.proc_attributes_tex[i]);
      }
      drw_attributes_merge(&final_cache.attr_used, &attrs_needed, render_mutex);
    }
    drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, render_mutex);
  }

  bool need_tf_update = false;

  for (const int i : IndexRange(final_cache.attr_used.num_requests)) {
    const DRW_AttributeRequest &request = final_cache.attr_used.requests[i];

    if (cache.curves_cache.proc_attributes_buf[i] != nullptr) {
      continue;
    }

    if (request.domain == ATTR_DOMAIN_POINT) {
      need_tf_update = true;
    }

    curves_batch_ensure_attribute(curves, cache.curves_cache, request, subdiv, i);
  }

  return need_tf_update;
}

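/* Ensure every procedural buffer needed to evaluate and draw the curves at the given subdivision
 * and thickness resolution exists. The evaluated point count per curve is
 * `strands_res = 1 << (steps + subdiv)`. Returns true when a transform feedback update is
 * needed. */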
bool curves_ensure_procedural_data(Curves *curves,
                                   CurvesEvalCache **r_hair_cache,
                                   GPUMaterial *gpu_material,
                                   const int subdiv,
                                   const int thickness_res)
{
  bool need_ft_update = false;

  CurvesBatchCache &cache = curves_batch_cache_get(*curves);
  *r_hair_cache = &cache.curves_cache;

  const int steps = 3; /* TODO: don't hard-code? */
  (*r_hair_cache)->final[subdiv].strands_res = 1 << (steps + subdiv);

  /* Refreshed on combing and simulation. */
  if ((*r_hair_cache)->proc_point_buf == nullptr) {
    ensure_seg_pt_count(*curves, cache.curves_cache);
    curves_batch_cache_ensure_procedural_pos(*curves, cache.curves_cache, gpu_material);
    need_ft_update = true;
  }

  /* Refreshed if active layer or custom data changes. */
  if ((*r_hair_cache)->strand_tex == nullptr) {
    curves_batch_cache_ensure_procedural_strand_data(*curves, cache.curves_cache);
  }

  /* Refreshed only on subdiv count change. */
  if ((*r_hair_cache)->final[subdiv].proc_buf == nullptr) {
    curves_batch_cache_ensure_procedural_final_points(cache.curves_cache, subdiv);
    need_ft_update = true;
  }
  if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == nullptr) {
    curves_batch_cache_ensure_procedural_indices(
        *curves, cache.curves_cache, thickness_res, subdiv);
  }

  need_ft_update |= curves_ensure_attributes(*curves, cache, gpu_material, subdiv);

  return need_ft_update;
}

int DRW_curves_material_count_get(Curves *curves)
{
  return max_ii(1, curves->totcol);
}

GPUBatch *DRW_curves_batch_cache_get_edit_points(Curves *curves)
{
  CurvesBatchCache &cache = curves_batch_cache_get(*curves);
  return DRW_batch_request(&cache.edit_points);
}

static void request_attribute(Curves &curves, const char *name)
{
  CurvesBatchCache &cache = curves_batch_cache_get(curves);
  const DRWContextState *draw_ctx = DRW_context_state_get();
  const Scene *scene = draw_ctx->scene;
  const int subdiv = scene->r.hair_subdiv;
  CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];

  DRW_Attributes attributes{};

  blender::bke::CurvesGeometry &curves_geometry = blender::bke::CurvesGeometry::wrap(
      curves.geometry);
  std::optional<blender::bke::AttributeMetaData> meta_data =
      curves_geometry.attributes().lookup_meta_data(name);
  if (!meta_data) {
    return;
  }
  const eAttrDomain domain = meta_data->domain;
  const eCustomDataType type = meta_data->data_type;
  const CustomData &custom_data = domain == ATTR_DOMAIN_POINT ? curves.geometry.point_data :
                                                                curves.geometry.curve_data;

  drw_attributes_add_request(
      &attributes, name, type, CustomData_get_named_layer(&custom_data, type, name), domain);

  drw_attributes_merge(&final_cache.attr_used, &attributes, &cache.render_mutex);
}

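/* Return a pointer to the texture slot that will contain the evaluated attribute: point domain
 * attributes come from the subdivided `final` cache, curve domain attributes from the
 * non-subdivided cache. */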
GPUTexture **DRW_curves_texture_for_evaluated_attribute(Curves *curves,
                                                        const char *name,
                                                        bool *r_is_point_domain)
{
  CurvesBatchCache &cache = curves_batch_cache_get(*curves);
  const DRWContextState *draw_ctx = DRW_context_state_get();
  const Scene *scene = draw_ctx->scene;
  const int subdiv = scene->r.hair_subdiv;
  CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];

  request_attribute(*curves, name);

  int request_i = -1;
  for (const int i : IndexRange(final_cache.attr_used.num_requests)) {
    if (STREQ(final_cache.attr_used.requests[i].attribute_name, name)) {
      request_i = i;
      break;
    }
  }
  if (request_i == -1) {
    *r_is_point_domain = false;
    return nullptr;
  }
  switch (final_cache.attr_used.requests[request_i].domain) {
    case ATTR_DOMAIN_POINT:
      *r_is_point_domain = true;
      return &final_cache.attributes_tex[request_i];
    case ATTR_DOMAIN_CURVE:
      *r_is_point_domain = false;
      return &cache.curves_cache.proc_attributes_tex[request_i];
    default:
      BLI_assert_unreachable();
      return nullptr;
  }
}

void DRW_curves_batch_cache_create_requested(Object *ob)
{
  Curves *curves = static_cast<Curves *>(ob->data);
  CurvesBatchCache &cache = curves_batch_cache_get(*curves);

  if (DRW_batch_requested(cache.edit_points, GPU_PRIM_POINTS)) {
    DRW_vbo_request(cache.edit_points, &cache.curves_cache.proc_point_buf);
  }

  if (DRW_vbo_requested(cache.curves_cache.proc_point_buf)) {
    curves_batch_cache_ensure_procedural_pos(*curves, cache.curves_cache, nullptr);
  }
}