Blender V3.3
eevee_film.cc
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2021 Blender Foundation.
 */

#include "BLI_hash.h"
#include "BLI_rect.h"

#include "GPU_framebuffer.h"
#include "GPU_texture.h"

#include "DRW_render.h"
#include "RE_pipeline.h"

#include "eevee_film.hh"
#include "eevee_instance.hh"

namespace blender::eevee {

ENUM_OPERATORS(eViewLayerEEVEEPassType, EEVEE_RENDER_PASS_MAX_BIT)

/* -------------------------------------------------------------------- */

void Film::init_aovs()
{
  Vector<ViewLayerAOV *> aovs;

  aovs_info.display_id = -1;
  aovs_info.display_is_value = false;
  aovs_info.value_len = aovs_info.color_len = 0;

  if (inst_.is_viewport()) {
    /* Viewport case. */
    if (inst_.v3d->shading.render_pass == EEVEE_RENDER_PASS_AOV) {
      /* AOV display, request only a single AOV. */
      ViewLayerAOV *aov = (ViewLayerAOV *)BLI_findstring(
          &inst_.view_layer->aovs, inst_.v3d->shading.aov_name, offsetof(ViewLayerAOV, name));

      if (aov == nullptr) {
        /* AOV not found in view layer. */
        return;
      }

      aovs.append(aov);
      aovs_info.display_id = 0;
      aovs_info.display_is_value = (aov->type == AOV_TYPE_VALUE);
    }
    else {
      /* TODO(fclem): The realtime compositor could ask for several AOVs. */
    }
  }
  else {
    /* Render case. */
    LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
      aovs.append(aov);
    }
  }

  if (aovs.size() > AOV_MAX) {
    inst_.info = "Error: Too many AOVs";
    return;
  }

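  /* Each AOV is registered by the hash of its name; value and color AOVs are counted
   * separately since they accumulate into separate texture arrays. */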
  for (ViewLayerAOV *aov : aovs) {
    bool is_value = (aov->type == AOV_TYPE_VALUE);
    uint &index = is_value ? aovs_info.value_len : aovs_info.color_len;
    uint &hash = is_value ? aovs_info.hash_value[index] : aovs_info.hash_color[index];
    hash = BLI_hash_string(aov->name);
    index++;
  }
}

float *Film::read_aov(ViewLayerAOV *aov)
{
  bool is_value = (aov->type == AOV_TYPE_VALUE);
  Texture &accum_tx = is_value ? value_accum_tx_ : color_accum_tx_;

  Span<uint> aovs_hash(is_value ? aovs_info.hash_value : aovs_info.hash_color,
                       is_value ? aovs_info.value_len : aovs_info.color_len);
  /* Find AOV index. */
  uint hash = BLI_hash_string(aov->name);
  int aov_index = -1;
  int i = 0;
  for (uint candidate_hash : aovs_hash) {
    if (candidate_hash == hash) {
      aov_index = i;
      break;
    }
    i++;
  }

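  /* The accumulation texture is a 2D array with one layer per pass/AOV; layer views expose
   * each layer as an individual texture so a single pass can be read back on its own. */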
  accum_tx.ensure_layer_views();

  int index = aov_index + (is_value ? data_.aov_value_id : data_.aov_color_id);
  GPUTexture *pass_tx = accum_tx.layer_view(index);

  return (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
}

/* -------------------------------------------------------------------- */

void Film::sync_mist()
{
  const CameraData &cam = inst_.camera.data_get();
  const ::World *world = inst_.scene->world;
  float mist_start = world ? world->miststa : cam.clip_near;
  float mist_distance = world ? world->mistdist : fabsf(cam.clip_far - cam.clip_near);
  int mist_type = world ? world->mistype : (int)WO_MIST_LINEAR;

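  /* The film shader is expected to reconstruct the mist factor as
   * `saturate(depth * mist_scale + mist_bias)` raised to `mist_exponent`,
   * i.e. `(depth - start) / distance` shaped by the falloff type chosen below. */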
  switch (mist_type) {
    case WO_MIST_QUADRATIC:
      data_.mist_exponent = 2.0f;
      break;
    case WO_MIST_LINEAR:
      data_.mist_exponent = 1.0f;
      break;
    case WO_MIST_INVERSE_QUADRATIC:
      data_.mist_exponent = 0.5f;
      break;
  }

  data_.mist_scale = 1.0 / mist_distance;
  data_.mist_bias = -mist_start / mist_distance;
}

/* -------------------------------------------------------------------- */

inline bool operator==(const FilmData &a, const FilmData &b)
{
  return (a.extent == b.extent) && (a.offset == b.offset) &&
         (a.filter_radius == b.filter_radius) && (a.scaling_factor == b.scaling_factor) &&
         (a.background_opacity == b.background_opacity);
}

inline bool operator!=(const FilmData &a, const FilmData &b)
{
  return !(a == b);
}

/* -------------------------------------------------------------------- */

void Film::init(const int2 &extent, const rcti *output_rect)
{
  Sampling &sampling = inst_.sampling;
  Scene &scene = *inst_.scene;
  SceneEEVEE &scene_eevee = scene.eevee;

  init_aovs();

  {
    /* Enable passes that need to be rendered. */
    eViewLayerEEVEEPassType render_passes;

    if (inst_.is_viewport()) {
      /* Viewport Case. */
      render_passes = eViewLayerEEVEEPassType(inst_.v3d->shading.render_pass);

      if (inst_.overlays_enabled() || inst_.gpencil_engine_enabled) {
        /* Overlays and Grease Pencil need the depth for correct compositing.
         * Using the render pass ensures we store the center depth. */
        render_passes |= EEVEE_RENDER_PASS_Z;
      }
      /* TEST */
      render_passes |= EEVEE_RENDER_PASS_VECTOR;
    }
    else {
      /* Render Case. */
      render_passes = eViewLayerEEVEEPassType(inst_.view_layer->eevee.render_passes);

      render_passes |= EEVEE_RENDER_PASS_COMBINED;

#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \
  SET_FLAG_FROM_TEST(render_passes, \
                     (inst_.view_layer->passflag & SCE_PASS_##name_legacy) != 0, \
                     EEVEE_RENDER_PASS_##name_eevee);

      ENABLE_FROM_LEGACY(Z, Z)
      ENABLE_FROM_LEGACY(MIST, MIST)
      ENABLE_FROM_LEGACY(NORMAL, NORMAL)
      ENABLE_FROM_LEGACY(SHADOW, SHADOW)
      ENABLE_FROM_LEGACY(AO, AO)
      ENABLE_FROM_LEGACY(EMIT, EMIT)
      ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
      ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR)
      ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR)
      ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT)
      ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT)
      ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)

#undef ENABLE_FROM_LEGACY
    }

    /* Filter obsolete passes. */
    render_passes &= ~(EEVEE_RENDER_PASS_UNUSED_8 | EEVEE_RENDER_PASS_BLOOM);

    /* TODO(@fclem): Can't we rely on depsgraph update notification? */
    if (assign_if_different(enabled_passes_, render_passes)) {
      sampling.reset();
    }
  }
  {
    rcti fallback_rect;
    if (BLI_rcti_is_empty(output_rect)) {
      BLI_rcti_init(&fallback_rect, 0, extent[0], 0, extent[1]);
      output_rect = &fallback_rect;
    }

    FilmData data = data_;
    data.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
    data.offset = int2(output_rect->xmin, output_rect->ymin);
    data.extent_inv = 1.0f / float2(data.extent);
    /* Disable filtering if sample count is 1. */
    data.filter_radius = (sampling.sample_count() == 1) ? 0.0f :
                                                          clamp_f(scene.r.gauss, 0.0f, 100.0f);
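    /* NOTE: `scene.r.gauss` corresponds to the render "Filter Size" setting, in pixels. */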
    /* TODO(fclem): parameter hidden in experimental.
     * We need to figure out LOD bias first in order to preserve texture crispiness. */
    data.scaling_factor = 1;

    data.background_opacity = (scene.r.alphamode == R_ALPHAPREMUL) ? 0.0f : 1.0f;
    if (inst_.is_viewport() && false /* TODO(fclem): StudioLight */) {
      data.background_opacity = inst_.v3d->shading.studiolight_background;
    }

    FilmData &data_prev_ = data_;
    if (assign_if_different(data_prev_, data)) {
      sampling.reset();
    }

    const eViewLayerEEVEEPassType data_passes = EEVEE_RENDER_PASS_Z | EEVEE_RENDER_PASS_NORMAL |
                                                EEVEE_RENDER_PASS_VECTOR;
    const eViewLayerEEVEEPassType color_passes_1 = EEVEE_RENDER_PASS_DIFFUSE_LIGHT |
                                                   EEVEE_RENDER_PASS_SPECULAR_LIGHT |
                                                   EEVEE_RENDER_PASS_VOLUME_LIGHT |
                                                   EEVEE_RENDER_PASS_EMIT;
    const eViewLayerEEVEEPassType color_passes_2 = EEVEE_RENDER_PASS_DIFFUSE_COLOR |
                                                   EEVEE_RENDER_PASS_SPECULAR_COLOR |
                                                   EEVEE_RENDER_PASS_ENVIRONMENT |
                                                   EEVEE_RENDER_PASS_MIST |
                                                   EEVEE_RENDER_PASS_SHADOW |
                                                   EEVEE_RENDER_PASS_AO;

    data_.exposure_scale = pow2f(scene.view_settings.exposure);
    data_.has_data = (enabled_passes_ & data_passes) != 0;
    data_.any_render_pass_1 = (enabled_passes_ & color_passes_1) != 0;
    data_.any_render_pass_2 = (enabled_passes_ & color_passes_2) != 0;
  }
  {
    /* Set pass offsets. */

    data_.display_id = aovs_info.display_id;
    data_.display_is_value = aovs_info.display_is_value;

    /* Combined is in a separate buffer. */
    data_.combined_id = (enabled_passes_ & EEVEE_RENDER_PASS_COMBINED) ? 0 : -1;
    /* Depth is in a separate buffer. */
    data_.depth_id = (enabled_passes_ & EEVEE_RENDER_PASS_Z) ? 0 : -1;

    data_.color_len = 0;
    data_.value_len = 0;

    auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) {
      bool is_value = pass_is_value(pass_type);
      int index = (enabled_passes_ & pass_type) ?
                      (is_value ? data_.value_len : data_.color_len)++ :
                      -1;
      if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
        data_.display_id = index;
        data_.display_is_value = is_value;
      }
      return index;
    };

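    /* Each enabled pass gets a contiguous layer index inside its accumulation array (value or
     * color). The pass currently shown in the viewport is also recorded as the display pass. */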
    data_.mist_id = pass_index_get(EEVEE_RENDER_PASS_MIST);
    data_.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL);
    data_.vector_id = pass_index_get(EEVEE_RENDER_PASS_VECTOR);
    data_.diffuse_light_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_LIGHT);
    data_.diffuse_color_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_COLOR);
    data_.specular_light_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_LIGHT);
    data_.specular_color_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_COLOR);
    data_.volume_light_id = pass_index_get(EEVEE_RENDER_PASS_VOLUME_LIGHT);
    data_.emission_id = pass_index_get(EEVEE_RENDER_PASS_EMIT);
    data_.environment_id = pass_index_get(EEVEE_RENDER_PASS_ENVIRONMENT);
    data_.shadow_id = pass_index_get(EEVEE_RENDER_PASS_SHADOW);
    data_.ambient_occlusion_id = pass_index_get(EEVEE_RENDER_PASS_AO);

    data_.aov_color_id = data_.color_len;
    data_.aov_value_id = data_.value_len;

    data_.aov_color_len = aovs_info.color_len;
    data_.aov_value_len = aovs_info.value_len;

    data_.color_len += data_.aov_color_len;
    data_.value_len += data_.aov_value_len;
  }
  {
    /* TODO(@fclem): Over-scans. */

    data_.render_extent = math::divide_ceil(extent, int2(data_.scaling_factor));
    int2 weight_extent = inst_.camera.is_panoramic() ? data_.extent : int2(data_.scaling_factor);

    eGPUTextureFormat color_format = GPU_RGBA16F;
    eGPUTextureFormat float_format = GPU_R16F;
    eGPUTextureFormat weight_format = GPU_R32F;
    eGPUTextureFormat depth_format = GPU_R32F;

    int reset = 0;
    reset += depth_tx_.ensure_2d(depth_format, data_.extent);
    reset += combined_tx_.current().ensure_2d(color_format, data_.extent);
    reset += combined_tx_.next().ensure_2d(color_format, data_.extent);
    /* Two layers, one for nearest sample weight and one for weight accumulation. */
    reset += weight_tx_.current().ensure_2d_array(weight_format, weight_extent, 2);
    reset += weight_tx_.next().ensure_2d_array(weight_format, weight_extent, 2);
    reset += color_accum_tx_.ensure_2d_array(color_format,
                                             (data_.color_len > 0) ? data_.extent : int2(1),
                                             (data_.color_len > 0) ? data_.color_len : 1);
    reset += value_accum_tx_.ensure_2d_array(float_format,
                                             (data_.value_len > 0) ? data_.extent : int2(1),
                                             (data_.value_len > 0) ? data_.value_len : 1);

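    /* `ensure_*` reports when a texture had to be (re)created. Any reallocation invalidates the
     * accumulated history, so sampling restarts and the buffers are cleared below. */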
    if (reset > 0) {
      sampling.reset();
      data_.use_history = 0;
      data_.use_reprojection = 0;

      /* Avoid NaN in uninitialized texture memory making history blending dangerous. */
      color_accum_tx_.clear(float4(0.0f));
      value_accum_tx_.clear(float4(0.0f));
      combined_tx_.current().clear(float4(0.0f));
      weight_tx_.current().clear(float4(0.0f));
      depth_tx_.clear(float4(0.0f));
    }
  }

  force_disable_reprojection_ = (scene_eevee.flag & SCE_EEVEE_TAA_REPROJECTION) == 0;
}

void Film::sync()
{
  /* We use a fragment shader for viewport because we need to output the depth. */
  bool use_compute = (inst_.is_viewport() == false);

  eShaderType shader = use_compute ? FILM_COMP : FILM_FRAG;

  /* TODO(fclem): Shader variation for panoramic & scaled resolution. */

  RenderBuffers &rbuffers = inst_.render_buffers;
  VelocityModule &velocity = inst_.velocity;

  eGPUSamplerState filter = GPU_SAMPLER_FILTER;

  /* For viewport, only previous motion is supported.
   * Still bind previous step to avoid undefined behavior. */
  eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;

  DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
  accumulate_ps_ = DRW_pass_create("Film.Accumulate", state);
  GPUShader *sh = inst_.shaders.static_shader_get(shader);
  DRWShadingGroup *grp = DRW_shgroup_create(sh, accumulate_ps_);
  DRW_shgroup_uniform_block_ref(grp, "film_buf", &data_);
  DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
  DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
  DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*velocity.camera_steps[step_next]));
  DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &rbuffers.depth_tx);
  DRW_shgroup_uniform_texture_ref(grp, "combined_tx", &rbuffers.combined_tx);
  DRW_shgroup_uniform_texture_ref(grp, "normal_tx", &rbuffers.normal_tx);
  DRW_shgroup_uniform_texture_ref(grp, "vector_tx", &rbuffers.vector_tx);
  DRW_shgroup_uniform_texture_ref(grp, "diffuse_light_tx", &rbuffers.diffuse_light_tx);
  DRW_shgroup_uniform_texture_ref(grp, "diffuse_color_tx", &rbuffers.diffuse_color_tx);
  DRW_shgroup_uniform_texture_ref(grp, "specular_light_tx", &rbuffers.specular_light_tx);
  DRW_shgroup_uniform_texture_ref(grp, "specular_color_tx", &rbuffers.specular_color_tx);
  DRW_shgroup_uniform_texture_ref(grp, "volume_light_tx", &rbuffers.volume_light_tx);
  DRW_shgroup_uniform_texture_ref(grp, "emission_tx", &rbuffers.emission_tx);
  DRW_shgroup_uniform_texture_ref(grp, "environment_tx", &rbuffers.environment_tx);
  DRW_shgroup_uniform_texture_ref(grp, "shadow_tx", &rbuffers.shadow_tx);
  DRW_shgroup_uniform_texture_ref(grp, "ambient_occlusion_tx", &rbuffers.ambient_occlusion_tx);
  DRW_shgroup_uniform_texture_ref(grp, "aov_color_tx", &rbuffers.aov_color_tx);
  DRW_shgroup_uniform_texture_ref(grp, "aov_value_tx", &rbuffers.aov_value_tx);
  /* NOTE(@fclem): 16 is the max number of sampled textures in many implementations.
   * If we need more, we need to pack more of the similar passes in the same textures as arrays
   * or use image bindings instead. */
  DRW_shgroup_uniform_image_ref(grp, "in_weight_img", &weight_src_tx_);
  DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_dst_tx_);
  DRW_shgroup_uniform_texture_ref_ex(grp, "in_combined_tx", &combined_src_tx_, filter);
  DRW_shgroup_uniform_image_ref(grp, "out_combined_img", &combined_dst_tx_);
  DRW_shgroup_uniform_image_ref(grp, "depth_img", &depth_tx_);
  DRW_shgroup_uniform_image_ref(grp, "color_accum_img", &color_accum_tx_);
  DRW_shgroup_uniform_image_ref(grp, "value_accum_img", &value_accum_tx_);
  /* Sync with rendering passes. */
  DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
  /* Sync with rendering passes. */
  DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
  if (use_compute) {
    int2 dispatch_size = math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE));
    DRW_shgroup_call_compute(grp, UNPACK2(dispatch_size), 1);
  }
  else {
    DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
  }
}

void Film::end_sync()
{
  data_.use_reprojection = inst_.sampling.interactive_mode();

  /* Just bypass the reprojection and reset the accumulation. */
  if (force_disable_reprojection_ && inst_.sampling.is_reset()) {
    data_.use_reprojection = false;
    data_.use_history = false;
  }

  aovs_info.push_update();

  sync_mist();
}

float2 Film::pixel_jitter_get() const
{
  float2 jitter = inst_.sampling.rng_2d_get(SAMPLING_FILTER_U);

  if (!use_box_filter && data_.filter_radius < M_SQRT1_2 && !inst_.camera.is_panoramic()) {
    /* For filter sizes smaller than a pixel, change the sampling strategy and use a uniform
     * disk distribution covering the filter shape. This avoids putting samples in areas
     * without any weight. */
    /* TODO(fclem): Importance sampling could be a better option here. */
    jitter = Sampling::sample_disk(jitter) * data_.filter_radius;
  }
  else {
    /* Jitter the size of a whole pixel. [-0.5..0.5] */
    jitter -= 0.5f;
  }
  /* TODO(fclem): Mixed-resolution rendering: We need to offset to each of the target pixels
   * covered by a render pixel, ideally by choosing one randomly using another sampling
   * dimension, or by repeating the same sample RNG sequence for each pixel offset. */
  return jitter;
}

eViewLayerEEVEEPassType Film::enabled_passes_get() const
{
  return enabled_passes_;
}

void Film::update_sample_table()
{
  data_.subpixel_offset = pixel_jitter_get();

  int filter_radius_ceil = ceilf(data_.filter_radius);
  float filter_radius_sqr = square_f(data_.filter_radius);

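  /* The sample table lists the neighboring film texels covered by the filter together with
   * their filter weights. It is rebuilt for every sample since the sub-pixel offset changes. */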
  data_.samples_len = 0;
  if (use_box_filter || data_.filter_radius < 0.01f) {
    /* Disable gather filtering. */
    data_.samples[0].texel = int2(0, 0);
    data_.samples[0].weight = 1.0f;
    data_.samples_weight_total = 1.0f;
    data_.samples_len = 1;
  }
  /* NOTE: Threshold determined by hand so that the assert below is not hit. */
  else if (data_.filter_radius < 2.20f) {
    /* Small filter size. */
    int closest_index = 0;
    float closest_distance = FLT_MAX;
    data_.samples_weight_total = 0.0f;
    /* TODO(fclem): For optimization, could try Z-tile ordering. */
    for (int y = -filter_radius_ceil; y <= filter_radius_ceil; y++) {
      for (int x = -filter_radius_ceil; x <= filter_radius_ceil; x++) {
        float2 pixel_offset = float2(x, y) - data_.subpixel_offset;
        float distance_sqr = math::length_squared(pixel_offset);
        if (distance_sqr < filter_radius_sqr) {
          if (data_.samples_len >= FILM_PRECOMP_SAMPLE_MAX) {
            BLI_assert_msg(0, "Precomputed sample table is too small.");
            break;
          }
          FilmSample &sample = data_.samples[data_.samples_len];
          sample.texel = int2(x, y);
          sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
          data_.samples_weight_total += sample.weight;

          if (distance_sqr < closest_distance) {
            closest_distance = distance_sqr;
            closest_index = data_.samples_len;
          }
          data_.samples_len++;
        }
      }
    }
    /* Put the closest one in first position. */
    if (closest_index != 0) {
      SWAP(FilmSample, data_.samples[closest_index], data_.samples[0]);
    }
  }
  else {
    /* Large filter size. */
    MutableSpan<FilmSample> sample_table(data_.samples, FILM_PRECOMP_SAMPLE_MAX);
    /* To avoid hitting driver TDR and slowing rendering too much we use random sampling. */
    /* TODO(fclem): This case needs more work. We could distribute the samples better to avoid
     * loading the same pixel twice. */
    data_.samples_len = sample_table.size();
    data_.samples_weight_total = 0.0f;

    int i = 0;
    for (FilmSample &sample : sample_table) {
      /* TODO(fclem): Own RNG. */
      float2 random_2d = inst_.sampling.rng_2d_get(SAMPLING_SSS_U);
      /* This randomization makes sure we converge to the right result, but it also keeps
       * nearest-neighbor filtering from converging quickly. */
      random_2d.x = (random_2d.x + i) / float(FILM_PRECOMP_SAMPLE_MAX);

      float2 pixel_offset = math::floor(Sampling::sample_spiral(random_2d) * data_.filter_radius);
      sample.texel = int2(pixel_offset);

      float distance_sqr = math::length_squared(pixel_offset - data_.subpixel_offset);
      sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
      data_.samples_weight_total += sample.weight;
      i++;
    }
  }
}

void Film::accumulate(const DRWView *view)
{
  if (inst_.is_viewport()) {
    DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
    DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
    GPU_framebuffer_bind(dfbl->default_fb);
    /* Clear when using render borders. */
    if (data_.extent != int2(GPU_texture_width(dtxl->color), GPU_texture_height(dtxl->color))) {
      float4 clear_color = {0.0f, 0.0f, 0.0f, 0.0f};
      GPU_framebuffer_clear_color(dfbl->default_fb, clear_color);
    }
    GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
  }

  update_sample_table();

  /* Need to update the static references as they could have changed since a previous swap. */
  weight_src_tx_ = weight_tx_.current();
  weight_dst_tx_ = weight_tx_.next();
  combined_src_tx_ = combined_tx_.current();
  combined_dst_tx_ = combined_tx_.next();
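  /* Combined and weight buffers are double-buffered: the previous result is read while the new
   * accumulation is written, and the pair is swapped once the pass has been drawn. */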

  data_.display_only = false;
  data_.push_update();

  DRW_view_set_active(view);
  DRW_draw_pass(accumulate_ps_);

  combined_tx_.swap();
  weight_tx_.swap();

  /* Use history after first sample. */
  if (data_.use_history == 0) {
    data_.use_history = 1;
  }
}

void Film::display()
{
  BLI_assert(inst_.is_viewport());

  /* Acquire dummy render buffers for correct binding. They will not be used. */
  inst_.render_buffers.acquire(int2(1), (void *)this);

  DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
  GPU_framebuffer_bind(dfbl->default_fb);
  GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));

  /* Need to update the static references as they could have changed since a previous swap. */
  weight_src_tx_ = weight_tx_.current();
  weight_dst_tx_ = weight_tx_.next();
  combined_src_tx_ = combined_tx_.current();
  combined_dst_tx_ = combined_tx_.next();

  data_.display_only = true;
  data_.push_update();

  DRW_view_set_active(nullptr);
  DRW_draw_pass(accumulate_ps_);

  inst_.render_buffers.release();

  /* IMPORTANT: Do not swap! No accumulation has happened. */
}

float *Film::read_pass(eViewLayerEEVEEPassType pass_type)
{

  bool is_value = pass_is_value(pass_type);
  Texture &accum_tx = (pass_type == EEVEE_RENDER_PASS_COMBINED) ?
                          combined_tx_.current() :
                          (pass_type == EEVEE_RENDER_PASS_Z) ?
                          depth_tx_ :
                          (is_value ? value_accum_tx_ : color_accum_tx_);

  accum_tx.ensure_layer_views();

  int index = pass_id_get(pass_type);
  GPUTexture *pass_tx = accum_tx.layer_view(index);

  GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);

  float *result = (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);

  if (pass_is_float3(pass_type)) {
    /* Convert result in place as we cannot do this conversion on GPU. */
    for (auto px : IndexRange(accum_tx.width() * accum_tx.height())) {
      *(reinterpret_cast<float3 *>(result) + px) = *(reinterpret_cast<float3 *>(result + px * 4));
    }
  }

  return result;
}

}  // namespace blender::eevee