Blender V3.3
shade_surface.h
1 /* SPDX-License-Identifier: Apache-2.0
2  * Copyright 2011-2022 Blender Foundation */
3 
4 #pragma once
5 
6 #include "kernel/film/accumulate.h"
7 #include "kernel/film/passes.h"
8 
9 #include "kernel/integrator/mnee.h"
10 
11 #include "kernel/integrator/path_state.h"
12 #include "kernel/integrator/shader_eval.h"
13 #include "kernel/integrator/subsurface.h"
14 #include "kernel/integrator/volume_stack.h"
15 
16 #include "kernel/light/light.h"
17 #include "kernel/light/sample.h"
18 
19 CCL_NAMESPACE_BEGIN
20 
21 ccl_device_forceinline void integrate_surface_shader_setup(KernelGlobals kg,
22  ConstIntegratorState state,
23  ccl_private ShaderData *sd)
24 {
25  Intersection isect ccl_optional_struct_init;
26  integrator_state_read_isect(kg, state, &isect);
27 
28  Ray ray ccl_optional_struct_init;
29  integrator_state_read_ray(kg, state, &ray);
30 
31  shader_setup_from_ray(kg, sd, &ray, &isect);
32 }
33 
34 ccl_device_forceinline float3 integrate_surface_ray_offset(KernelGlobals kg,
35  const ccl_private ShaderData *sd,
36  const float3 ray_P,
37  const float3 ray_D)
38 {
39  /* No ray offset needed for other primitive types. */
40  if (!(sd->type & PRIMITIVE_TRIANGLE)) {
41  return ray_P;
42  }
43 
44  /* Self intersection tests already account for the case where a ray hits the
45  * same primitive. However precision issues can still cause neighboring
46  * triangles to be hit. Here we test if the ray-triangle intersection with
47  * the same primitive would miss, implying that a neighboring triangle would
48  * be hit instead.
49  *
50  * This relies on triangle intersection to be watertight, and the inverse
51  * object transform to match the one used by ray intersection exactly.
52  *
53  * Potential improvements:
54  * - It appears this happens when either barycentric coordinates are small,
55  * or dot(sd->Ng, ray_D) is small. Detect such cases and skip test?
56  * - Instead of ray offset, can we tweak P to lie within the triangle?
57  */
58  const uint tri_vindex = kernel_data_fetch(tri_vindex, sd->prim).w;
59  const packed_float3 tri_a = kernel_data_fetch(tri_verts, tri_vindex + 0),
60  tri_b = kernel_data_fetch(tri_verts, tri_vindex + 1),
61  tri_c = kernel_data_fetch(tri_verts, tri_vindex + 2);
62 
63  float3 local_ray_P = ray_P;
64  float3 local_ray_D = ray_D;
65 
66  if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
67  const Transform itfm = object_get_inverse_transform(kg, sd);
68  local_ray_P = transform_point(&itfm, local_ray_P);
69  local_ray_D = transform_direction(&itfm, local_ray_D);
70  }
71 
72  if (ray_triangle_intersect_self(local_ray_P, local_ray_D, tri_a, tri_b, tri_c)) {
73  return ray_P;
74  }
75  else {
76  return ray_offset(ray_P, sd->Ng);
77  }
78 }
79 
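Note on the fallback above: when the watertight self-intersection test fails, ray_offset() nudges the ray start away from the surface along the geometric normal Ng. The helper below is only an illustrative sketch of that idea (a scale-aware normal offset with an arbitrary epsilon); the actual Cycles ray_offset() works differently, operating on the floating-point representation of P.

/* Illustrative only: not the Cycles ray_offset() implementation. Push P off the
 * surface along Ng by an epsilon proportional to the magnitude of P, so the
 * offset survives floating-point rounding regardless of scene scale. */
ccl_device_inline float3 example_normal_offset(const float3 P, const float3 Ng)
{
  const float eps = 1e-5f; /* arbitrary, for illustration */
  const float scale = fmaxf(1.0f, fmaxf(fabsf(P.x), fmaxf(fabsf(P.y), fabsf(P.z))));
  return P + Ng * (eps * scale);
}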
80 #ifdef __HOLDOUT__
81 ccl_device_forceinline bool integrate_surface_holdout(KernelGlobals kg,
82  ConstIntegratorState state,
83  ccl_private ShaderData *sd,
84  ccl_global float *ccl_restrict render_buffer)
85 {
86  /* Write holdout transparency to render buffer and stop if fully holdout. */
87  const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
88 
89  if (((sd->flag & SD_HOLDOUT) || (sd->object_flag & SD_OBJECT_HOLDOUT_MASK)) &&
90  (path_flag & PATH_RAY_TRANSPARENT_BACKGROUND)) {
91  const float3 holdout_weight = shader_holdout_apply(kg, sd);
92  const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
93  const float transparent = average(holdout_weight * throughput);
94  kernel_accum_holdout(kg, state, path_flag, transparent, render_buffer);
95  if (isequal(holdout_weight, one_float3())) {
96  return false;
97  }
98  }
99 
100  return true;
101 }
102 #endif /* __HOLDOUT__ */
103 
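A short note on the holdout math above: the holdout weight is an RGB closure weight, but the render buffer stores a single transparency value, so the contribution is collapsed with an average, \( \mathrm{transparent} = \tfrac{1}{3}\sum_{c \in \{r,g,b\}} w_c \, T_c \), where \(w\) is the holdout weight and \(T\) the current path throughput. When the holdout weight is exactly one in all channels the surface is a full holdout and the path stops.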
104 #ifdef __EMISSION__
105 ccl_device_forceinline void integrate_surface_emission(KernelGlobals kg,
106  ConstIntegratorState state,
107  ccl_private const ShaderData *sd,
108  ccl_global float *ccl_restrict
109  render_buffer)
110 {
111  const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
112 
113  /* Evaluate emissive closure. */
114  float3 L = shader_emissive_eval(sd);
115 
116 # ifdef __HAIR__
117  if (!(path_flag & PATH_RAY_MIS_SKIP) && (sd->flag & SD_USE_MIS) &&
118  (sd->type & PRIMITIVE_TRIANGLE))
119 # else
120  if (!(path_flag & PATH_RAY_MIS_SKIP) && (sd->flag & SD_USE_MIS))
121 # endif
122  {
123  const float bsdf_pdf = INTEGRATOR_STATE(state, path, mis_ray_pdf);
124  const float t = sd->ray_length;
125 
126  /* Multiple importance sampling, get triangle light pdf,
127  * and compute weight with respect to BSDF pdf. */
128  float pdf = triangle_light_pdf(kg, sd, t);
129  float mis_weight = light_sample_mis_weight_forward(kg, bsdf_pdf, pdf);
130  L *= mis_weight;
131  }
132 
133  const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
134  kernel_accum_emission(
135  kg, state, throughput * L, render_buffer, object_lightgroup(kg, sd->object));
136 }
137 #endif /* __EMISSION__ */
138 
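For reference, the MIS weighting applied above combines the pdf of having sampled this direction from the BSDF with the pdf of sampling the same triangle via next-event estimation. To my understanding light_sample_mis_weight_forward() implements the power heuristic (Veach, \(\beta = 2\)); written out with \(p_b\) = bsdf_pdf and \(p_l\) = triangle_light_pdf():

\[ w_{\mathrm{forward}} = \frac{p_b^{2}}{p_b^{2} + p_l^{2}} \]

The complementary weight \( p_l^{2} / (p_b^{2} + p_l^{2}) \) is applied on the next-event-estimation side (see light_sample_mis_weight_nee() in the direct-light code below), so the two strategies together remain unbiased.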
139 #ifdef __EMISSION__
140 /* Path tracing: sample point on light and evaluate light shader, then
141  * queue shadow ray to be traced. */
142 template<uint node_feature_mask>
143 ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
144  IntegratorState state,
145  ccl_private ShaderData *sd,
146  ccl_private const RNGState *rng_state)
147 {
148  /* Test if there is a light or BSDF that needs direct light. */
149  if (!(kernel_data.integrator.use_direct_light && (sd->flag & SD_BSDF_HAS_EVAL))) {
150  return;
151  }
152 
153  /* Sample position on a light. */
154  LightSample ls ccl_optional_struct_init;
155  {
156  const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
157  const uint bounce = INTEGRATOR_STATE(state, path, bounce);
158  float light_u, light_v;
159  path_state_rng_2D(kg, rng_state, PRNG_LIGHT_U, &light_u, &light_v);
160 
161  if (!light_distribution_sample_from_position(
162  kg, light_u, light_v, sd->time, sd->P, bounce, path_flag, &ls)) {
163  return;
164  }
165  }
166 
167  kernel_assert(ls.pdf != 0.0f);
168 
169  /* Evaluate light shader.
170  *
171  * TODO: can we reuse sd memory? In theory we can move this after
172  * integrate_surface_bounce, evaluate the BSDF, and only then evaluate
173  * the light shader. This could also move to its own kernel, for
174  * non-constant light sources. */
175  ShaderDataCausticsStorage emission_sd_storage;
176  ccl_private ShaderData *emission_sd = AS_SHADER_DATA(&emission_sd_storage);
177 
178  Ray ray ccl_optional_struct_init;
179  BsdfEval bsdf_eval ccl_optional_struct_init;
180  const bool is_transmission = shader_bsdf_is_transmission(sd, ls.D);
181 
182 # ifdef __MNEE__
183  int mnee_vertex_count = 0;
184  IF_KERNEL_FEATURE(MNEE)
185  {
186  if (ls.lamp != LAMP_NONE) {
187  /* Is this a caustic light? */
188  const bool use_caustics = kernel_data_fetch(lights, ls.lamp).use_caustics;
189  if (use_caustics) {
190  /* Are we on a caustic caster? */
191  if (is_transmission && (sd->object_flag & SD_OBJECT_CAUSTICS_CASTER))
192  return;
193 
194  /* Are we on a caustic receiver? */
195  if (!is_transmission && (sd->object_flag & SD_OBJECT_CAUSTICS_RECEIVER))
196  mnee_vertex_count = kernel_path_mnee_sample(
197  kg, state, sd, emission_sd, rng_state, &ls, &bsdf_eval);
198  }
199  }
200  }
201  if (mnee_vertex_count > 0) {
202  /* Create shadow ray after successful manifold walk:
203  * emission_sd contains the last interface intersection and
204  * the light sample ls has been updated */
205  light_sample_to_surface_shadow_ray(kg, emission_sd, &ls, &ray);
206  }
207  else
208 # endif /* __MNEE__ */
209  {
210  const float3 light_eval = light_sample_shader_eval(kg, state, emission_sd, &ls, sd->time);
211  if (is_zero(light_eval)) {
212  return;
213  }
214 
215  /* Evaluate BSDF. */
216  const float bsdf_pdf = shader_bsdf_eval(kg, sd, ls.D, is_transmission, &bsdf_eval, ls.shader);
217  bsdf_eval_mul(&bsdf_eval, light_eval / ls.pdf);
218 
219  if (ls.shader & SHADER_USE_MIS) {
220  const float mis_weight = light_sample_mis_weight_nee(kg, ls.pdf, bsdf_pdf);
221  bsdf_eval_mul(&bsdf_eval, mis_weight);
222  }
223 
224  /* Path termination. */
225  const float terminate = path_state_rng_light_termination(kg, rng_state);
226  if (light_sample_terminate(kg, &ls, &bsdf_eval, terminate)) {
227  return;
228  }
229 
230  /* Create shadow ray. */
231  light_sample_to_surface_shadow_ray(kg, sd, &ls, &ray);
232  }
233 
234  const bool is_light = light_sample_is_light(&ls);
235 
236  /* Branch off shadow kernel. */
237  IntegratorShadowState shadow_state = integrator_shadow_path_init(
238  kg, state, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW, false);
239 
240  /* Copy volume stack and enter/exit volume. */
241  integrator_state_copy_volume_stack_to_shadow(kg, shadow_state, state);
242 
243  if (is_transmission) {
244 # ifdef __VOLUME__
245  shadow_volume_stack_enter_exit(kg, shadow_state, sd);
246 # endif
247  }
248 
249  if (ray.self.object != OBJECT_NONE) {
250  ray.P = integrate_surface_ray_offset(kg, sd, ray.P, ray.D);
251  }
252 
253  /* Write shadow ray and associated state to global memory. */
254  integrator_state_write_shadow_ray(kg, shadow_state, &ray);
255  // Save memory by storing the light and object indices in the shadow_isect
256  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
257  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
258  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
259  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, prim) = ray.self.light_prim;
260 
261  /* Copy state from main path to shadow path. */
262  uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag);
263  shadow_flag |= (is_light) ? PATH_RAY_SHADOW_FOR_LIGHT : 0;
264  const float3 throughput = INTEGRATOR_STATE(state, path, throughput) * bsdf_eval_sum(&bsdf_eval);
265 
266  if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
267  packed_float3 pass_diffuse_weight;
268  packed_float3 pass_glossy_weight;
269 
270  if (shadow_flag & PATH_RAY_ANY_PASS) {
271  /* Indirect bounce, use weights from earlier surface or volume bounce. */
272  pass_diffuse_weight = INTEGRATOR_STATE(state, path, pass_diffuse_weight);
273  pass_glossy_weight = INTEGRATOR_STATE(state, path, pass_glossy_weight);
274  }
275  else {
276  /* Direct light, use BSDFs at this bounce. */
277  shadow_flag |= PATH_RAY_SURFACE_PASS;
278  pass_diffuse_weight = packed_float3(bsdf_eval_pass_diffuse_weight(&bsdf_eval));
279  pass_glossy_weight = packed_float3(bsdf_eval_pass_glossy_weight(&bsdf_eval));
280  }
281 
282  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_diffuse_weight) = pass_diffuse_weight;
283  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_glossy_weight) = pass_glossy_weight;
284  }
285 
286  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, render_pixel_index) = INTEGRATOR_STATE(
287  state, path, render_pixel_index);
288  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, rng_offset) = INTEGRATOR_STATE(
289  state, path, rng_offset);
290  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, rng_hash) = INTEGRATOR_STATE(
291  state, path, rng_hash);
292  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, sample) = INTEGRATOR_STATE(
293  state, path, sample);
294  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, flag) = shadow_flag;
295 
296  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, transparent_bounce) = INTEGRATOR_STATE(
297  state, path, transparent_bounce);
298  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, glossy_bounce) = INTEGRATOR_STATE(
299  state, path, glossy_bounce);
300 
301 # ifdef __MNEE__
302  if (mnee_vertex_count > 0) {
303  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, transmission_bounce) =
304  INTEGRATOR_STATE(state, path, transmission_bounce) + mnee_vertex_count - 1;
305  INTEGRATOR_STATE_WRITE(shadow_state,
306  shadow_path,
307  diffuse_bounce) = INTEGRATOR_STATE(state, path, diffuse_bounce) + 1;
308  INTEGRATOR_STATE_WRITE(shadow_state,
309  shadow_path,
310  bounce) = INTEGRATOR_STATE(state, path, bounce) + mnee_vertex_count;
311  }
312  else
313 # endif
314  {
315  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, transmission_bounce) = INTEGRATOR_STATE(
316  state, path, transmission_bounce);
317  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, diffuse_bounce) = INTEGRATOR_STATE(
318  state, path, diffuse_bounce);
319  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, bounce) = INTEGRATOR_STATE(
320  state, path, bounce);
321  }
322 
323  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, throughput) = throughput;
324 
325  if (kernel_data.kernel_features & KERNEL_FEATURE_SHADOW_PASS) {
326  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, unshadowed_throughput) = throughput;
327  }
328 
329  /* Write Lightgroup, +1 as lightgroup is int but we need to encode into a uint8_t. */
330  INTEGRATOR_STATE_WRITE(
331  shadow_state, shadow_path, lightgroup) = (ls.type != LIGHT_BACKGROUND) ?
332  ls.group + 1 :
333  kernel_data.background.lightgroup + 1;
334 }
335 #endif
336 
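Putting the pieces of the direct-light path above together: if the queued shadow ray turns out to be unoccluded, the shadow kernel adds a contribution of the standard next-event-estimation form

\[ \Delta L \;=\; T \cdot f(\omega_o, \omega_i) \cdot \frac{L_e(\omega_i)}{p_{\mathrm{light}}} \cdot w_{\mathrm{nee}} \]

where \(T\) is the path throughput, \(f\) is the BSDF evaluation accumulated in bsdf_eval (with the cosine term folded in), \(L_e / p_{\mathrm{light}}\) corresponds to light_eval / ls.pdf, and \(w_{\mathrm{nee}}\) is the MIS weight applied only for lights flagged SHADER_USE_MIS (assumed here to be the power-heuristic counterpart of the forward weight).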
337 /* Path tracing: bounce off or through surface with new direction. */
338 ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(
339  KernelGlobals kg,
340  IntegratorState state,
341  ccl_private ShaderData *sd,
342  ccl_private const RNGState *rng_state)
343 {
344  /* Sample BSDF or BSSRDF. */
345  if (!(sd->flag & (SD_BSDF | SD_BSSRDF))) {
346  return LABEL_NONE;
347  }
348 
349  float bsdf_u, bsdf_v;
350  path_state_rng_2D(kg, rng_state, PRNG_BSDF_U, &bsdf_u, &bsdf_v);
351  ccl_private const ShaderClosure *sc = shader_bsdf_bssrdf_pick(sd, &bsdf_u);
352 
353 #ifdef __SUBSURFACE__
354  /* BSSRDF closure, we schedule subsurface intersection kernel. */
355  if (CLOSURE_IS_BSSRDF(sc->type)) {
356  return subsurface_bounce(kg, state, sd, sc);
357  }
358 #endif
359 
360  /* BSDF closure, sample direction. */
361  float bsdf_pdf;
362  BsdfEval bsdf_eval ccl_optional_struct_init;
363  float3 bsdf_omega_in ccl_optional_struct_init;
364  differential3 bsdf_domega_in ccl_optional_struct_init;
365  int label;
366 
367  label = shader_bsdf_sample_closure(
368  kg, sd, sc, bsdf_u, bsdf_v, &bsdf_eval, &bsdf_omega_in, &bsdf_domega_in, &bsdf_pdf);
369 
370  if (bsdf_pdf == 0.0f || bsdf_eval_is_zero(&bsdf_eval)) {
371  return LABEL_NONE;
372  }
373 
374  if (label & LABEL_TRANSPARENT) {
375  /* Only need to modify start distance for transparent. */
376  INTEGRATOR_STATE_WRITE(state, ray, tmin) = intersection_t_offset(sd->ray_length);
377  }
378  else {
379  /* Setup ray with changed origin and direction. */
380  const float3 D = normalize(bsdf_omega_in);
381  INTEGRATOR_STATE_WRITE(state, ray, P) = integrate_surface_ray_offset(kg, sd, sd->P, D);
382  INTEGRATOR_STATE_WRITE(state, ray, D) = D;
383  INTEGRATOR_STATE_WRITE(state, ray, tmin) = 0.0f;
384  INTEGRATOR_STATE_WRITE(state, ray, tmax) = FLT_MAX;
385 #ifdef __RAY_DIFFERENTIALS__
386  INTEGRATOR_STATE_WRITE(state, ray, dP) = differential_make_compact(sd->dP);
387  INTEGRATOR_STATE_WRITE(state, ray, dD) = differential_make_compact(bsdf_domega_in);
388 #endif
389  }
390 
391  /* Update throughput. */
392  float3 throughput = INTEGRATOR_STATE(state, path, throughput);
393  throughput *= bsdf_eval_sum(&bsdf_eval) / bsdf_pdf;
394  INTEGRATOR_STATE_WRITE(state, path, throughput) = throughput;
395 
396  if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
397  if (INTEGRATOR_STATE(state, path, bounce) == 0) {
398  INTEGRATOR_STATE_WRITE(state, path, pass_diffuse_weight) = bsdf_eval_pass_diffuse_weight(
399  &bsdf_eval);
400  INTEGRATOR_STATE_WRITE(state, path, pass_glossy_weight) = bsdf_eval_pass_glossy_weight(
401  &bsdf_eval);
402  }
403  }
404 
405  /* Update path state */
406  if (!(label & LABEL_TRANSPARENT)) {
407  INTEGRATOR_STATE_WRITE(state, path, mis_ray_pdf) = bsdf_pdf;
408  INTEGRATOR_STATE_WRITE(state, path, min_ray_pdf) = fminf(
409  bsdf_pdf, INTEGRATOR_STATE(state, path, min_ray_pdf));
410  }
411 
412  path_state_next(kg, state, label);
413  return label;
414 }
415 
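The throughput update in the bounce function above is the usual BSDF importance-sampling step,

\[ T_{k+1} \;=\; T_k \cdot \frac{f(\omega_o, \omega_i)\,|\cos\theta_i|}{p(\omega_i)} \]

with the cosine term already folded into bsdf_eval by the closure evaluation, which is why the code only divides bsdf_eval_sum() by bsdf_pdf.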
416 #ifdef __VOLUME__
417 ccl_device_forceinline int integrate_surface_volume_only_bounce(IntegratorState state,
418  ccl_private ShaderData *sd)
419 {
420  if (!path_state_volume_next(state)) {
421  return LABEL_NONE;
422  }
423 
424  /* Only modify start distance. */
425  INTEGRATOR_STATE_WRITE(state, ray, tmin) = intersection_t_offset(sd->ray_length);
426 
427  return LABEL_TRANSMIT;
428 }
429 #endif
430 
431 ccl_device_forceinline bool integrate_surface_terminate(IntegratorState state,
432  const uint32_t path_flag)
433 {
434  const float probability = (path_flag & PATH_RAY_TERMINATE_ON_NEXT_SURFACE) ?
435  0.0f :
436  INTEGRATOR_STATE(state, path, continuation_probability);
437  if (probability == 0.0f) {
438  return true;
439  }
440  else if (probability != 1.0f) {
441  INTEGRATOR_STATE_WRITE(state, path, throughput) /= probability;
442  }
443 
444  return false;
445 }
446 
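The division by the continuation probability above is standard Russian roulette: a path that survives with probability \(p\) has its throughput scaled by \(1/p\), so the estimator stays unbiased,

\[ \mathbb{E}[\hat{X}] \;=\; p \cdot \frac{X}{p} + (1 - p) \cdot 0 \;=\; X. \]

Paths flagged PATH_RAY_TERMINATE_ON_NEXT_SURFACE use \(p = 0\) and therefore always terminate here.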
447 #if defined(__AO__)
448 ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
449  IntegratorState state,
450  ccl_private const ShaderData *ccl_restrict sd,
451  ccl_private const RNGState *ccl_restrict
452  rng_state,
453  ccl_global float *ccl_restrict render_buffer)
454 {
455  if (!(kernel_data.kernel_features & KERNEL_FEATURE_AO_ADDITIVE) &&
456  !(INTEGRATOR_STATE(state, path, flag) & PATH_RAY_CAMERA)) {
457  return;
458  }
459 
460  float bsdf_u, bsdf_v;
461  path_state_rng_2D(kg, rng_state, PRNG_BSDF_U, &bsdf_u, &bsdf_v);
462 
463  float3 ao_N;
464  const float3 ao_weight = shader_bsdf_ao(
465  kg, sd, kernel_data.integrator.ao_additive_factor, &ao_N);
466 
467  float3 ao_D;
468  float ao_pdf;
469  sample_cos_hemisphere(ao_N, bsdf_u, bsdf_v, &ao_D, &ao_pdf);
470 
471  bool skip_self = true;
472 
473  Ray ray ccl_optional_struct_init;
474  ray.P = shadow_ray_offset(kg, sd, ao_D, &skip_self);
475  ray.D = ao_D;
476  if (skip_self) {
477  ray.P = integrate_surface_ray_offset(kg, sd, ray.P, ray.D);
478  }
479  ray.tmin = 0.0f;
480  ray.tmax = kernel_data.integrator.ao_bounces_distance;
481  ray.time = sd->time;
482  ray.self.object = (skip_self) ? sd->object : OBJECT_NONE;
483  ray.self.prim = (skip_self) ? sd->prim : PRIM_NONE;
484  ray.self.light_object = OBJECT_NONE;
485  ray.self.light_prim = PRIM_NONE;
486  ray.dP = differential_zero_compact();
487  ray.dD = differential_zero_compact();
488 
489  /* Branch off shadow kernel. */
490  IntegratorShadowState shadow_state = integrator_shadow_path_init(
491  kg, state, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW, true);
492 
493  /* Copy volume stack and enter/exit volume. */
494  integrator_state_copy_volume_stack_to_shadow(kg, shadow_state, state);
495 
496  /* Write shadow ray and associated state to global memory. */
497  integrator_state_write_shadow_ray(kg, shadow_state, &ray);
498  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
499  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
500  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
501  INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, prim) = ray.self.light_prim;
502 
503  /* Copy state from main path to shadow path. */
504  const uint16_t bounce = INTEGRATOR_STATE(state, path, bounce);
505  const uint16_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
506  uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag) | PATH_RAY_SHADOW_FOR_AO;
507  const float3 throughput = INTEGRATOR_STATE(state, path, throughput) * shader_bsdf_alpha(kg, sd);
508 
509  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, render_pixel_index) = INTEGRATOR_STATE(
510  state, path, render_pixel_index);
511  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, rng_offset) = INTEGRATOR_STATE(
512  state, path, rng_offset);
513  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, rng_hash) = INTEGRATOR_STATE(
514  state, path, rng_hash);
515  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, sample) = INTEGRATOR_STATE(
516  state, path, sample);
517  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, flag) = shadow_flag;
518  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, bounce) = bounce;
519  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, transparent_bounce) = transparent_bounce;
520  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, throughput) = throughput;
521 
522  if (kernel_data.kernel_features & KERNEL_FEATURE_AO_ADDITIVE) {
523  INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, unshadowed_throughput) = ao_weight;
524  }
525 }
526 #endif /* defined(__AO__) */
527 
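sample_cos_hemisphere() above draws the AO direction with a pdf proportional to \(\cos\theta\), which matches the cosine weighting of the ambient-occlusion integral, so the cosine and the pdf cancel and every unoccluded sample contributes equally. A minimal sketch of such a sampler is below; it is not necessarily the exact Cycles implementation, just the same distribution, and it assumes the existing make_orthonormals() helper for building a tangent frame around N.

/* Sketch: cosine-weighted hemisphere sampling around N via the polar mapping,
 * pdf(omega) = cos(theta) / pi. Same distribution as sample_cos_hemisphere(),
 * though not necessarily the same code. */
ccl_device void example_sample_cos_hemisphere(
    const float3 N, float randu, float randv, ccl_private float3 *omega_in, ccl_private float *pdf)
{
  const float r = sqrtf(randu);
  const float phi = 2.0f * M_PI_F * randv;
  const float x = r * cosf(phi);
  const float y = r * sinf(phi);
  const float z = sqrtf(fmaxf(0.0f, 1.0f - randu)); /* cos(theta) */

  float3 T, B;
  make_orthonormals(N, &T, &B);
  *omega_in = x * T + y * B + z * N;
  *pdf = z * M_1_PI_F;
}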
528 template<uint node_feature_mask>
529 ccl_device bool integrate_surface(KernelGlobals kg,
530  IntegratorState state,
531  ccl_global float *ccl_restrict render_buffer)
532 
533 {
534  PROFILING_INIT_FOR_SHADER(kg, PROFILING_SHADE_SURFACE_SETUP);
535 
536  /* Setup shader data. */
537  ShaderData sd;
538  integrate_surface_shader_setup(kg, state, &sd);
539  PROFILING_SHADER(sd.object, sd.shader);
540 
541  int continue_path_label = 0;
542 
543  const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
544 
545  /* Skip most work for volume bounding surface. */
546 #ifdef __VOLUME__
547  if (!(sd.flag & SD_HAS_ONLY_VOLUME)) {
548 #endif
549 #ifdef __SUBSURFACE__
550  /* Can skip shader evaluation for BSSRDF exit point without bump mapping. */
551  if (!(path_flag & PATH_RAY_SUBSURFACE) || ((sd.flag & SD_HAS_BSSRDF_BUMP)))
552 #endif
553  {
554  /* Evaluate shader. */
555  PROFILING_EVENT(PROFILING_SHADE_SURFACE_EVAL);
556  shader_eval_surface<node_feature_mask>(kg, state, &sd, render_buffer, path_flag);
557 
558  /* Initialize additional RNG for BSDFs. */
559  if (sd.flag & SD_BSDF_NEEDS_LCG) {
560  sd.lcg_state = lcg_state_init(INTEGRATOR_STATE(state, path, rng_hash),
561  INTEGRATOR_STATE(state, path, rng_offset),
562  INTEGRATOR_STATE(state, path, sample),
563  0xb4bc3953);
564  }
565  }
566 
567 #ifdef __SUBSURFACE__
568  if (path_flag & PATH_RAY_SUBSURFACE) {
569  /* When coming from inside subsurface scattering, setup a diffuse
570  * closure to perform lighting at the exit point. */
571  subsurface_shader_data_setup(kg, state, &sd, path_flag);
572  INTEGRATOR_STATE_WRITE(state, path, flag) &= ~PATH_RAY_SUBSURFACE;
573  }
574  else
575 #endif
576  {
577  /* Filter closures. */
578  shader_prepare_surface_closures(kg, state, &sd, path_flag);
579 
580 #ifdef __HOLDOUT__
581  /* Evaluate holdout. */
582  if (!integrate_surface_holdout(kg, state, &sd, render_buffer)) {
583  return false;
584  }
585 #endif
586 
587 #ifdef __EMISSION__
588  /* Write emission. */
589  if (sd.flag & SD_EMISSION) {
590  integrate_surface_emission(kg, state, &sd, render_buffer);
591  }
592 #endif
593 
594  /* Perform path termination. Most paths have already been terminated in
595  * the intersect_closest kernel, this is just for emission and for dividing
596  * throughput by the probability at the right moment.
597  *
598  * Also ensure we don't do it twice for SSS at both the entry and exit point. */
599  if (integrate_surface_terminate(state, path_flag)) {
600  return false;
601  }
602 
603  /* Write render passes. */
604 #ifdef __PASSES__
605  PROFILING_EVENT(PROFILING_SHADE_SURFACE_PASSES);
606  kernel_write_data_passes(kg, state, &sd, render_buffer);
607 #endif
608 
609 #ifdef __DENOISING_FEATURES__
610  kernel_write_denoising_features_surface(kg, state, &sd, render_buffer);
611 #endif
612  }
613 
614  /* Load random number state. */
615  RNGState rng_state;
616  path_state_rng_load(state, &rng_state);
617 
618  /* Direct light. */
619  PROFILING_EVENT(PROFILING_SHADE_SURFACE_DIRECT_LIGHT);
620  integrate_surface_direct_light<node_feature_mask>(kg, state, &sd, &rng_state);
621 
622 #if defined(__AO__)
623  /* Ambient occlusion pass. */
624  if (kernel_data.kernel_features & KERNEL_FEATURE_AO) {
625  PROFILING_EVENT(PROFILING_SHADE_SURFACE_AO);
626  integrate_surface_ao(kg, state, &sd, &rng_state, render_buffer);
627  }
628 #endif
629 
630  PROFILING_EVENT(PROFILING_SHADE_SURFACE_INDIRECT_LIGHT);
631  continue_path_label = integrate_surface_bsdf_bssrdf_bounce(kg, state, &sd, &rng_state);
632 #ifdef __VOLUME__
633  }
634  else {
635  if (integrate_surface_terminate(state, path_flag)) {
636  return false;
637  }
638 
639  PROFILING_EVENT(PROFILING_SHADE_SURFACE_INDIRECT_LIGHT);
640  continue_path_label = integrate_surface_volume_only_bounce(state, &sd);
641  }
642 
643  if (continue_path_label & LABEL_TRANSMIT) {
644  /* Enter/Exit volume. */
645  volume_stack_enter_exit(kg, state, &sd);
646  }
647 #endif
648 
649  return continue_path_label != 0;
650 }
651 
652 template<uint node_feature_mask, DeviceKernel current_kernel>
653 ccl_device_forceinline void integrator_shade_surface(KernelGlobals kg,
654  IntegratorState state,
655  ccl_global float *ccl_restrict
656  render_buffer)
657 {
658  if (integrate_surface<node_feature_mask>(kg, state, render_buffer)) {
659  if (INTEGRATOR_STATE(state, path, flag) & PATH_RAY_SUBSURFACE) {
660  integrator_path_next(
661  kg, state, current_kernel, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SUBSURFACE);
662  }
663  else {
664  kernel_assert(INTEGRATOR_STATE(state, ray, tmax) != 0.0f);
665  integrator_path_next(kg, state, current_kernel, DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST);
666  }
667  }
668  else {
669  integrator_path_terminate(kg, state, current_kernel);
670  }
671 }
672 
673 ccl_device_forceinline void integrator_shade_surface(
674  KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
675 {
676  integrator_shade_surface<KERNEL_FEATURE_NODE_MASK_SURFACE & ~KERNEL_FEATURE_NODE_RAYTRACE,
677  DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE>(
678  kg, state, render_buffer);
679 }
680 
681 ccl_device_forceinline void integrator_shade_surface_raytrace(
682  KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
683 {
684  integrator_shade_surface<KERNEL_FEATURE_NODE_MASK_SURFACE,
685  DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE>(
686  kg, state, render_buffer);
687 }
688 
unsigned int uint
Definition: BLI_sys_types.h:67
ccl_device_inline float3 bsdf_eval_pass_diffuse_weight(ccl_private const BsdfEval *eval)
Definition: accumulate.h:77
ccl_device_inline void bsdf_eval_mul(ccl_private BsdfEval *eval, float value)
Definition: accumulate.h:58
ccl_device_inline float3 bsdf_eval_pass_glossy_weight(ccl_private const BsdfEval *eval)
Definition: accumulate.h:84
ccl_device_inline bool bsdf_eval_is_zero(ccl_private BsdfEval *eval)
Definition: accumulate.h:53
ccl_device_inline void kernel_accum_holdout(KernelGlobals kg, ConstIntegratorState state, const uint32_t path_flag, const float transparent, ccl_global float *ccl_restrict render_buffer)
Definition: accumulate.h:549
ccl_device_inline void kernel_accum_emission(KernelGlobals kg, ConstIntegratorState state, const float3 L, ccl_global float *ccl_restrict render_buffer, const int lightgroup=LIGHTGROUP_NONE)
Definition: accumulate.h:592
ccl_device_inline float3 bsdf_eval_sum(ccl_private const BsdfEval *eval)
Definition: accumulate.h:72
ccl_device float3 bsdf_eval(KernelGlobals kg, ccl_private ShaderData *sd, ccl_private const ShaderClosure *sc, const float3 omega_in, const bool is_transmission, ccl_private float *pdf)
Definition: bsdf.h:462
#define kernel_assert(cond)
Definition: cpu/compat.h:34
#define ccl_restrict
Definition: cuda/compat.h:50
#define ccl_device_forceinline
Definition: cuda/compat.h:35
#define ccl_optional_struct_init
Definition: cuda/compat.h:53
#define ccl_device
Definition: cuda/compat.h:32
#define ccl_private
Definition: cuda/compat.h:48
#define ccl_global
Definition: cuda/compat.h:43
#define CCL_NAMESPACE_END
Definition: cuda/compat.h:9
ccl_device_forceinline float intersection_t_offset(const float t)
ccl_device_inline float3 ray_offset(const float3 P, const float3 Ng)
ccl_device_inline float light_sample_mis_weight_forward(KernelGlobals kg, const float forward_pdf, const float nee_pdf)
ccl_device_inline float light_sample_mis_weight_nee(KernelGlobals kg, const float nee_pdf, const float forward_pdf)
CCL_NAMESPACE_BEGIN ccl_device_noinline_cpu float3 light_sample_shader_eval(KernelGlobals kg, IntegratorState state, ccl_private ShaderData *ccl_restrict emission_sd, ccl_private LightSample *ccl_restrict ls, float time)
ccl_device_inline float3 shadow_ray_offset(KernelGlobals kg, ccl_private const ShaderData *ccl_restrict sd, float3 L, ccl_private bool *r_skip_self)
ccl_device_inline bool light_sample_is_light(ccl_private const LightSample *ccl_restrict ls)
ccl_device_inline void light_sample_to_surface_shadow_ray(KernelGlobals kg, ccl_private const ShaderData *ccl_restrict sd, ccl_private const LightSample *ccl_restrict ls, ccl_private Ray *ray)
ccl_device_inline bool light_sample_terminate(KernelGlobals kg, ccl_private const LightSample *ccl_restrict ls, ccl_private BsdfEval *ccl_restrict eval, const float rand_terminate)
const char * label
#define kernel_data
const KernelGlobalsCPU *ccl_restrict KernelGlobals
#define kernel_data_fetch(name, index)
ccl_device_forceinline float differential_make_compact(const differential3 D)
Definition: differential.h:117
ccl_device_forceinline float differential_zero_compact()
Definition: differential.h:112
ccl_device_inline float3 transform_direction(ccl_private const Transform *t, const float3 a)
CCL_NAMESPACE_END CCL_NAMESPACE_BEGIN ccl_device_inline float3 transform_point(ccl_private const Transform *t, const float3 a)
ccl_gpu_kernel_postfix ccl_global KernelWorkTile const int ccl_global float * render_buffer
const uint64_t render_pixel_index
const int state
ccl_device_inline int object_lightgroup(KernelGlobals kg, int object)
ccl_device_inline Transform object_get_inverse_transform(KernelGlobals kg, ccl_private const ShaderData *sd)
ccl_device float3 shader_emissive_eval(ccl_private const ShaderData *sd)
ccl_device_inline ccl_private const ShaderClosure * shader_bsdf_bssrdf_pick(ccl_private const ShaderData *ccl_restrict sd, ccl_private float *randu)
ccl_device_inline bool shader_bsdf_is_transmission(ccl_private const ShaderData *sd, const float3 omega_in)
ccl_device float3 shader_bsdf_ao(KernelGlobals kg, ccl_private const ShaderData *sd, const float ao_factor, ccl_private float3 *N_)
ccl_device float shader_bsdf_eval(KernelGlobals kg, ccl_private ShaderData *sd, const float3 omega_in, const bool is_transmission, ccl_private BsdfEval *bsdf_eval, const uint light_shader_flags)
ccl_device float3 shader_holdout_apply(KernelGlobals kg, ccl_private ShaderData *sd)
ccl_device float3 shader_bsdf_alpha(KernelGlobals kg, ccl_private const ShaderData *sd)
ccl_device int shader_bsdf_sample_closure(KernelGlobals kg, ccl_private ShaderData *sd, ccl_private const ShaderClosure *sc, float randu, float randv, ccl_private BsdfEval *bsdf_eval, ccl_private float3 *omega_in, ccl_private differential3 *domega_in, ccl_private float *pdf)
CCL_NAMESPACE_BEGIN ccl_device_inline void shader_prepare_surface_closures(KernelGlobals kg, ConstIntegratorState state, ccl_private ShaderData *sd, const uint32_t path_flag)
ccl_device_inline bool light_distribution_sample_from_position(KernelGlobals kg, float randu, const float randv, const float time, const float3 P, const int bounce, const uint32_t path_flag, ccl_private LightSample *ls)
ccl_device_forceinline float triangle_light_pdf(KernelGlobals kg, ccl_private const ShaderData *sd, float t)
#define CLOSURE_IS_BSSRDF(type)
@ SD_BSDF_HAS_EVAL
Definition: kernel/types.h:744
@ SD_BSSRDF
Definition: kernel/types.h:746
@ SD_BSDF_NEEDS_LCG
Definition: kernel/types.h:756
@ SD_HAS_BSSRDF_BUMP
Definition: kernel/types.h:774
@ SD_HAS_ONLY_VOLUME
Definition: kernel/types.h:770
@ SD_BSDF
Definition: kernel/types.h:742
@ SD_USE_MIS
Definition: kernel/types.h:764
@ SD_HOLDOUT
Definition: kernel/types.h:748
@ SD_EMISSION
Definition: kernel/types.h:740
@ PRIMITIVE_TRIANGLE
Definition: kernel/types.h:551
#define AS_SHADER_DATA(shader_data_tiny_storage)
Definition: kernel/types.h:943
@ PRNG_BSDF_U
Definition: kernel/types.h:164
@ PRNG_LIGHT_U
Definition: kernel/types.h:166
#define PRIM_NONE
Definition: kernel/types.h:41
@ PATH_RAY_SHADOW_FOR_AO
Definition: kernel/types.h:275
@ PATH_RAY_MIS_SKIP
Definition: kernel/types.h:225
@ PATH_RAY_SHADOW_FOR_LIGHT
Definition: kernel/types.h:274
@ PATH_RAY_SUBSURFACE
Definition: kernel/types.h:262
@ PATH_RAY_SURFACE_PASS
Definition: kernel/types.h:269
@ PATH_RAY_TRANSPARENT_BACKGROUND
Definition: kernel/types.h:235
@ PATH_RAY_CAMERA
Definition: kernel/types.h:194
@ PATH_RAY_ANY_PASS
Definition: kernel/types.h:271
@ PATH_RAY_TERMINATE_ON_NEXT_SURFACE
Definition: kernel/types.h:238
@ KERNEL_FEATURE_LIGHT_PASSES
@ KERNEL_FEATURE_MNEE
@ KERNEL_FEATURE_AO_ADDITIVE
@ KERNEL_FEATURE_SHADOW_PASS
@ KERNEL_FEATURE_AO
@ KERNEL_FEATURE_NODE_RAYTRACE
#define OBJECT_NONE
Definition: kernel/types.h:40
ShaderData
Definition: kernel/types.h:925
@ SHADER_USE_MIS
Definition: kernel/types.h:438
@ SD_OBJECT_HOLDOUT_MASK
Definition: kernel/types.h:804
@ SD_OBJECT_CAUSTICS_RECEIVER
Definition: kernel/types.h:824
@ SD_OBJECT_TRANSFORM_APPLIED
Definition: kernel/types.h:808
@ SD_OBJECT_CAUSTICS_CASTER
Definition: kernel/types.h:822
@ LABEL_TRANSMIT
Definition: kernel/types.h:317
@ LABEL_NONE
Definition: kernel/types.h:316
@ LABEL_TRANSPARENT
Definition: kernel/types.h:322
#define KERNEL_FEATURE_NODE_MASK_SURFACE
ShaderClosure
Definition: kernel/types.h:726
@ LIGHT_BACKGROUND
Definition: kernel/types.h:458
#define LAMP_NONE
Definition: kernel/types.h:42
DeviceKernel
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_SUBSURFACE
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST
#define IF_KERNEL_FEATURE(feature)
ShaderDataCausticsStorage
Definition: kernel/types.h:941
#define PROFILING_INIT_FOR_SHADER(kg, event)
#define PROFILING_EVENT(event)
#define PROFILING_SHADER(object, shader)
ccl_device_inline uint lcg_state_init(const uint rng_hash, const uint rng_offset, const uint sample, const uint scramble)
Definition: lcg.h:33
ccl_device_inline float average(const float2 &a)
Definition: math_float2.h:170
ccl_device_inline bool isequal(const float3 a, const float3 b)
Definition: math_float3.h:524
ccl_device_inline float3 one_float3()
Definition: math_float3.h:89
static float P(float k)
Definition: math_interp.c:25
ccl_device_forceinline bool ray_triangle_intersect_self(const float3 ray_P, const float3 ray_D, const float3 tri_a, const float3 tri_b, const float3 tri_c)
#define L
#define fminf(x, y)
Definition: metal/compat.h:229
vec_base< T, Size > normalize(const vec_base< T, Size > &v)
bool is_zero(const T &a)
ccl_device_inline void kernel_write_data_passes(KernelGlobals kg, IntegratorState state, ccl_private const ShaderData *sd, ccl_global float *ccl_restrict render_buffer)
Definition: passes.h:166
ccl_device_inline float path_state_rng_light_termination(KernelGlobals kg, ccl_private const RNGState *state)
Definition: path_state.h:359
ccl_device_inline void path_state_rng_load(ConstIntegratorState state, ccl_private RNGState *rng_state)
Definition: path_state.h:283
ccl_device_inline void path_state_next(KernelGlobals kg, IntegratorState state, int label)
Definition: path_state.h:82
ccl_device_inline void path_state_rng_2D(KernelGlobals kg, ccl_private const RNGState *rng_state, int dimension, ccl_private float *fx, ccl_private float *fy)
Definition: path_state.h:307
ccl_device_inline void sample_cos_hemisphere(const float3 N, float randu, float randv, ccl_private float3 *omega_in, ccl_private float *pdf)
ccl_device_forceinline float3 integrate_surface_ray_offset(KernelGlobals kg, const ccl_private ShaderData *sd, const float3 ray_P, const float3 ray_D)
Definition: shade_surface.h:34
ccl_device_forceinline void integrator_shade_surface(KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
ccl_device_forceinline void integrator_shade_surface_mnee(KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
ccl_device bool integrate_surface(KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
ccl_device_forceinline bool integrate_surface_terminate(IntegratorState state, const uint32_t path_flag)
ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(KernelGlobals kg, IntegratorState state, ccl_private ShaderData *sd, ccl_private const RNGState *rng_state)
ccl_device_forceinline void integrator_shade_surface_raytrace(KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
CCL_NAMESPACE_BEGIN ccl_device_forceinline void integrate_surface_shader_setup(KernelGlobals kg, ConstIntegratorState state, ccl_private ShaderData *sd)
Definition: shade_surface.h:21
CCL_NAMESPACE_BEGIN ccl_device_inline void shader_setup_from_ray(KernelGlobals kg, ccl_private ShaderData *ccl_restrict sd, ccl_private const Ray *ccl_restrict ray, ccl_private const Intersection *ccl_restrict isect)
Definition: shader_data.h:28
#define INTEGRATOR_STATE_ARRAY_WRITE(state, nested_struct, array_index, member)
Definition: state.h:159
IntegratorStateCPU *ccl_restrict IntegratorState
Definition: state.h:147
#define INTEGRATOR_STATE_WRITE(state, nested_struct, member)
Definition: state.h:155
const IntegratorStateCPU *ccl_restrict ConstIntegratorState
Definition: state.h:148
#define INTEGRATOR_STATE(state, nested_struct, member)
Definition: state.h:154
IntegratorShadowStateCPU *ccl_restrict IntegratorShadowState
Definition: state.h:149
ccl_device_forceinline void integrator_path_terminate(KernelGlobals kg, IntegratorState state, const DeviceKernel current_kernel)
Definition: state_flow.h:160
ccl_device_forceinline void integrator_path_next(KernelGlobals kg, IntegratorState state, const DeviceKernel current_kernel, const DeviceKernel next_kernel)
Definition: state_flow.h:151
ccl_device_forceinline IntegratorShadowState integrator_shadow_path_init(KernelGlobals kg, IntegratorState state, const DeviceKernel next_kernel, const bool is_ao)
Definition: state_flow.h:179
ccl_device_forceinline void integrator_state_read_ray(KernelGlobals kg, ConstIntegratorState state, ccl_private Ray *ccl_restrict ray)
Definition: state_util.h:27
ccl_device_forceinline void integrator_state_read_isect(KernelGlobals kg, ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
Definition: state_util.h:79
ccl_device_forceinline void integrator_state_write_shadow_ray(KernelGlobals kg, IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
Definition: state_util.h:42
ccl_device_forceinline void integrator_state_copy_volume_stack_to_shadow(KernelGlobals kg, IntegratorShadowState shadow_state, ConstIntegratorState state)
Definition: state_util.h:142
unsigned short uint16_t
Definition: stdint.h:79
unsigned int uint32_t
Definition: stdint.h:80
@ PROFILING_SHADE_SURFACE_SETUP
@ PROFILING_SHADE_SURFACE_EVAL
@ PROFILING_SHADE_SURFACE_INDIRECT_LIGHT
@ PROFILING_SHADE_SURFACE_AO
@ PROFILING_SHADE_SURFACE_DIRECT_LIGHT
@ PROFILING_SHADE_SURFACE_PASSES
ccl_device void volume_stack_enter_exit(KernelGlobals kg, ccl_private const ShaderData *sd, StackReadOp stack_read, StackWriteOp stack_write)
Definition: volume_stack.h:22
ccl_device void shadow_volume_stack_enter_exit(KernelGlobals kg, IntegratorShadowState state, ccl_private const ShaderData *sd)
Definition: volume_stack.h:91
BLI_INLINE float D(const float *data, const int res[3], int x, int y, int z)
Definition: voxel.c:13