Blender V3.3
kernel/camera/camera.h
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#pragma once

#include "kernel/camera/projection.h"
#include "kernel/sample/mapping.h"
#include "kernel/util/differential.h"
#include "kernel/util/lookup_table.h"

CCL_NAMESPACE_BEGIN

/* Perspective Camera */

ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
{
  float blades = cam->blades;
  float2 bokeh;

  if (blades == 0.0f) {
    /* sample disk */
    bokeh = concentric_sample_disk(u, v);
  }
  else {
    /* sample polygon */
    float rotation = cam->bladesrotation;
    bokeh = regular_polygon_sample(blades, rotation, u, v);
  }

  /* anamorphic lens bokeh */
  bokeh.x *= cam->inv_aperture_ratio;

  return bokeh;
}
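
/* A minimal standalone sketch of the square-to-disk mapping used above when
 * blades == 0, assuming the standard Shirley-Chiu concentric construction.
 * The kernel's actual concentric_sample_disk() lives in the sampling headers
 * and may differ in signature and conventions; plain floats stand in for the
 * kernel's float2, and cosf/sinf come from <math.h>. */
static inline void concentric_disk_sketch(float u, float v, float *x, float *y)
{
  const float PI_4 = 0.7853981634f; /* pi / 4 */
  const float PI_2 = 1.5707963268f; /* pi / 2 */

  /* Map [0,1)^2 to [-1,1)^2 and handle the degenerate center point. */
  float a = 2.0f * u - 1.0f;
  float b = 2.0f * v - 1.0f;
  if (a == 0.0f && b == 0.0f) {
    *x = 0.0f;
    *y = 0.0f;
    return;
  }

  /* Fold the square onto the disk: radius from the larger coordinate,
   * angle from the ratio of the smaller to the larger one. */
  float r, phi;
  if (a * a > b * b) {
    r = a;
    phi = PI_4 * (b / a);
  }
  else {
    r = b;
    phi = PI_2 - PI_4 * (a / b);
  }
  *x = r * cosf(phi);
  *y = r * sinf(phi);
}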

ccl_device void camera_sample_perspective(KernelGlobals kg,
                                          float raster_x,
                                          float raster_y,
                                          float lens_u,
                                          float lens_v,
                                          ccl_private Ray *ray)
{
  /* create ray from raster position */
  ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  float3 raster = make_float3(raster_x, raster_y, 0.0f);
  float3 Pcamera = transform_perspective(&rastertocamera, raster);

#ifdef __CAMERA_MOTION__
  if (kernel_data.cam.have_perspective_motion) {
    /* TODO(sergey): Currently we interpolate the projected coordinate, which
     * gives a nice looking result and is simple, but is in fact a bit
     * different compared to constructing a projective matrix from an
     * interpolated field of view.
     */
    if (ray->time < 0.5f) {
      ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_pre;
      float3 Pcamera_pre = transform_perspective(&rastertocamera_pre, raster);
      Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
    }
    else {
      ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_post;
      float3 Pcamera_post = transform_perspective(&rastertocamera_post, raster);
      Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
    }
  }
#endif

  float3 P = zero_float3();
  float3 D = Pcamera;

  /* modify ray for depth of field */
  float aperturesize = kernel_data.cam.aperturesize;

  if (aperturesize > 0.0f) {
    /* sample point on aperture */
    float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;

    /* compute point on plane of focus */
    float ft = kernel_data.cam.focaldistance / D.z;
    float3 Pfocus = D * ft;

    /* update ray for effect of lens */
    P = make_float3(lensuv.x, lensuv.y, 0.0f);
    D = normalize(Pfocus - P);
  }

  /* transform ray from camera to world */
  Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
  if (kernel_data.cam.num_motion_steps) {
    transform_motion_array_interpolate(&cameratoworld,
                                       kernel_data_array(camera_motion),
                                       kernel_data.cam.num_motion_steps,
                                       ray->time);
  }
#endif

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
  if (!use_stereo) {
    /* No stereo */
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
    float3 Dcenter_normalized = normalize(Dcenter);

    /* TODO: can this be optimized to give compact differentials directly? */
    ray->dP = differential_zero_compact();
    differential3 dD;
    dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - Dcenter_normalized;
    dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - Dcenter_normalized;
    ray->dD = differential_make_compact(dD);
#endif
  }
  else {
    /* Spherical stereo */
    spherical_stereo_transform(&kernel_data.cam, &P, &D);
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    /* Ray differentials, computed from scratch using the raster coordinates
     * because we don't want to be affected by depth of field. We compute
     * ray origin and direction for the center and two neighboring pixels
     * and simply take their differences. */
    float3 Pnostereo = transform_point(&cameratoworld, zero_float3());

    float3 Pcenter = Pnostereo;
    float3 Dcenter = Pcamera;
    Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
    spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);

    float3 Px = Pnostereo;
    float3 Dx = transform_perspective(&rastertocamera,
                                      make_float3(raster_x + 1.0f, raster_y, 0.0f));
    Dx = normalize(transform_direction(&cameratoworld, Dx));
    spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);

    differential3 dP, dD;

    dP.dx = Px - Pcenter;
    dD.dx = Dx - Dcenter;

    float3 Py = Pnostereo;
    float3 Dy = transform_perspective(&rastertocamera,
                                      make_float3(raster_x, raster_y + 1.0f, 0.0f));
    Dy = normalize(transform_direction(&cameratoworld, Dy));
    spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);

    dP.dy = Py - Pcenter;
    dD.dy = Dy - Dcenter;
    ray->dD = differential_make_compact(dD);
    ray->dP = differential_make_compact(dP);
#endif
  }

#ifdef __CAMERA_CLIPPING__
  /* clipping */
  float z_inv = 1.0f / normalize(Pcamera).z;
  float nearclip = kernel_data.cam.nearclip * z_inv;
  ray->P += nearclip * ray->D;
  ray->dP += nearclip * ray->dD;
  ray->tmin = 0.0f;
  ray->tmax = kernel_data.cam.cliplength * z_inv;
#else
  ray->tmin = 0.0f;
  ray->tmax = FLT_MAX;
#endif
}
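
/* A minimal sketch of the thin-lens update in the depth-of-field branch of
 * camera_sample_perspective() above, assuming plain float arrays in place of
 * the kernel's float3. Every ray leaving a sampled point on the aperture plane
 * (z = 0) is re-aimed at the point where the unperturbed pinhole ray crosses
 * the plane of focus z = focaldistance; the caller then normalizes
 * Pfocus - lens_point to obtain the new direction, exactly as the kernel does. */
static inline void thin_lens_focus_sketch(const float D[3], float focaldistance, float Pfocus[3])
{
  /* Scale the pinhole direction so its z component lands on the focus plane. */
  float ft = focaldistance / D[2];
  Pfocus[0] = D[0] * ft;
  Pfocus[1] = D[1] * ft;
  Pfocus[2] = D[2] * ft; /* == focaldistance */
}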

/* Orthographic Camera */
ccl_device void camera_sample_orthographic(KernelGlobals kg,
                                           float raster_x,
                                           float raster_y,
                                           float lens_u,
                                           float lens_v,
                                           ccl_private Ray *ray)
{
  /* create ray from raster position */
  ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

  float3 P;
  float3 D = make_float3(0.0f, 0.0f, 1.0f);

  /* modify ray for depth of field */
  float aperturesize = kernel_data.cam.aperturesize;

  if (aperturesize > 0.0f) {
    /* sample point on aperture */
    float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;

    /* compute point on plane of focus */
    float3 Pfocus = D * kernel_data.cam.focaldistance;

    /* update ray for effect of lens */
    float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
    P = Pcamera + lensuvw;
    D = normalize(Pfocus - lensuvw);
  }
  else {
    P = Pcamera;
  }
  /* transform ray from camera to world */
  Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
  if (kernel_data.cam.num_motion_steps) {
    transform_motion_array_interpolate(&cameratoworld,
                                       kernel_data_array(camera_motion),
                                       kernel_data.cam.num_motion_steps,
                                       ray->time);
  }
#endif

  ray->P = transform_point(&cameratoworld, P);
  ray->D = normalize(transform_direction(&cameratoworld, D));

#ifdef __RAY_DIFFERENTIALS__
  /* ray differential */
  differential3 dP;
  dP.dx = float4_to_float3(kernel_data.cam.dx);
  dP.dy = float4_to_float3(kernel_data.cam.dy);

  ray->dP = differential_make_compact(dP);
  ray->dD = differential_zero_compact();
#endif

#ifdef __CAMERA_CLIPPING__
  /* clipping */
  ray->tmin = 0.0f;
  ray->tmax = kernel_data.cam.cliplength;
#else
  ray->tmin = 0.0f;
  ray->tmax = FLT_MAX;
#endif
}

/* Panorama Camera */

ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
#ifdef __CAMERA_MOTION__
                                              ccl_global const DecomposedTransform *cam_motion,
#endif
                                              float raster_x,
                                              float raster_y,
                                              float lens_u,
                                              float lens_v,
                                              ccl_private Ray *ray)
{
  ProjectionTransform rastertocamera = cam->rastertocamera;
  float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

  /* create ray from raster position */
  float3 P = zero_float3();
  float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);

  /* indicates the ray should not receive any light, as it falls outside of the lens */
  if (is_zero(D)) {
    ray->tmax = 0.0f;
    return;
  }

  /* modify ray for depth of field */
  float aperturesize = cam->aperturesize;

  if (aperturesize > 0.0f) {
    /* sample point on aperture */
    float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v) * aperturesize;

    /* compute point on plane of focus */
    float3 Dfocus = normalize(D);
    float3 Pfocus = Dfocus * cam->focaldistance;

    /* calculate orthonormal coordinates perpendicular to Dfocus */
    float3 U, V;
    U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
    V = normalize(cross(Dfocus, U));

    /* update ray for effect of lens */
    P = U * lensuv.x + V * lensuv.y;
    D = normalize(Pfocus - P);
  }

  /* transform ray from camera to world */
  Transform cameratoworld = cam->cameratoworld;

#ifdef __CAMERA_MOTION__
  if (cam->num_motion_steps) {
    transform_motion_array_interpolate(
        &cameratoworld, cam_motion, cam->num_motion_steps, ray->time);
  }
#endif

  /* Stereo transform */
  bool use_stereo = cam->interocular_offset != 0.0f;
  if (use_stereo) {
    spherical_stereo_transform(cam, &P, &D);
  }

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  ray->P = P;
  ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
  /* Ray differentials, computed from scratch using the raster coordinates
   * because we don't want to be affected by depth of field. We compute
   * ray origin and direction for the center and two neighboring pixels
   * and simply take their differences. */
  float3 Pcenter = Pcamera;
  float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
  if (use_stereo) {
    spherical_stereo_transform(cam, &Pcenter, &Dcenter);
  }
  Pcenter = transform_point(&cameratoworld, Pcenter);
  Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));

  float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
  float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
  if (use_stereo) {
    spherical_stereo_transform(cam, &Px, &Dx);
  }
  Px = transform_point(&cameratoworld, Px);
  Dx = normalize(transform_direction(&cameratoworld, Dx));

  differential3 dP, dD;
  dP.dx = Px - Pcenter;
  dD.dx = Dx - Dcenter;

  float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
  float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
  if (use_stereo) {
    spherical_stereo_transform(cam, &Py, &Dy);
  }
  Py = transform_point(&cameratoworld, Py);
  Dy = normalize(transform_direction(&cameratoworld, Dy));

  dP.dy = Py - Pcenter;
  dD.dy = Dy - Dcenter;
  ray->dD = differential_make_compact(dD);
  ray->dP = differential_make_compact(dP);
#endif

#ifdef __CAMERA_CLIPPING__
  /* clipping */
  float nearclip = cam->nearclip;
  ray->P += nearclip * ray->D;
  ray->dP += nearclip * ray->dD;
  ray->tmin = 0.0f;
  ray->tmax = cam->cliplength;
#else
  ray->tmin = 0.0f;
  ray->tmax = FLT_MAX;
#endif
}
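
/* A minimal sketch of the orthonormal basis built in the depth-of-field branch
 * of camera_sample_panorama() above, assuming Dfocus is unit length, using
 * plain float arrays instead of the kernel's float3, and sqrtf from <math.h>.
 * U is one Gram-Schmidt step: the world x-axis with its component along Dfocus
 * removed; V completes the frame with a cross product. The construction
 * degenerates when Dfocus is (anti)parallel to the x-axis, a case a
 * hypothetical caller would need to handle. */
static inline void focus_basis_sketch(const float Dfocus[3], float U[3], float V[3])
{
  /* U = normalize((1, 0, 0) - Dfocus.x * Dfocus). */
  float ux = 1.0f - Dfocus[0] * Dfocus[0];
  float uy = 0.0f - Dfocus[0] * Dfocus[1];
  float uz = 0.0f - Dfocus[0] * Dfocus[2];
  float inv_len = 1.0f / sqrtf(ux * ux + uy * uy + uz * uz);
  U[0] = ux * inv_len;
  U[1] = uy * inv_len;
  U[2] = uz * inv_len;

  /* V = cross(Dfocus, U); already unit length for orthogonal unit inputs. */
  V[0] = Dfocus[1] * U[2] - Dfocus[2] * U[1];
  V[1] = Dfocus[2] * U[0] - Dfocus[0] * U[2];
  V[2] = Dfocus[0] * U[1] - Dfocus[1] * U[0];
}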

/* Common */

ccl_device_inline void camera_sample(KernelGlobals kg,
                                     int x,
                                     int y,
                                     float filter_u,
                                     float filter_v,
                                     float lens_u,
                                     float lens_v,
                                     float time,
                                     ccl_private Ray *ray)
{
  /* pixel filter */
  int filter_table_offset = kernel_data.tables.filter_table_offset;
  float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
  float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);

#ifdef __CAMERA_MOTION__
  /* motion blur */
  if (kernel_data.cam.shuttertime == -1.0f) {
    ray->time = 0.5f;
  }
  else {
    /* TODO(sergey): Such a lookup is unneeded when there's a rolling shutter
     * effect in use but the rolling shutter duration is set to 0.0.
     */
    const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
    ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
    /* TODO(sergey): Currently only a single rolling shutter effect type is
     * supported, where scan-lines are acquired from top to bottom and a whole
     * scan-line is acquired at once (no delay in acquisition happens between
     * pixels of a single scan-line).
     *
     * Might want to support more models in the future.
     */
    if (kernel_data.cam.rolling_shutter_type) {
      /* Time corresponding to a fully rolling shutter only effect:
       * top of the frame is time 0.0, bottom of the frame is time 1.0.
       */
      const float time = 1.0f - (float)y / kernel_data.cam.height;
      const float duration = kernel_data.cam.rolling_shutter_duration;
      if (duration != 0.0f) {
        /* This isn't fully physically correct, but lets us have simple
         * controls in the interface. The idea here is basically a linear
         * interpolation between how much rolling shutter effect exists on
         * the frame and how much of it is a motion blur effect.
         */
        ray->time = (ray->time - 0.5f) * duration;
        ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
      }
      else {
        ray->time = time;
      }
    }
  }
#endif

  /* sample */
  if (kernel_data.cam.type == CAMERA_PERSPECTIVE) {
    camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
  }
  else if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
  }
  else {
#ifdef __CAMERA_MOTION__
    ccl_global const DecomposedTransform *cam_motion = kernel_data_array(camera_motion);
    camera_sample_panorama(&kernel_data.cam, cam_motion, raster_x, raster_y, lens_u, lens_v, ray);
#else
    camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
#endif
  }
}
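
/* A minimal sketch of the rolling-shutter blend inside camera_sample() above,
 * assuming shutter_time is the motion-blur sample in [0,1], scanline_time is
 * the pure rolling-shutter time for the current row (0.0 at the top of the
 * frame, 1.0 at the bottom, per the comment above), and duration plays the
 * role of kernel_data.cam.rolling_shutter_duration. duration = 1 reproduces
 * plain motion blur, duration = 0 reproduces a pure rolling shutter, and
 * values in between mix the two effects linearly. */
static inline float rolling_shutter_blend_sketch(float shutter_time,
                                                 float scanline_time,
                                                 float duration)
{
  float t = (shutter_time - 0.5f) * duration;
  t += (scanline_time - 0.5f) * (1.0f - duration) + 0.5f;
  return t;
}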

/* Utilities */

ccl_device_inline float3 camera_position(KernelGlobals kg)
{
  Transform cameratoworld = kernel_data.cam.cameratoworld;
  return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
}

ccl_device_inline float camera_distance(KernelGlobals kg, float3 P)
{
  Transform cameratoworld = kernel_data.cam.cameratoworld;
  float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);

  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
    return fabsf(dot((P - camP), camD));
  }
  else {
    return len(P - camP);
  }
}

ccl_device_inline float camera_z_depth(KernelGlobals kg, float3 P)
{
  if (kernel_data.cam.type != CAMERA_PANORAMA) {
    Transform worldtocamera = kernel_data.cam.worldtocamera;
    return transform_point(&worldtocamera, P).z;
  }
  else {
    Transform cameratoworld = kernel_data.cam.cameratoworld;
    float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
    return len(P - camP);
  }
}
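
/* A minimal sketch contrasting camera_distance() and camera_z_depth() above
 * for a perspective camera, assuming P_cam is the point already transformed
 * into camera space (camera at the origin, looking along +z) and sqrtf from
 * <math.h>. The Euclidean distance and the axial depth agree only for points
 * on the optical axis; off-axis, the distance is never smaller than the depth. */
static inline void depth_vs_distance_sketch(const float P_cam[3], float *distance, float *z_depth)
{
  *distance = sqrtf(P_cam[0] * P_cam[0] + P_cam[1] * P_cam[1] + P_cam[2] * P_cam[2]);
  *z_depth = P_cam[2];
}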

ccl_device_inline float3 camera_direction_from_point(KernelGlobals kg, float3 P)
{
  Transform cameratoworld = kernel_data.cam.cameratoworld;

  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
    return -camD;
  }
  else {
    float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
    return normalize(camP - P);
  }
}

ccl_device_inline float3 camera_world_to_ndc(KernelGlobals kg,
                                             ccl_private ShaderData *sd,
                                             float3 P)
{
  if (kernel_data.cam.type != CAMERA_PANORAMA) {
    /* perspective / ortho */
    if (sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
      P += camera_position(kg);

    ProjectionTransform tfm = kernel_data.cam.worldtondc;
    return transform_perspective(&tfm, P);
  }
  else {
    /* panorama */
    Transform tfm = kernel_data.cam.worldtocamera;

    if (sd->object != OBJECT_NONE)
      P = normalize(transform_point(&tfm, P));
    else
      P = normalize(transform_direction(&tfm, P));

    float2 uv = direction_to_panorama(&kernel_data.cam, P);

    return make_float3(uv.x, uv.y, 0.0f);
  }
}

CCL_NAMESPACE_END