/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2021-2022 Blender Foundation */

#ifdef WITH_METAL

#  include "scene/hair.h"
#  include "scene/mesh.h"
#  include "scene/object.h"
#  include "scene/pointcloud.h"

#  include "util/progress.h"

#  include "device/metal/bvh.h"
#  include "device/metal/util.h"

CCL_NAMESPACE_BEGIN

/* Report BVH build progress: update the render substatus and mirror the
 * message to the Metal debug log. Expects a `Progress &progress` in scope. */
#  define BVH_status(...) \
    { \
      string str = string_printf(__VA_ARGS__); \
      progress.set_substatus(str); \
      metal_printf("%s\n", str.c_str()); \
    }
25 BVHMetal::BVHMetal(const BVHParams &params_,
26  const vector<Geometry *> &geometry_,
27  const vector<Object *> &objects_,
28  Device *device)
29  : BVH(params_, geometry_, objects_), stats(device->stats)
30 {
31 }
32 
BVHMetal::~BVHMetal()
{
  /* Metal ray-tracing acceleration structures require macOS 12.0+. */
  if (@available(macos 12.0, *)) {
    if (accel_struct) {
      /* Return the accounted GPU memory before releasing the structure. */
      stats.mem_free(accel_struct.allocatedSize);
      [accel_struct release];
    }
  }
}
42 
/* Build (or refit) a bottom-level acceleration structure (BLAS) for triangle
 * mesh geometry.
 *
 * Returns false if the mesh has no triangles or Metal RT is unavailable.
 * Otherwise encodes an asynchronous GPU build and returns true immediately:
 * `accel_struct_building` stays true until the command buffer's completion
 * handler (plus an optional compaction pass for static BVHs) stores the final
 * structure in `accel_struct`. */
bool BVHMetal::build_BLAS_mesh(Progress &progress,
                               id<MTLDevice> device,
                               id<MTLCommandQueue> queue,
                               Geometry *const geom,
                               bool refit)
{
  if (@available(macos 12.0, *)) {
    /* Build BLAS for triangle primitives */
    Mesh *const mesh = static_cast<Mesh *const>(geom);
    if (mesh->num_triangles() == 0) {
      return false;
    }

    /*------------------------------------------------*/
    BVH_status(
        "Building mesh BLAS | %7d tris | %s", (int)mesh->num_triangles(), geom->name.c_str());
    /*------------------------------------------------*/

    const bool use_fast_trace_bvh = (params.bvh_type == BVH_TYPE_STATIC);

    const array<float3> &verts = mesh->get_verts();
    const array<int> &tris = mesh->get_triangles();
    const size_t num_verts = verts.size();
    const size_t num_indices = tris.size();

    size_t num_motion_steps = 1;
    Attribute *motion_keys = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    if (motion_blur && mesh->get_use_motion_blur() && motion_keys) {
      num_motion_steps = mesh->get_motion_steps();
    }

    /* Unified-memory devices (Apple silicon) can use shared storage; discrete
     * GPUs need managed storage with explicit didModifyRange notifications. */
    MTLResourceOptions storage_mode;
    if (device.hasUnifiedMemory) {
      storage_mode = MTLResourceStorageModeShared;
    }
    else {
      storage_mode = MTLResourceStorageModeManaged;
    }

    /* Upload the mesh data to the GPU */
    id<MTLBuffer> posBuf = nil;
    id<MTLBuffer> indexBuf = [device newBufferWithBytes:tris.data()
                                                 length:num_indices * sizeof(tris.data()[0])
                                                options:storage_mode];

    if (num_motion_steps == 1) {
      posBuf = [device newBufferWithBytes:verts.data()
                                   length:num_verts * sizeof(verts.data()[0])
                                  options:storage_mode];
    }
    else {
      /* Pack all motion steps contiguously: step i occupies
       * [i * num_verts, (i + 1) * num_verts) in the position buffer. */
      posBuf = [device newBufferWithLength:num_verts * num_motion_steps * sizeof(verts.data()[0])
                                   options:storage_mode];
      float3 *dest_data = (float3 *)[posBuf contents];
      size_t center_step = (num_motion_steps - 1) / 2;
      for (size_t step = 0; step < num_motion_steps; ++step) {
        const float3 *verts = mesh->get_verts().data();

        /* The center step for motion vertices is not stored in the attribute. */
        if (step != center_step) {
          verts = motion_keys->data_float3() + (step > center_step ? step - 1 : step) * num_verts;
        }
        memcpy(dest_data + num_verts * step, verts, num_verts * sizeof(float3));
      }
      if (storage_mode == MTLResourceStorageModeManaged) {
        [posBuf didModifyRange:NSMakeRange(0, posBuf.length)];
      }
    }

    /* Create an acceleration structure. */
    MTLAccelerationStructureGeometryDescriptor *geomDesc;
    if (num_motion_steps > 1) {
      std::vector<MTLMotionKeyframeData *> vertex_ptrs;
      vertex_ptrs.reserve(num_motion_steps);
      for (size_t step = 0; step < num_motion_steps; ++step) {
        MTLMotionKeyframeData *k = [MTLMotionKeyframeData data];
        k.buffer = posBuf;
        k.offset = num_verts * step * sizeof(float3);
        vertex_ptrs.push_back(k);
      }

      MTLAccelerationStructureMotionTriangleGeometryDescriptor *geomDescMotion =
          [MTLAccelerationStructureMotionTriangleGeometryDescriptor descriptor];
      geomDescMotion.vertexBuffers = [NSArray arrayWithObjects:vertex_ptrs.data()
                                                         count:vertex_ptrs.size()];
      geomDescMotion.vertexStride = sizeof(verts.data()[0]);
      geomDescMotion.indexBuffer = indexBuf;
      geomDescMotion.indexBufferOffset = 0;
      geomDescMotion.indexType = MTLIndexTypeUInt32;
      geomDescMotion.triangleCount = num_indices / 3;
      geomDescMotion.intersectionFunctionTableOffset = 0;

      geomDesc = geomDescMotion;
    }
    else {
      MTLAccelerationStructureTriangleGeometryDescriptor *geomDescNoMotion =
          [MTLAccelerationStructureTriangleGeometryDescriptor descriptor];
      geomDescNoMotion.vertexBuffer = posBuf;
      geomDescNoMotion.vertexBufferOffset = 0;
      geomDescNoMotion.vertexStride = sizeof(verts.data()[0]);
      geomDescNoMotion.indexBuffer = indexBuf;
      geomDescNoMotion.indexBufferOffset = 0;
      geomDescNoMotion.indexType = MTLIndexTypeUInt32;
      geomDescNoMotion.triangleCount = num_indices / 3;
      geomDescNoMotion.intersectionFunctionTableOffset = 0;

      geomDesc = geomDescNoMotion;
    }

    /* Force a single any-hit call, so shadow record-all behavior works correctly */
    /* (Match optix behavior: unsigned int build_flags =
     * OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;) */
    geomDesc.allowDuplicateIntersectionFunctionInvocation = false;

    MTLPrimitiveAccelerationStructureDescriptor *accelDesc =
        [MTLPrimitiveAccelerationStructureDescriptor descriptor];
    accelDesc.geometryDescriptors = @[ geomDesc ];
    if (num_motion_steps > 1) {
      accelDesc.motionStartTime = 0.0f;
      accelDesc.motionEndTime = 1.0f;
      accelDesc.motionStartBorderMode = MTLMotionBorderModeClamp;
      accelDesc.motionEndBorderMode = MTLMotionBorderModeClamp;
      accelDesc.motionKeyframeCount = num_motion_steps;
    }

    /* Dynamic BVHs prefer fast (re)builds and refit support over trace speed. */
    if (!use_fast_trace_bvh) {
      accelDesc.usage |= (MTLAccelerationStructureUsageRefit |
                          MTLAccelerationStructureUsagePreferFastBuild);
    }

    MTLAccelerationStructureSizes accelSizes = [device
        accelerationStructureSizesWithDescriptor:accelDesc];
    id<MTLAccelerationStructure> accel_uncompressed = [device
        newAccelerationStructureWithSize:accelSizes.accelerationStructureSize];
    id<MTLBuffer> scratchBuf = [device newBufferWithLength:accelSizes.buildScratchBufferSize
                                                   options:MTLResourceStorageModePrivate];
    id<MTLBuffer> sizeBuf = [device newBufferWithLength:8 options:MTLResourceStorageModeShared];
    id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
    id<MTLAccelerationStructureCommandEncoder> accelEnc =
        [accelCommands accelerationStructureCommandEncoder];
    if (refit) {
      [accelEnc refitAccelerationStructure:accel_struct
                                descriptor:accelDesc
                               destination:accel_uncompressed
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    else {
      [accelEnc buildAccelerationStructure:accel_uncompressed
                                descriptor:accelDesc
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    if (use_fast_trace_bvh) {
      /* Query the compacted size now so the completion handler can compact. */
      [accelEnc writeCompactedAccelerationStructureSize:accel_uncompressed
                                               toBuffer:sizeBuf
                                                 offset:0
                                           sizeDataType:MTLDataTypeULong];
    }
    [accelEnc endEncoding];
    [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
      /* free temp resources */
      [scratchBuf release];
      [indexBuf release];
      [posBuf release];

      if (use_fast_trace_bvh) {
        /* Compact the accel structure */
        uint64_t compressed_size = *(uint64_t *)sizeBuf.contents;

        /* Compaction is encoded from a background GCD queue; `accel_struct`
         * only becomes valid once its completion handler fires. */
        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
          id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
          id<MTLAccelerationStructureCommandEncoder> accelEnc =
              [accelCommands accelerationStructureCommandEncoder];
          id<MTLAccelerationStructure> accel = [device
              newAccelerationStructureWithSize:compressed_size];
          [accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
                                toAccelerationStructure:accel];
          [accelEnc endEncoding];
          [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
            uint64_t allocated_size = [accel allocatedSize];
            stats.mem_alloc(allocated_size);
            accel_struct = accel;
            [accel_uncompressed release];
            accel_struct_building = false;
          }];
          [accelCommands commit];
        });
      }
      else {
        /* set our acceleration structure to the uncompressed structure */
        accel_struct = accel_uncompressed;

        uint64_t allocated_size = [accel_struct allocatedSize];
        stats.mem_alloc(allocated_size);
        accel_struct_building = false;
      }
      [sizeBuf release];
    }];

    accel_struct_building = true;
    [accelCommands commit];

    return true;
  }
  return false;
}
250 
/* Build (or refit) a BLAS for hair/curve geometry.
 *
 * Curves are represented as custom bounding-box primitives (one AABB per curve
 * segment, per motion step) intersected by a custom intersection function
 * (table offset 1). Returns false if the hair has no curves or Metal RT is
 * unavailable; otherwise encodes an asynchronous GPU build and returns true,
 * with `accel_struct_building` cleared by the completion handler. */
bool BVHMetal::build_BLAS_hair(Progress &progress,
                               id<MTLDevice> device,
                               id<MTLCommandQueue> queue,
                               Geometry *const geom,
                               bool refit)
{
  if (@available(macos 12.0, *)) {
    /* Build BLAS for hair curves */
    Hair *hair = static_cast<Hair *>(geom);
    if (hair->num_curves() == 0) {
      return false;
    }

    /*------------------------------------------------*/
    BVH_status(
        "Building hair BLAS | %7d curves | %s", (int)hair->num_curves(), geom->name.c_str());
    /*------------------------------------------------*/

    const bool use_fast_trace_bvh = (params.bvh_type == BVH_TYPE_STATIC);
    const size_t num_segments = hair->num_segments();

    size_t num_motion_steps = 1;
    Attribute *motion_keys = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    if (motion_blur && hair->get_use_motion_blur() && motion_keys) {
      num_motion_steps = hair->get_motion_steps();
    }

    const size_t num_aabbs = num_segments * num_motion_steps;

    MTLResourceOptions storage_mode;
    if (device.hasUnifiedMemory) {
      storage_mode = MTLResourceStorageModeShared;
    }
    else {
      storage_mode = MTLResourceStorageModeManaged;
    }

    /* Allocate a GPU buffer for the AABB data and populate it */
    id<MTLBuffer> aabbBuf = [device
        newBufferWithLength:num_aabbs * sizeof(MTLAxisAlignedBoundingBox)
                    options:storage_mode];
    MTLAxisAlignedBoundingBox *aabb_data = (MTLAxisAlignedBoundingBox *)[aabbBuf contents];

    /* Get AABBs for each motion step */
    size_t center_step = (num_motion_steps - 1) / 2;
    for (size_t step = 0; step < num_motion_steps; ++step) {
      /* The center step for motion vertices is not stored in the attribute */
      const float3 *keys = hair->get_curve_keys().data();
      if (step != center_step) {
        size_t attr_offset = (step > center_step) ? step - 1 : step;
        /* Technically this is a float4 array, but sizeof(float3) == sizeof(float4) */
        keys = motion_keys->data_float3() + attr_offset * hair->get_curve_keys().size();
      }

      /* `i` indexes segments globally across all curves within this step. */
      for (size_t j = 0, i = 0; j < hair->num_curves(); ++j) {
        const Hair::Curve curve = hair->get_curve(j);

        for (int segment = 0; segment < curve.num_segments(); ++segment, ++i) {
          {
            BoundBox bounds = BoundBox::empty;
            curve.bounds_grow(segment, keys, hair->get_curve_radius().data(), bounds);

            const size_t index = step * num_segments + i;
            aabb_data[index].min = (MTLPackedFloat3 &)bounds.min;
            aabb_data[index].max = (MTLPackedFloat3 &)bounds.max;
          }
        }
      }
    }

    if (storage_mode == MTLResourceStorageModeManaged) {
      [aabbBuf didModifyRange:NSMakeRange(0, aabbBuf.length)];
    }

#  if 0
    /* Debug: dump the first few AABBs. */
    for (size_t i = 0; i < num_aabbs && i < 400; i++) {
      MTLAxisAlignedBoundingBox &bb = aabb_data[i];
      printf("  %d: %.1f,%.1f,%.1f -- %.1f,%.1f,%.1f\n", int(i), bb.min.x, bb.min.y, bb.min.z, bb.max.x, bb.max.y, bb.max.z);
    }
#  endif

    MTLAccelerationStructureGeometryDescriptor *geomDesc;
    if (motion_blur) {
      std::vector<MTLMotionKeyframeData *> aabb_ptrs;
      aabb_ptrs.reserve(num_motion_steps);
      for (size_t step = 0; step < num_motion_steps; ++step) {
        MTLMotionKeyframeData *k = [MTLMotionKeyframeData data];
        k.buffer = aabbBuf;
        k.offset = step * num_segments * sizeof(MTLAxisAlignedBoundingBox);
        aabb_ptrs.push_back(k);
      }

      MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor *geomDescMotion =
          [MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor descriptor];
      geomDescMotion.boundingBoxBuffers = [NSArray arrayWithObjects:aabb_ptrs.data()
                                                              count:aabb_ptrs.size()];
      geomDescMotion.boundingBoxCount = num_segments;
      geomDescMotion.boundingBoxStride = sizeof(aabb_data[0]);
      geomDescMotion.intersectionFunctionTableOffset = 1;

      /* Force a single any-hit call, so shadow record-all behavior works correctly */
      /* (Match optix behavior: unsigned int build_flags =
       * OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;) */
      geomDescMotion.allowDuplicateIntersectionFunctionInvocation = false;
      geomDescMotion.opaque = true;
      geomDesc = geomDescMotion;
    }
    else {
      MTLAccelerationStructureBoundingBoxGeometryDescriptor *geomDescNoMotion =
          [MTLAccelerationStructureBoundingBoxGeometryDescriptor descriptor];
      geomDescNoMotion.boundingBoxBuffer = aabbBuf;
      geomDescNoMotion.boundingBoxBufferOffset = 0;
      geomDescNoMotion.boundingBoxCount = int(num_aabbs);
      geomDescNoMotion.boundingBoxStride = sizeof(aabb_data[0]);
      geomDescNoMotion.intersectionFunctionTableOffset = 1;

      /* Force a single any-hit call, so shadow record-all behavior works correctly */
      /* (Match optix behavior: unsigned int build_flags =
       * OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;) */
      geomDescNoMotion.allowDuplicateIntersectionFunctionInvocation = false;
      geomDescNoMotion.opaque = true;
      geomDesc = geomDescNoMotion;
    }

    MTLPrimitiveAccelerationStructureDescriptor *accelDesc =
        [MTLPrimitiveAccelerationStructureDescriptor descriptor];
    accelDesc.geometryDescriptors = @[ geomDesc ];

    if (motion_blur) {
      accelDesc.motionStartTime = 0.0f;
      accelDesc.motionEndTime = 1.0f;
      accelDesc.motionStartBorderMode = MTLMotionBorderModeVanish;
      accelDesc.motionEndBorderMode = MTLMotionBorderModeVanish;
      accelDesc.motionKeyframeCount = num_motion_steps;
    }

    if (!use_fast_trace_bvh) {
      accelDesc.usage |= (MTLAccelerationStructureUsageRefit |
                          MTLAccelerationStructureUsagePreferFastBuild);
    }

    MTLAccelerationStructureSizes accelSizes = [device
        accelerationStructureSizesWithDescriptor:accelDesc];
    id<MTLAccelerationStructure> accel_uncompressed = [device
        newAccelerationStructureWithSize:accelSizes.accelerationStructureSize];
    id<MTLBuffer> scratchBuf = [device newBufferWithLength:accelSizes.buildScratchBufferSize
                                                   options:MTLResourceStorageModePrivate];
    id<MTLBuffer> sizeBuf = [device newBufferWithLength:8 options:MTLResourceStorageModeShared];
    id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
    id<MTLAccelerationStructureCommandEncoder> accelEnc =
        [accelCommands accelerationStructureCommandEncoder];
    if (refit) {
      [accelEnc refitAccelerationStructure:accel_struct
                                descriptor:accelDesc
                               destination:accel_uncompressed
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    else {
      [accelEnc buildAccelerationStructure:accel_uncompressed
                                descriptor:accelDesc
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    if (use_fast_trace_bvh) {
      [accelEnc writeCompactedAccelerationStructureSize:accel_uncompressed
                                               toBuffer:sizeBuf
                                                 offset:0
                                           sizeDataType:MTLDataTypeULong];
    }
    [accelEnc endEncoding];
    [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
      /* free temp resources */
      [scratchBuf release];
      [aabbBuf release];

      if (use_fast_trace_bvh) {
        /* Compact the accel structure */
        uint64_t compressed_size = *(uint64_t *)sizeBuf.contents;

        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
          id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
          id<MTLAccelerationStructureCommandEncoder> accelEnc =
              [accelCommands accelerationStructureCommandEncoder];
          id<MTLAccelerationStructure> accel = [device
              newAccelerationStructureWithSize:compressed_size];
          [accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
                                toAccelerationStructure:accel];
          [accelEnc endEncoding];
          [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
            uint64_t allocated_size = [accel allocatedSize];
            stats.mem_alloc(allocated_size);
            accel_struct = accel;
            [accel_uncompressed release];
            accel_struct_building = false;
          }];
          [accelCommands commit];
        });
      }
      else {
        /* set our acceleration structure to the uncompressed structure */
        accel_struct = accel_uncompressed;

        uint64_t allocated_size = [accel_struct allocatedSize];
        stats.mem_alloc(allocated_size);
        accel_struct_building = false;
      }
      [sizeBuf release];
    }];

    accel_struct_building = true;
    [accelCommands commit];
    return true;
  }
  return false;
}
467 
/* Build (or refit) a BLAS for point cloud geometry.
 *
 * Points are represented as custom bounding-box primitives (one AABB per
 * point, per motion step) intersected by a custom intersection function
 * (table offset 2). Returns false if the cloud has no points or Metal RT is
 * unavailable; otherwise encodes an asynchronous GPU build and returns true,
 * with `accel_struct_building` cleared by the completion handler. */
bool BVHMetal::build_BLAS_pointcloud(Progress &progress,
                                     id<MTLDevice> device,
                                     id<MTLCommandQueue> queue,
                                     Geometry *const geom,
                                     bool refit)
{
  if (@available(macos 12.0, *)) {
    /* Build BLAS for point cloud */
    PointCloud *pointcloud = static_cast<PointCloud *>(geom);
    if (pointcloud->num_points() == 0) {
      return false;
    }

    /*------------------------------------------------*/
    BVH_status("Building pointcloud BLAS | %7d points | %s",
               (int)pointcloud->num_points(),
               geom->name.c_str());
    /*------------------------------------------------*/

    const size_t num_points = pointcloud->get_points().size();
    const float3 *points = pointcloud->get_points().data();
    const float *radius = pointcloud->get_radius().data();

    const bool use_fast_trace_bvh = (params.bvh_type == BVH_TYPE_STATIC);

    size_t num_motion_steps = 1;
    Attribute *motion_keys = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    if (motion_blur && pointcloud->get_use_motion_blur() && motion_keys) {
      num_motion_steps = pointcloud->get_motion_steps();
    }

    const size_t num_aabbs = num_motion_steps * num_points;

    MTLResourceOptions storage_mode;
    if (device.hasUnifiedMemory) {
      storage_mode = MTLResourceStorageModeShared;
    }
    else {
      storage_mode = MTLResourceStorageModeManaged;
    }

    /* Allocate a GPU buffer for the AABB data and populate it */
    id<MTLBuffer> aabbBuf = [device
        newBufferWithLength:num_aabbs * sizeof(MTLAxisAlignedBoundingBox)
                    options:storage_mode];
    MTLAxisAlignedBoundingBox *aabb_data = (MTLAxisAlignedBoundingBox *)[aabbBuf contents];

    /* Get AABBs for each motion step */
    size_t center_step = (num_motion_steps - 1) / 2;
    for (size_t step = 0; step < num_motion_steps; ++step) {
      /* The center step for motion vertices is not stored in the attribute */
      if (step != center_step) {
        size_t attr_offset = (step > center_step) ? step - 1 : step;
        points = motion_keys->data_float3() + attr_offset * num_points;
      }

      for (size_t j = 0; j < num_points; ++j) {
        const PointCloud::Point point = pointcloud->get_point(j);
        BoundBox bounds = BoundBox::empty;
        point.bounds_grow(points, radius, bounds);

        const size_t index = step * num_points + j;
        aabb_data[index].min = (MTLPackedFloat3 &)bounds.min;
        aabb_data[index].max = (MTLPackedFloat3 &)bounds.max;
      }
    }

    if (storage_mode == MTLResourceStorageModeManaged) {
      [aabbBuf didModifyRange:NSMakeRange(0, aabbBuf.length)];
    }

#  if 0
    /* Debug: dump the first few AABBs. */
    for (size_t i = 0; i < num_aabbs && i < 400; i++) {
      MTLAxisAlignedBoundingBox &bb = aabb_data[i];
      printf("  %d: %.1f,%.1f,%.1f -- %.1f,%.1f,%.1f\n", int(i), bb.min.x, bb.min.y, bb.min.z, bb.max.x, bb.max.y, bb.max.z);
    }
#  endif

    MTLAccelerationStructureGeometryDescriptor *geomDesc;
    if (motion_blur) {
      std::vector<MTLMotionKeyframeData *> aabb_ptrs;
      aabb_ptrs.reserve(num_motion_steps);
      for (size_t step = 0; step < num_motion_steps; ++step) {
        MTLMotionKeyframeData *k = [MTLMotionKeyframeData data];
        k.buffer = aabbBuf;
        k.offset = step * num_points * sizeof(MTLAxisAlignedBoundingBox);
        aabb_ptrs.push_back(k);
      }

      MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor *geomDescMotion =
          [MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor descriptor];
      geomDescMotion.boundingBoxBuffers = [NSArray arrayWithObjects:aabb_ptrs.data()
                                                              count:aabb_ptrs.size()];
      geomDescMotion.boundingBoxCount = num_points;
      geomDescMotion.boundingBoxStride = sizeof(aabb_data[0]);
      geomDescMotion.intersectionFunctionTableOffset = 2;

      /* Force a single any-hit call, so shadow record-all behavior works correctly */
      /* (Match optix behavior: unsigned int build_flags =
       * OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;) */
      geomDescMotion.allowDuplicateIntersectionFunctionInvocation = false;
      geomDescMotion.opaque = true;
      geomDesc = geomDescMotion;
    }
    else {
      MTLAccelerationStructureBoundingBoxGeometryDescriptor *geomDescNoMotion =
          [MTLAccelerationStructureBoundingBoxGeometryDescriptor descriptor];
      geomDescNoMotion.boundingBoxBuffer = aabbBuf;
      geomDescNoMotion.boundingBoxBufferOffset = 0;
      geomDescNoMotion.boundingBoxCount = int(num_aabbs);
      geomDescNoMotion.boundingBoxStride = sizeof(aabb_data[0]);
      geomDescNoMotion.intersectionFunctionTableOffset = 2;

      /* Force a single any-hit call, so shadow record-all behavior works correctly */
      /* (Match optix behavior: unsigned int build_flags =
       * OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;) */
      geomDescNoMotion.allowDuplicateIntersectionFunctionInvocation = false;
      geomDescNoMotion.opaque = true;
      geomDesc = geomDescNoMotion;
    }

    MTLPrimitiveAccelerationStructureDescriptor *accelDesc =
        [MTLPrimitiveAccelerationStructureDescriptor descriptor];
    accelDesc.geometryDescriptors = @[ geomDesc ];

    if (motion_blur) {
      accelDesc.motionStartTime = 0.0f;
      accelDesc.motionEndTime = 1.0f;
      accelDesc.motionStartBorderMode = MTLMotionBorderModeVanish;
      accelDesc.motionEndBorderMode = MTLMotionBorderModeVanish;
      accelDesc.motionKeyframeCount = num_motion_steps;
    }

    if (!use_fast_trace_bvh) {
      accelDesc.usage |= (MTLAccelerationStructureUsageRefit |
                          MTLAccelerationStructureUsagePreferFastBuild);
    }

    MTLAccelerationStructureSizes accelSizes = [device
        accelerationStructureSizesWithDescriptor:accelDesc];
    id<MTLAccelerationStructure> accel_uncompressed = [device
        newAccelerationStructureWithSize:accelSizes.accelerationStructureSize];
    id<MTLBuffer> scratchBuf = [device newBufferWithLength:accelSizes.buildScratchBufferSize
                                                   options:MTLResourceStorageModePrivate];
    id<MTLBuffer> sizeBuf = [device newBufferWithLength:8 options:MTLResourceStorageModeShared];
    id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
    id<MTLAccelerationStructureCommandEncoder> accelEnc =
        [accelCommands accelerationStructureCommandEncoder];
    if (refit) {
      [accelEnc refitAccelerationStructure:accel_struct
                                descriptor:accelDesc
                               destination:accel_uncompressed
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    else {
      [accelEnc buildAccelerationStructure:accel_uncompressed
                                descriptor:accelDesc
                             scratchBuffer:scratchBuf
                       scratchBufferOffset:0];
    }
    if (use_fast_trace_bvh) {
      [accelEnc writeCompactedAccelerationStructureSize:accel_uncompressed
                                               toBuffer:sizeBuf
                                                 offset:0
                                           sizeDataType:MTLDataTypeULong];
    }
    [accelEnc endEncoding];
    [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
      /* free temp resources */
      [scratchBuf release];
      [aabbBuf release];

      if (use_fast_trace_bvh) {
        /* Compact the accel structure */
        uint64_t compressed_size = *(uint64_t *)sizeBuf.contents;

        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
          id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
          id<MTLAccelerationStructureCommandEncoder> accelEnc =
              [accelCommands accelerationStructureCommandEncoder];
          id<MTLAccelerationStructure> accel = [device
              newAccelerationStructureWithSize:compressed_size];
          [accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
                                toAccelerationStructure:accel];
          [accelEnc endEncoding];
          [accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
            uint64_t allocated_size = [accel allocatedSize];
            stats.mem_alloc(allocated_size);
            accel_struct = accel;
            [accel_uncompressed release];
            accel_struct_building = false;
          }];
          [accelCommands commit];
        });
      }
      else {
        /* set our acceleration structure to the uncompressed structure */
        accel_struct = accel_uncompressed;

        uint64_t allocated_size = [accel_struct allocatedSize];
        stats.mem_alloc(allocated_size);
        accel_struct_building = false;
      }
      [sizeBuf release];
    }];

    accel_struct_building = true;
    [accelCommands commit];
    return true;
  }
  return false;
}
681 
/* Dispatch BLAS construction to the geometry-type-specific builder.
 * A BVHMetal BLAS wraps exactly one geometry/object pair; volumes share the
 * triangle-mesh path. Returns false for unsupported geometry types or when
 * Metal RT (macOS 12.0+) is unavailable. */
bool BVHMetal::build_BLAS(Progress &progress,
                          id<MTLDevice> device,
                          id<MTLCommandQueue> queue,
                          bool refit)
{
  if (@available(macos 12.0, *)) {
    assert(objects.size() == 1 && geometry.size() == 1);

    /* Build bottom level acceleration structures (BLAS) */
    Geometry *const geom = geometry[0];
    switch (geom->geometry_type) {
      case Geometry::VOLUME:
      case Geometry::MESH:
        return build_BLAS_mesh(progress, device, queue, geom, refit);
      case Geometry::HAIR:
        return build_BLAS_hair(progress, device, queue, geom, refit);
      case Geometry::POINTCLOUD:
        return build_BLAS_pointcloud(progress, device, queue, geom, refit);
      default:
        return false;
    }
  }
  return false;
}
706 
707 bool BVHMetal::build_TLAS(Progress &progress,
708  id<MTLDevice> device,
709  id<MTLCommandQueue> queue,
710  bool refit)
711 {
712  if (@available(macos 12.0, *)) {
713 
714  /* we need to sync here and ensure that all BLAS have completed async generation by both GCD
715  * and Metal */
716  {
717  __block bool complete_bvh = false;
718  while (!complete_bvh) {
719  dispatch_sync(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
720  complete_bvh = true;
721  for (Object *ob : objects) {
722  /* Skip non-traceable objects */
723  if (!ob->is_traceable())
724  continue;
725 
726  Geometry const *geom = ob->get_geometry();
727  BVHMetal const *blas = static_cast<BVHMetal const *>(geom->bvh);
728  if (blas->accel_struct_building) {
729  complete_bvh = false;
730 
731  /* We're likely waiting on a command buffer that's in flight to complete.
732  * Queue up a command buffer and wait for it complete before checking the BLAS again
733  */
734  id<MTLCommandBuffer> command_buffer = [queue commandBuffer];
735  [command_buffer commit];
736  [command_buffer waitUntilCompleted];
737  break;
738  }
739  }
740  });
741  }
742  }
743 
744  uint32_t num_instances = 0;
745  uint32_t num_motion_transforms = 0;
746  for (Object *ob : objects) {
747  /* Skip non-traceable objects */
748  if (!ob->is_traceable())
749  continue;
750  num_instances++;
751 
752  if (ob->use_motion()) {
753  num_motion_transforms += max((size_t)1, ob->get_motion().size());
754  }
755  else {
756  num_motion_transforms++;
757  }
758  }
759 
760  if (num_instances == 0) {
761  return false;
762  }
763 
764  /*------------------------------------------------*/
765  BVH_status("Building TLAS | %7d instances", (int)num_instances);
766  /*------------------------------------------------*/
767 
768  const bool use_fast_trace_bvh = (params.bvh_type == BVH_TYPE_STATIC);
769 
770  NSMutableArray *all_blas = [NSMutableArray array];
771  unordered_map<BVHMetal const *, int> instance_mapping;
772 
773  /* Lambda function to build/retrieve the BLAS index mapping */
774  auto get_blas_index = [&](BVHMetal const *blas) {
775  auto it = instance_mapping.find(blas);
776  if (it != instance_mapping.end()) {
777  return it->second;
778  }
779  else {
780  int blas_index = (int)[all_blas count];
781  instance_mapping[blas] = blas_index;
782  if (@available(macos 12.0, *)) {
783  [all_blas addObject:blas->accel_struct];
784  }
785  return blas_index;
786  }
787  };
788 
789  MTLResourceOptions storage_mode;
790  if (device.hasUnifiedMemory) {
791  storage_mode = MTLResourceStorageModeShared;
792  }
793  else {
794  storage_mode = MTLResourceStorageModeManaged;
795  }
796 
797  size_t instance_size;
798  if (motion_blur) {
799  instance_size = sizeof(MTLAccelerationStructureMotionInstanceDescriptor);
800  }
801  else {
802  instance_size = sizeof(MTLAccelerationStructureUserIDInstanceDescriptor);
803  }
804 
805  /* Allocate a GPU buffer for the instance data and populate it */
806  id<MTLBuffer> instanceBuf = [device newBufferWithLength:num_instances * instance_size
807  options:storage_mode];
808  id<MTLBuffer> motion_transforms_buf = nil;
809  MTLPackedFloat4x3 *motion_transforms = nullptr;
810  if (motion_blur && num_motion_transforms) {
811  motion_transforms_buf = [device
812  newBufferWithLength:num_motion_transforms * sizeof(MTLPackedFloat4x3)
813  options:storage_mode];
814  motion_transforms = (MTLPackedFloat4x3 *)motion_transforms_buf.contents;
815  }
816 
817  uint32_t instance_index = 0;
818  uint32_t motion_transform_index = 0;
819  for (Object *ob : objects) {
820  /* Skip non-traceable objects */
821  if (!ob->is_traceable())
822  continue;
823 
824  Geometry const *geom = ob->get_geometry();
825 
826  BVHMetal const *blas = static_cast<BVHMetal const *>(geom->bvh);
827  uint32_t accel_struct_index = get_blas_index(blas);
828 
829  /* Add some of the object visibility bits to the mask.
830  * __prim_visibility contains the combined visibility bits of all instances, so is not
831  * reliable if they differ between instances.
832  *
833  * METAL_WIP: OptiX visibility mask can only contain 8 bits, so have to trade-off here
834  * and select just a few important ones.
835  */
836  uint32_t mask = ob->visibility_for_tracing() & 0xFF;
837 
838  /* Have to have at least one bit in the mask, or else instance would always be culled. */
839  if (0 == mask) {
840  mask = 0xFF;
841  }
842 
843  /* Set user instance ID to object index */
844  int object_index = ob->get_device_index();
845  uint32_t user_id = uint32_t(object_index);
846 
847  /* Bake into the appropriate descriptor */
848  if (motion_blur) {
849  MTLAccelerationStructureMotionInstanceDescriptor *instances =
850  (MTLAccelerationStructureMotionInstanceDescriptor *)[instanceBuf contents];
851  MTLAccelerationStructureMotionInstanceDescriptor &desc = instances[instance_index++];
852 
853  desc.accelerationStructureIndex = accel_struct_index;
854  desc.userID = user_id;
855  desc.mask = mask;
856  desc.motionStartTime = 0.0f;
857  desc.motionEndTime = 1.0f;
858  desc.motionTransformsStartIndex = motion_transform_index;
859  desc.motionStartBorderMode = MTLMotionBorderModeVanish;
860  desc.motionEndBorderMode = MTLMotionBorderModeVanish;
861  desc.intersectionFunctionTableOffset = 0;
862 
863  int key_count = ob->get_motion().size();
864  if (key_count) {
865  desc.motionTransformsCount = key_count;
866 
867  Transform *keys = ob->get_motion().data();
868  for (int i = 0; i < key_count; i++) {
869  float *t = (float *)&motion_transforms[motion_transform_index++];
870  /* Transpose transform */
871  auto src = (float const *)&keys[i];
872  for (int i = 0; i < 12; i++) {
873  t[i] = src[(i / 3) + 4 * (i % 3)];
874  }
875  }
876  }
877  else {
878  desc.motionTransformsCount = 1;
879 
880  float *t = (float *)&motion_transforms[motion_transform_index++];
881  if (ob->get_geometry()->is_instanced()) {
882  /* Transpose transform */
883  auto src = (float const *)&ob->get_tfm();
884  for (int i = 0; i < 12; i++) {
885  t[i] = src[(i / 3) + 4 * (i % 3)];
886  }
887  }
888  else {
889  /* Clear transform to identity matrix */
890  t[0] = t[4] = t[8] = 1.0f;
891  }
892  }
893  }
894  else {
895  MTLAccelerationStructureUserIDInstanceDescriptor *instances =
896  (MTLAccelerationStructureUserIDInstanceDescriptor *)[instanceBuf contents];
897  MTLAccelerationStructureUserIDInstanceDescriptor &desc = instances[instance_index++];
898 
899  desc.accelerationStructureIndex = accel_struct_index;
900  desc.userID = user_id;
901  desc.mask = mask;
902  desc.intersectionFunctionTableOffset = 0;
903 
904  float *t = (float *)&desc.transformationMatrix;
905  if (ob->get_geometry()->is_instanced()) {
906  /* Transpose transform */
907  auto src = (float const *)&ob->get_tfm();
908  for (int i = 0; i < 12; i++) {
909  t[i] = src[(i / 3) + 4 * (i % 3)];
910  }
911  }
912  else {
913  /* Clear transform to identity matrix */
914  t[0] = t[4] = t[8] = 1.0f;
915  }
916  }
917  }
918 
919  if (storage_mode == MTLResourceStorageModeManaged) {
920  [instanceBuf didModifyRange:NSMakeRange(0, instanceBuf.length)];
921  if (motion_transforms_buf) {
922  [motion_transforms_buf didModifyRange:NSMakeRange(0, motion_transforms_buf.length)];
923  assert(num_motion_transforms == motion_transform_index);
924  }
925  }
926 
927  MTLInstanceAccelerationStructureDescriptor *accelDesc =
928  [MTLInstanceAccelerationStructureDescriptor descriptor];
929  accelDesc.instanceCount = num_instances;
930  accelDesc.instanceDescriptorType = MTLAccelerationStructureInstanceDescriptorTypeUserID;
931  accelDesc.instanceDescriptorBuffer = instanceBuf;
932  accelDesc.instanceDescriptorBufferOffset = 0;
933  accelDesc.instanceDescriptorStride = instance_size;
934  accelDesc.instancedAccelerationStructures = all_blas;
935 
936  if (motion_blur) {
937  accelDesc.instanceDescriptorType = MTLAccelerationStructureInstanceDescriptorTypeMotion;
938  accelDesc.motionTransformBuffer = motion_transforms_buf;
939  accelDesc.motionTransformCount = num_motion_transforms;
940  }
941 
942  if (!use_fast_trace_bvh) {
943  accelDesc.usage |= (MTLAccelerationStructureUsageRefit |
944  MTLAccelerationStructureUsagePreferFastBuild);
945  }
946 
947  MTLAccelerationStructureSizes accelSizes = [device
948  accelerationStructureSizesWithDescriptor:accelDesc];
949  id<MTLAccelerationStructure> accel = [device
950  newAccelerationStructureWithSize:accelSizes.accelerationStructureSize];
951  id<MTLBuffer> scratchBuf = [device newBufferWithLength:accelSizes.buildScratchBufferSize
952  options:MTLResourceStorageModePrivate];
953  id<MTLCommandBuffer> accelCommands = [queue commandBuffer];
954  id<MTLAccelerationStructureCommandEncoder> accelEnc =
955  [accelCommands accelerationStructureCommandEncoder];
956  if (refit) {
957  [accelEnc refitAccelerationStructure:accel_struct
958  descriptor:accelDesc
959  destination:accel
960  scratchBuffer:scratchBuf
961  scratchBufferOffset:0];
962  }
963  else {
964  [accelEnc buildAccelerationStructure:accel
965  descriptor:accelDesc
966  scratchBuffer:scratchBuf
967  scratchBufferOffset:0];
968  }
969  [accelEnc endEncoding];
970  [accelCommands commit];
971  [accelCommands waitUntilCompleted];
972 
973  if (motion_transforms_buf) {
974  [motion_transforms_buf release];
975  }
976  [instanceBuf release];
977  [scratchBuf release];
978 
979  uint64_t allocated_size = [accel allocatedSize];
980  stats.mem_alloc(allocated_size);
981 
982  /* Cache top and bottom-level acceleration structs */
983  accel_struct = accel;
984  blas_array.clear();
985  blas_array.reserve(all_blas.count);
986  for (id<MTLAccelerationStructure> blas in all_blas) {
987  blas_array.push_back(blas);
988  }
989 
990  return true;
991  }
992  return false;
993 }
994 
bool BVHMetal::build(Progress &progress,
                     id<MTLDevice> device,
                     id<MTLCommandQueue> queue,
                     bool refit)
{
  /* Metal ray-tracing acceleration structures require macOS 12.0+. */
  if (@available(macos 12.0, *)) {
    /* A refit of a non-static BVH updates the existing acceleration structure
     * in place, so it must already exist. In every other case any previously
     * built structure is stale — release it and account for the freed memory. */
    const bool reuse_for_refit = refit && params.bvh_type != BVH_TYPE_STATIC;
    if (reuse_for_refit) {
      assert(accel_struct);
    }
    else if (accel_struct) {
      stats.mem_free(accel_struct.allocatedSize);
      [accel_struct release];
      accel_struct = nil;
    }
  }

  /* Dispatch to the top-level (instance) or bottom-level (geometry) builder. */
  return params.top_level ? build_TLAS(progress, device, queue, refit) :
                            build_BLAS(progress, device, queue, refit);
}
1020 
1022 
1023 #endif /* WITH_METAL */
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble u2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLdouble GLdouble v2 _GL_VOID_RET _GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLdouble GLdouble nz _GL_VOID_RET _GL_VOID GLfloat GLfloat nz _GL_VOID_RET _GL_VOID GLint GLint nz _GL_VOID_RET _GL_VOID GLshort GLshort nz _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const GLfloat *values _GL_VOID_RET _GL_VOID GLsizei const GLushort *values _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID const GLuint const GLclampf *priorities _GL_VOID_RET _GL_VOID GLdouble y _GL_VOID_RET _GL_VOID GLfloat y _GL_VOID_RET _GL_VOID GLint y _GL_VOID_RET _GL_VOID GLshort y _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLfloat GLfloat z _GL_VOID_RET _GL_VOID GLint GLint z _GL_VOID_RET _GL_VOID GLshort GLshort z _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble w _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat w _GL_VOID_RET _GL_VOID GLint GLint GLint w _GL_VOID_RET _GL_VOID GLshort GLshort GLshort w _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble y2 _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat y2 _GL_VOID_RET _GL_VOID GLint GLint GLint y2 _GL_VOID_RET _GL_VOID GLshort GLshort GLshort y2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLuint *buffer _GL_VOID_RET _GL_VOID GLdouble t _GL_VOID_RET _GL_VOID GLfloat t _GL_VOID_RET _GL_VOID GLint t _GL_VOID_RET _GL_VOID GLshort t _GL_VOID_RET _GL_VOID GLdouble t
float float3[3]
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Sky Generate a procedural sky texture Noise Generate fractal Perlin noise Wave Generate procedural bands or rings with noise Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a point
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition: btDbvt.cpp:299
void refit(btStridingMeshInterface *triangles, const btVector3 &aabbMin, const btVector3 &aabbMax)
void build(btStridingMeshInterface *triangles, bool useQuantizedAabbCompression, const btVector3 &bvhAabbMin, const btVector3 &bvhAabbMax)
Attribute * find(ustring name) const
float3 * data_float3()
Definition: bvh/bvh.h:63
Type geometry_type
AttributeSet attributes
Definition: hair.h:13
Curve get_curve(size_t i) const
Definition: hair.h:109
size_t num_curves() const
Definition: hair.h:123
size_t num_segments() const
Definition: hair.h:128
size_t size() const
#define CCL_NAMESPACE_END
Definition: cuda/compat.h:9
CCL_NAMESPACE_BEGIN struct Options options
Curve curve
SyclQueue * queue
SyclQueue void void * src
static float verts[][3]
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
int count
ccl_gpu_kernel_postfix int ccl_global int ccl_global int * num_indices
ccl_gpu_kernel_postfix ccl_global float int int int int float bool int offset
@ ATTR_STD_MOTION_VERTEX_POSITION
Definition: kernel/types.h:624
descriptor
Definition: logImageCore.h:144
ccl_device_inline float4 mask(const int4 &mask, const float4 &a)
Definition: math_float4.h:513
Segment< FEdge *, Vec3r > segment
T length(const vec_base< T, Size > &a)
@ BVH_TYPE_STATIC
Definition: params.h:37
unsigned int uint32_t
Definition: stdint.h:80
unsigned __int64 uint64_t
Definition: stdint.h:90
@ empty
Definition: boundbox.h:35
size_t num_triangles() const
Definition: scene/mesh.h:79
ustring name
Definition: graph/node.h:174
Point get_point(int i) const
size_t num_points() const
float max