#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include <CL/sycl.hpp>
21 static OneAPIErrorCallback s_error_cb =
nullptr;
22 static void *s_error_user_ptr =
nullptr;
24 static std::vector<sycl::device> oneapi_available_devices();
26 void oneapi_set_error_cb(OneAPIErrorCallback cb,
void *user_ptr)
29 s_error_user_ptr = user_ptr;
32 void oneapi_check_usm(SyclQueue *queue_,
const void *usm_ptr,
bool allow_host =
false)
36 sycl::info::device_type device_type =
37 queue->get_device().get_info<sycl::info::device::device_type>();
40 assert(usm_type == sycl::usm::alloc::device ||
41 ((device_type == sycl::info::device_type::host ||
42 device_type == sycl::info::device_type::is_cpu || allow_host) &&
43 usm_type == sycl::usm::alloc::host));
47 bool oneapi_create_queue(SyclQueue *&external_queue,
int device_index)
49 bool finished_correct =
true;
51 std::vector<sycl::device>
devices = oneapi_available_devices();
52 if (device_index < 0 || device_index >=
devices.size()) {
56 sycl::property::queue::in_order());
57 external_queue =
reinterpret_cast<SyclQueue *
>(created_queue);
59 catch (sycl::exception
const &
e) {
60 finished_correct =
false;
62 s_error_cb(
e.what(), s_error_user_ptr);
65 return finished_correct;
68 void oneapi_free_queue(SyclQueue *queue_)
75 void *oneapi_usm_aligned_alloc_host(SyclQueue *queue_,
size_t memory_size,
size_t alignment)
79 return sycl::aligned_alloc_host(alignment, memory_size, *
queue);
82 void *oneapi_usm_alloc_device(SyclQueue *queue_,
size_t memory_size)
86 return sycl::malloc_device(memory_size, *
queue);
89 void oneapi_usm_free(SyclQueue *queue_,
void *usm_ptr)
93 oneapi_check_usm(queue_, usm_ptr,
true);
97 bool oneapi_usm_memcpy(SyclQueue *queue_,
void *
dest,
void *
src,
size_t num_bytes)
101 oneapi_check_usm(queue_,
dest,
true);
102 oneapi_check_usm(queue_,
src,
true);
103 sycl::event mem_event =
queue->memcpy(
dest,
src, num_bytes);
104 # ifdef WITH_CYCLES_DEBUG
109 mem_event.wait_and_throw();
112 catch (sycl::exception
const &
e) {
114 s_error_cb(
e.what(), s_error_user_ptr);
121 bool from_device_to_host = dest_type == sycl::usm::alloc::host &&
122 src_type == sycl::usm::alloc::device;
123 bool host_or_device_memop_with_offset = dest_type == sycl::usm::alloc::unknown ||
124 src_type == sycl::usm::alloc::unknown;
128 if (from_device_to_host || host_or_device_memop_with_offset)
134 bool oneapi_usm_memset(SyclQueue *queue_,
void *usm_ptr,
unsigned char value,
size_t num_bytes)
138 oneapi_check_usm(queue_, usm_ptr,
true);
139 sycl::event mem_event =
queue->memset(usm_ptr, value, num_bytes);
140 # ifdef WITH_CYCLES_DEBUG
145 mem_event.wait_and_throw();
148 catch (sycl::exception
const &
e) {
150 s_error_cb(
e.what(), s_error_user_ptr);
160 bool oneapi_queue_synchronize(SyclQueue *queue_)
165 queue->wait_and_throw();
168 catch (sycl::exception
const &
e) {
170 s_error_cb(
e.what(), s_error_user_ptr);
178 bool oneapi_run_test_kernel(SyclQueue *queue_)
183 sycl::buffer<float, 1>
A(
N);
184 sycl::buffer<float, 1>
B(
N);
187 sycl::host_accessor A_host_acc(
A, sycl::write_only);
188 for (
size_t i = (
size_t)0; i <
N; i++)
189 A_host_acc[i] = rand() % 32;
193 queue->submit([&](sycl::handler &cgh) {
194 sycl::accessor A_acc(
A, cgh, sycl::read_only);
195 sycl::accessor B_acc(
B, cgh, sycl::write_only, sycl::no_init);
197 cgh.parallel_for(
N, [=](sycl::id<1> idx) { B_acc[idx] = A_acc[idx] + idx.get(0); });
199 queue->wait_and_throw();
201 sycl::host_accessor A_host_acc(
A, sycl::read_only);
202 sycl::host_accessor B_host_acc(
B, sycl::read_only);
204 for (
size_t i = (
size_t)0; i <
N; i++) {
205 float result = A_host_acc[i] + B_host_acc[i];
209 catch (sycl::exception
const &
e) {
211 s_error_cb(
e.what(), s_error_user_ptr);
219 bool oneapi_kernel_globals_size(SyclQueue *queue_,
size_t &kernel_global_size)
226 void oneapi_set_global_memory(SyclQueue *queue_,
229 void *memory_device_pointer)
234 assert(memory_device_pointer);
236 oneapi_check_usm(queue_, memory_device_pointer);
242 # define KERNEL_DATA_ARRAY(type, name) \
243 else if (#name == matched_name) \
245 globals->__##name = (type *)memory_device_pointer; \
250 else if (
"integrator_state" == matched_name) {
255 # include "kernel/data_arrays.h"
258 std::cerr <<
"Can't found global/constant memory with name \"" << matched_name <<
"\"!"
262 # undef KERNEL_DATA_ARRAY
267 size_t oneapi_kernel_preferred_local_size(SyclQueue *queue_,
269 const size_t kernel_global_size)
273 (
void)kernel_global_size;
274 const static size_t preferred_work_group_size_intersect_shading = 32;
275 const static size_t preferred_work_group_size_technical = 1024;
277 size_t preferred_work_group_size = 0;
292 preferred_work_group_size = preferred_work_group_size_intersect_shading;
307 preferred_work_group_size = preferred_work_group_size_technical;
311 preferred_work_group_size = 512;
314 const size_t limit_work_group_size =
315 queue->get_device().get_info<sycl::info::device::max_work_group_size>();
316 return std::min(limit_work_group_size, preferred_work_group_size);
319 bool oneapi_enqueue_kernel(KernelContext *kernel_context,
333 size_t local_size = oneapi_kernel_preferred_local_size(
334 kernel_context->queue, device_kernel,
global_size);
353 size_t groups_count = (
num_states + local_size - 1) / local_size;
358 # ifdef WITH_ONEAPI_SYCL_HOST_ENABLED
359 if (
queue->get_device().is_host()) {
368 # pragma warning(error : 4062)
369 # elif defined(__GNUC__)
370 # pragma GCC diagnostic push
371 # pragma GCC diagnostic error "-Wswitch"
375 queue->submit([&](sycl::handler &cgh) {
376 switch (device_kernel) {
378 oneapi_call(kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_reset);
383 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_init_from_camera);
388 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_init_from_bake);
393 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_intersect_closest);
398 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_intersect_shadow);
407 oneapi_kernel_integrator_intersect_subsurface);
416 oneapi_kernel_integrator_intersect_volume_stack);
421 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_background);
426 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_light);
431 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_shadow);
436 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_surface);
445 oneapi_kernel_integrator_shade_surface_raytrace);
450 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_surface_mnee);
455 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_shade_volume);
460 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_queued_paths_array);
469 oneapi_kernel_integrator_queued_shadow_paths_array);
474 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_active_paths_array);
483 oneapi_kernel_integrator_terminated_paths_array);
492 oneapi_kernel_integrator_terminated_shadow_paths_array);
497 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_sorted_paths_array);
506 oneapi_kernel_integrator_compact_paths_array);
515 oneapi_kernel_integrator_compact_shadow_paths_array);
524 oneapi_kernel_adaptive_sampling_convergence_check);
529 kg, cgh,
global_size, local_size, args, oneapi_kernel_adaptive_sampling_filter_x);
534 kg, cgh,
global_size, local_size, args, oneapi_kernel_adaptive_sampling_filter_y);
538 oneapi_call(kg, cgh,
global_size, local_size, args, oneapi_kernel_shader_eval_displace);
543 kg, cgh,
global_size, local_size, args, oneapi_kernel_shader_eval_background);
552 oneapi_kernel_shader_eval_curve_shadow_transparency);
556 oneapi_call(kg, cgh,
global_size, local_size, args, oneapi_kernel_prefix_sum);
561 # define DEVICE_KERNEL_FILM_CONVERT_PARTIAL(VARIANT, variant) \
562 case DEVICE_KERNEL_FILM_CONVERT_##VARIANT: { \
563 oneapi_call(kg, cgh, \
567 oneapi_kernel_film_convert_##variant); \
571 # define DEVICE_KERNEL_FILM_CONVERT(variant, VARIANT) \
572 DEVICE_KERNEL_FILM_CONVERT_PARTIAL(VARIANT, variant) \
573 DEVICE_KERNEL_FILM_CONVERT_PARTIAL(VARIANT##_HALF_RGBA, variant##_half_rgba)
575 DEVICE_KERNEL_FILM_CONVERT(depth, DEPTH);
576 DEVICE_KERNEL_FILM_CONVERT(mist, MIST);
577 DEVICE_KERNEL_FILM_CONVERT(sample_count, SAMPLE_COUNT);
578 DEVICE_KERNEL_FILM_CONVERT(
float,
FLOAT);
579 DEVICE_KERNEL_FILM_CONVERT(light_path, LIGHT_PATH);
581 DEVICE_KERNEL_FILM_CONVERT(motion, MOTION);
582 DEVICE_KERNEL_FILM_CONVERT(cryptomatte, CRYPTOMATTE);
583 DEVICE_KERNEL_FILM_CONVERT(shadow_catcher, SHADOW_CATCHER);
584 DEVICE_KERNEL_FILM_CONVERT(shadow_catcher_matte_with_shadow,
585 SHADOW_CATCHER_MATTE_WITH_SHADOW);
586 DEVICE_KERNEL_FILM_CONVERT(combined, COMBINED);
589 # undef DEVICE_KERNEL_FILM_CONVERT
590 # undef DEVICE_KERNEL_FILM_CONVERT_PARTIAL
595 kg, cgh,
global_size, local_size, args, oneapi_kernel_filter_guiding_preprocess);
604 oneapi_kernel_filter_guiding_set_fake_albedo);
609 kg, cgh,
global_size, local_size, args, oneapi_kernel_filter_color_preprocess);
614 kg, cgh,
global_size, local_size, args, oneapi_kernel_filter_color_postprocess);
619 kg, cgh,
global_size, local_size, args, oneapi_kernel_cryptomatte_postprocess);
624 kg, cgh,
global_size, local_size, args, oneapi_kernel_integrator_compact_states);
633 oneapi_kernel_integrator_compact_shadow_states);
642 oneapi_kernel_integrator_shadow_catcher_count_possible_splits);
653 catch (sycl::exception
const &
e) {
655 s_error_cb(
e.what(), s_error_user_ptr);
661 # pragma warning(default : 4062)
662 # elif defined(__GNUC__)
663 # pragma GCC diagnostic pop
/* Minimum driver build numbers accepted by the device filter below.
 * Windows drivers encode the build as xx.xx.xxx.xxxx (folded to one integer by
 * parse_driver_build_version); Linux/NEO drivers use a single build number. */
static const int lowest_supported_driver_version_win = 1013430;
static const int lowest_supported_driver_version_neo = 23904;
675 static int parse_driver_build_version(
const sycl::device &device)
677 const std::string &driver_version = device.get_info<sycl::info::device::driver_version>();
678 int driver_build_version = 0;
680 size_t second_dot_position = driver_version.find(
'.', driver_version.find(
'.') + 1);
681 if (second_dot_position == std::string::npos) {
682 std::cerr <<
"Unable to parse unknown Intel GPU driver version \"" << driver_version
683 <<
"\" does not match xx.xx.xxxxx (Linux), x.x.xxxx (L0),"
684 <<
" xx.xx.xxx.xxxx (Windows) for device \""
685 << device.get_info<sycl::info::device::name>() <<
"\"." << std::endl;
689 size_t third_dot_position = driver_version.find(
'.', second_dot_position + 1);
690 if (third_dot_position != std::string::npos) {
691 const std::string &third_number_substr = driver_version.substr(
692 second_dot_position + 1, third_dot_position - second_dot_position - 1);
693 const std::string &forth_number_substr = driver_version.substr(third_dot_position + 1);
694 if (third_number_substr.length() == 3 && forth_number_substr.length() == 4)
695 driver_build_version = std::stoi(third_number_substr) * 10000 +
696 std::stoi(forth_number_substr);
699 const std::string &third_number_substr = driver_version.substr(second_dot_position + 1);
700 driver_build_version = std::stoi(third_number_substr);
703 catch (std::invalid_argument &
e) {
704 std::cerr <<
"Unable to parse unknown Intel GPU driver version \"" << driver_version
705 <<
"\" does not match xx.xx.xxxxx (Linux), x.x.xxxx (L0),"
706 <<
" xx.xx.xxx.xxxx (Windows) for device \""
707 << device.get_info<sycl::info::device::name>() <<
"\"." << std::endl;
711 return driver_build_version;
714 static std::vector<sycl::device> oneapi_available_devices()
716 bool allow_all_devices =
false;
717 if (getenv(
"CYCLES_ONEAPI_ALL_DEVICES") !=
nullptr)
718 allow_all_devices =
true;
722 # ifdef WITH_ONEAPI_SYCL_HOST_ENABLED
723 bool allow_host =
true;
725 bool allow_host =
false;
728 const std::vector<sycl::platform> &oneapi_platforms = sycl::platform::get_platforms();
730 std::vector<sycl::device> available_devices;
731 for (
const sycl::platform &platform : oneapi_platforms) {
738 const std::vector<sycl::device> &oneapi_devices =
740 platform.get_devices(sycl::info::device_type::gpu);
742 for (
const sycl::device &device : oneapi_devices) {
743 if (allow_all_devices) {
745 if (allow_host || !device.is_host()) {
746 available_devices.push_back(device);
750 bool filter_out =
false;
755 if (device.is_gpu() && platform.get_backend() == sycl::backend::ext_oneapi_level_zero) {
758 int number_of_eus = 96;
759 int threads_per_eu = 7;
760 if (device.has(sycl::aspect::ext_intel_gpu_eu_count)) {
761 number_of_eus = device.get_info<sycl::info::device::ext_intel_gpu_eu_count>();
763 if (device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
765 device.get_info<sycl::info::device::ext_intel_gpu_hw_threads_per_eu>();
768 if (number_of_eus <= 96 && threads_per_eu == 7) {
773 int driver_build_version = parse_driver_build_version(device);
774 if ((driver_build_version > 100000 &&
775 driver_build_version < lowest_supported_driver_version_win) ||
776 driver_build_version < lowest_supported_driver_version_neo) {
781 else if (!allow_host && device.is_host()) {
784 else if (!allow_all_devices) {
789 available_devices.push_back(device);
795 return available_devices;
798 char *oneapi_device_capabilities()
800 std::stringstream capabilities;
802 const std::vector<sycl::device> &oneapi_devices = oneapi_available_devices();
803 for (
const sycl::device &device : oneapi_devices) {
804 const std::string &name = device.get_info<sycl::info::device::name>();
806 capabilities << std::string(
"\t") << name <<
"\n";
807 # define WRITE_ATTR(attribute_name, attribute_variable) \
808 capabilities << "\t\tsycl::info::device::" #attribute_name "\t\t\t" << attribute_variable \
810 # define GET_NUM_ATTR(attribute) \
812 size_t attribute = (size_t)device.get_info<sycl::info::device ::attribute>(); \
813 capabilities << "\t\tsycl::info::device::" #attribute "\t\t\t" << attribute << "\n"; \
816 GET_NUM_ATTR(vendor_id)
817 GET_NUM_ATTR(max_compute_units)
818 GET_NUM_ATTR(max_work_item_dimensions)
820 sycl::id<3> max_work_item_sizes =
821 device.get_info<sycl::info::device::max_work_item_sizes<3>>();
822 WRITE_ATTR(
"max_work_item_sizes_dim0", ((
size_t)max_work_item_sizes.get(0)))
823 WRITE_ATTR("max_work_item_sizes_dim1", ((
size_t)max_work_item_sizes.get(1)))
824 WRITE_ATTR("max_work_item_sizes_dim2", ((
size_t)max_work_item_sizes.get(2)))
826 GET_NUM_ATTR(max_work_group_size)
827 GET_NUM_ATTR(max_num_sub_groups)
828 GET_NUM_ATTR(sub_group_independent_forward_progress)
830 GET_NUM_ATTR(preferred_vector_width_char)
831 GET_NUM_ATTR(preferred_vector_width_short)
832 GET_NUM_ATTR(preferred_vector_width_int)
833 GET_NUM_ATTR(preferred_vector_width_long)
834 GET_NUM_ATTR(preferred_vector_width_float)
835 GET_NUM_ATTR(preferred_vector_width_double)
836 GET_NUM_ATTR(preferred_vector_width_half)
838 GET_NUM_ATTR(native_vector_width_char)
839 GET_NUM_ATTR(native_vector_width_short)
840 GET_NUM_ATTR(native_vector_width_int)
841 GET_NUM_ATTR(native_vector_width_long)
842 GET_NUM_ATTR(native_vector_width_float)
843 GET_NUM_ATTR(native_vector_width_double)
844 GET_NUM_ATTR(native_vector_width_half)
846 size_t max_clock_frequency =
847 (
size_t)(device.is_host() ? (
size_t)0 :
848 device.get_info<sycl::info::device::max_clock_frequency>());
849 WRITE_ATTR("max_clock_frequency", max_clock_frequency)
851 GET_NUM_ATTR(address_bits)
852 GET_NUM_ATTR(max_mem_alloc_size)
857 bool image_support = false;
858 WRITE_ATTR("image_support", (
size_t)image_support)
860 GET_NUM_ATTR(max_parameter_size)
861 GET_NUM_ATTR(mem_base_addr_align)
862 GET_NUM_ATTR(global_mem_size)
863 GET_NUM_ATTR(local_mem_size)
864 GET_NUM_ATTR(error_correction_support)
865 GET_NUM_ATTR(profiling_timer_resolution)
866 GET_NUM_ATTR(is_available)
870 capabilities <<
"\n";
873 return ::strdup(capabilities.str().c_str());
/* Release a C-heap allocation handed out by this module (e.g. the strdup()
 * result of oneapi_device_capabilities()). Null is accepted and ignored. */
void oneapi_free(void *p)
{
  if (p) {
    free(p);
  }
}
883 void oneapi_iterate_devices(OneAPIDeviceIteratorCallback cb,
void *user_ptr)
886 std::vector<sycl::device>
devices = oneapi_available_devices();
887 for (sycl::device &device :
devices) {
888 const std::string &platform_name =
889 device.get_platform().get_info<sycl::info::platform::name>();
890 std::string name = device.get_info<sycl::info::device::name>();
891 std::string
id =
"ONEAPI_" + platform_name +
"_" + name;
892 if (device.has(sycl::aspect::ext_intel_pci_address)) {
893 id.append(
"_" + device.get_info<sycl::info::device::ext_intel_pci_address>());
895 (cb)(
id.c_str(), name.c_str(), num, user_ptr);
900 size_t oneapi_get_memcapacity(SyclQueue *
queue)
904 .get_info<sycl::info::device::global_mem_size>();
907 int oneapi_get_num_multiprocessors(SyclQueue *
queue)
909 const sycl::device &device =
reinterpret_cast<sycl::queue *
>(
queue)->get_device();
910 if (device.has(sycl::aspect::ext_intel_gpu_eu_count)) {
911 return device.get_info<sycl::info::device::ext_intel_gpu_eu_count>();
917 int oneapi_get_max_num_threads_per_multiprocessor(SyclQueue *
queue)
919 const sycl::device &device =
reinterpret_cast<sycl::queue *
>(
queue)->get_device();
920 if (device.has(sycl::aspect::ext_intel_gpu_eu_simd_width) &&
921 device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
922 return device.get_info<sycl::info::device::ext_intel_gpu_eu_simd_width>() *
923 device.get_info<sycl::info::device::ext_intel_gpu_hw_threads_per_eu>();
void BLI_kdtree_nd_() free(KDTree *tree)
__forceinline bool all(const avxb &b)
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
static PointerRNA * get_pointer_type(ButsContextPath *path, StructRNA *type)
#define kernel_assert(cond)
#define KERNEL_DATA_ARRAY(type, name)
SyclQueue void void size_t num_bytes SyclQueue void const char void *memory_device_pointer KernelContext int kernel
SyclQueue void void size_t num_bytes SyclQueue void const char * memory_name
SyclQueue void void * src
SyclQueue void void size_t num_bytes void
SyclQueue void void size_t num_bytes SyclQueue void const char void *memory_device_pointer KernelContext int size_t global_size
SyclQueue void void size_t num_bytes SyclQueue void * kernel_globals
@ DEVICE_KERNEL_ADAPTIVE_SAMPLING_CONVERGENCE_CHECK
@ DEVICE_KERNEL_INTEGRATOR_RESET
@ DEVICE_KERNEL_INTEGRATOR_QUEUED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_SHADE_LIGHT
@ DEVICE_KERNEL_FILTER_COLOR_PREPROCESS
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE
@ DEVICE_KERNEL_SHADER_EVAL_DISPLACE
@ DEVICE_KERNEL_INTEGRATOR_SHADOW_CATCHER_COUNT_POSSIBLE_SPLITS
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_SUBSURFACE
@ DEVICE_KERNEL_INTEGRATOR_QUEUED_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_FILTER_GUIDING_SET_FAKE_ALBEDO
@ DEVICE_KERNEL_FILTER_COLOR_POSTPROCESS
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW
@ DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_VOLUME_STACK
@ DEVICE_KERNEL_SHADER_EVAL_BACKGROUND
@ DEVICE_KERNEL_INTEGRATOR_SORTED_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_STATES
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE
@ DEVICE_KERNEL_FILTER_GUIDING_PREPROCESS
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_STATES
@ DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE
@ DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL
@ DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_INIT_FROM_CAMERA
@ DEVICE_KERNEL_ADAPTIVE_SAMPLING_CONVERGENCE_FILTER_Y
@ DEVICE_KERNEL_ADAPTIVE_SAMPLING_CONVERGENCE_FILTER_X
@ DEVICE_KERNEL_CRYPTOMATTE_POSTPROCESS
@ DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY
@ DEVICE_KERNEL_INTEGRATOR_INIT_FROM_BAKE
@ DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY
@ DEVICE_KERNEL_INTEGRATOR_SHADE_VOLUME
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW
@ DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST
@ DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND
@ DEVICE_KERNEL_PREFIX_SUM
Vector< CPUDevice > devices
list of all CPUDevices. for every hardware thread an instance of CPUDevice is created
struct blender::compositor::@179::@182 opencl
#define GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE
IntegratorStateGPU * integrator_state