/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "BKE_global.h"

#include "DNA_userdef_types.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_capabilities.h"
#include "GPU_framebuffer.h"
#include "GPU_platform.h"
#include "GPU_state.h"

#include "mtl_backend.hh"
#include "mtl_common.hh"
#include "mtl_context.hh"
#include "mtl_debug.hh"
#include "mtl_texture.hh"

#include "GHOST_C-api.h"

namespace blender::gpu {

/* -------------------------------------------------------------------- */

void gpu::MTLTexture::mtl_texture_init()
{
  BLI_assert(MTLContext::get() != nullptr);

  /* Status. */
  is_baked_ = false;
  is_dirty_ = false;
  resource_mode_ = MTL_TEXTURE_MODE_DEFAULT;
  mtl_max_mips_ = 1;

  /* Metal properties. */
  texture_ = nil;
  texture_buffer_ = nil;
  mip_swizzle_view_ = nil;

  /* Binding information. */
  is_bound_ = false;

  /* VBO. */
  vert_buffer_ = nullptr;
  vert_buffer_mtl_ = nil;
  vert_buffer_offset_ = -1;

  /* Default Swizzle. */
  tex_swizzle_mask_[0] = 'r';
  tex_swizzle_mask_[1] = 'g';
  tex_swizzle_mask_[2] = 'b';
  tex_swizzle_mask_[3] = 'a';
  mtl_swizzle_mask_ = MTLTextureSwizzleChannelsMake(
      MTLTextureSwizzleRed, MTLTextureSwizzleGreen, MTLTextureSwizzleBlue, MTLTextureSwizzleAlpha);

  /* TODO(Metal): Find a way of specifying texture usage externally. */
  gpu_image_usage_flags_ = GPU_TEXTURE_USAGE_GENERAL;
}

gpu::MTLTexture::MTLTexture(const char *name) : Texture(name)
{
  /* Common Initialization. */
  mtl_texture_init();
}

gpu::MTLTexture::MTLTexture(const char *name,
                            eGPUTextureFormat format,
                            eGPUTextureType type,
                            id<MTLTexture> metal_texture)
    : Texture(name)
{
  /* Common Initialization. */
  mtl_texture_init();

  /* Prep texture from METAL handle. */
  BLI_assert(metal_texture != nil);
  BLI_assert(type == GPU_TEXTURE_2D);
  type_ = type;
  init_2D(metal_texture.width, metal_texture.height, 0, 1, format);

  /* Assign MTLTexture. */
  texture_ = metal_texture;
  [texture_ retain];

  /* Flag as Baked. */
  is_baked_ = true;
  is_dirty_ = false;
  resource_mode_ = MTL_TEXTURE_MODE_EXTERNAL;
}

gpu::MTLTexture::~MTLTexture()
{
  /* Unbind if bound. */
  if (is_bound_) {
    MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
    if (ctx != nullptr) {
      ctx->state_manager->texture_unbind(this);
    }
  }

  /* Free memory. */
  this->reset();
}

/* -------------------------------------------------------------------- */
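/* Re-creates the cached mip/swizzle texture view when the swizzle mask or mip range has been
 * modified since the view was last baked. */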
void gpu::MTLTexture::bake_mip_swizzle_view()
{
  if (texture_view_dirty_flags_) {
    /* If a texture view was previously created we release it. */
    if (mip_swizzle_view_ != nil) {
      [mip_swizzle_view_ release];
      mip_swizzle_view_ = nil;
    }

    /* Determine num slices. */
    int num_slices = 1;
    switch (type_) {
      case GPU_TEXTURE_1D_ARRAY:
        num_slices = h_;
        break;
      case GPU_TEXTURE_2D_ARRAY:
        num_slices = d_;
        break;
      case GPU_TEXTURE_CUBE:
        num_slices = 6;
        break;
      case GPU_TEXTURE_CUBE_ARRAY:
        /* d_ is equal to array levels * 6, including face count. */
        num_slices = d_;
        break;
      default:
        num_slices = 1;
        break;
    }

    int range_len = min_ii((mip_texture_max_level_ - mip_texture_base_level_) + 1,
                           texture_.mipmapLevelCount);
    BLI_assert(range_len > 0);
    BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
    BLI_assert(mip_texture_base_layer_ < num_slices);
    mip_swizzle_view_ = [texture_
        newTextureViewWithPixelFormat:texture_.pixelFormat
                          textureType:texture_.textureType
                               levels:NSMakeRange(mip_texture_base_level_, range_len)
                               slices:NSMakeRange(mip_texture_base_layer_, num_slices)
                              swizzle:mtl_swizzle_mask_];
    MTL_LOG_INFO(
        "Updating texture view - MIP TEXTURE BASE LEVEL: %d, MAX LEVEL: %d (Range len: %d)\n",
        mip_texture_base_level_,
        min_ii(mip_texture_max_level_, texture_.mipmapLevelCount),
        range_len);
    mip_swizzle_view_.label = [texture_ label];
    texture_view_dirty_flags_ = TEXTURE_VIEW_NOT_DIRTY;
  }
}

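/* Returns the texture handle to use for sampling/binding. If a custom swizzle mask or mip range
 * is active, this returns the (re-baked) texture view rather than the base resource. */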
id<MTLTexture> gpu::MTLTexture::get_metal_handle()
{
  /* Ensure up to date and baked. */
  this->ensure_baked();

  /* Verify VBO texture shares same buffer. */
  if (resource_mode_ == MTL_TEXTURE_MODE_VBO) {
    int r_offset = -1;

    /* TODO(Metal): Fetch buffer from MTLVertBuf when implemented. */
    id<MTLBuffer> buf = nil; /* vert_buffer_->get_metal_buffer(&r_offset); */
    BLI_assert(vert_buffer_mtl_ != nil);
    BLI_assert(buf == vert_buffer_mtl_ && r_offset == vert_buffer_offset_);

    UNUSED_VARS(buf);
    UNUSED_VARS_NDEBUG(r_offset);
  }

  if (is_baked_) {
    /* For explicit texture views, ensure we always return the texture view. */
    if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
      BLI_assert(mip_swizzle_view_ && "Texture view should always have a valid handle.");
    }

    if (mip_swizzle_view_ != nil || texture_view_dirty_flags_) {
      bake_mip_swizzle_view();
      return mip_swizzle_view_;
    }
    return texture_;
  }
  return nil;
}

id<MTLTexture> gpu::MTLTexture::get_metal_handle_base()
{
  /* Ensure up to date and baked. */
  this->ensure_baked();

  /* For explicit texture views, always return the texture view. */
  if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
    BLI_assert(mip_swizzle_view_ && "Texture view should always have a valid handle.");
    if (mip_swizzle_view_ != nil || texture_view_dirty_flags_) {
      bake_mip_swizzle_view();
    }
    return mip_swizzle_view_;
  }

  /* Return base handle. */
  if (is_baked_) {
    return texture_;
  }
  return nil;
}

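/* Copies a region between two textures of matching format using a blit command encoder. Offsets
 * and extents are expressed in pixels of the given source/destination mip levels. */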
void gpu::MTLTexture::blit(id<MTLBlitCommandEncoder> blit_encoder,
                           uint src_x_offset,
                           uint src_y_offset,
                           uint src_z_offset,
                           uint src_slice,
                           uint src_mip,
                           gpu::MTLTexture *dest,
                           uint dst_x_offset,
                           uint dst_y_offset,
                           uint dst_z_offset,
                           uint dst_slice,
                           uint dst_mip,
                           uint width,
                           uint height,
                           uint depth)
{
  BLI_assert(this && dest);
  BLI_assert(width > 0 && height > 0 && depth > 0);
  MTLSize src_size = MTLSizeMake(width, height, depth);
  MTLOrigin src_origin = MTLOriginMake(src_x_offset, src_y_offset, src_z_offset);
  MTLOrigin dst_origin = MTLOriginMake(dst_x_offset, dst_y_offset, dst_z_offset);

  if (this->format_get() != dest->format_get()) {
    MTL_LOG_WARNING(
        "[Warning] gpu::MTLTexture: Cannot copy between two textures of different types using a "
        "blit encoder. TODO: Support this operation\n");
    return;
  }

  /* TODO(Metal): Verify if we want to use the one with modified base-level/texture view
   * or not. */
  [blit_encoder copyFromTexture:this->get_metal_handle_base()
                    sourceSlice:src_slice
                    sourceLevel:src_mip
                   sourceOrigin:src_origin
                     sourceSize:src_size
                      toTexture:dest->get_metal_handle_base()
               destinationSlice:dst_slice
               destinationLevel:dst_mip
              destinationOrigin:dst_origin];
}

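/* Performs a render-based blit: binds the destination slice/mip as a framebuffer attachment and
 * draws a full-screen quad sampling from this texture. Pipeline state is saved and restored
 * around the draw call. */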
void gpu::MTLTexture::blit(gpu::MTLTexture *dst,
                           uint src_x_offset,
                           uint src_y_offset,
                           uint dst_x_offset,
                           uint dst_y_offset,
                           uint src_mip,
                           uint dst_mip,
                           uint dst_slice,
                           int width,
                           int height)
{
  BLI_assert(this->type_get() == dst->type_get());

  GPUShader *shader = fullscreen_blit_sh_get();
  BLI_assert(shader != nullptr);

  /* Fetch restore framebuffer and blit target framebuffer from destination texture. */
  GPUFrameBuffer *restore_fb = GPU_framebuffer_active_get();
  GPUFrameBuffer *blit_target_fb = dst->get_blit_framebuffer(dst_slice, dst_mip);
  BLI_assert(blit_target_fb);
  GPU_framebuffer_bind(blit_target_fb);

  /* Execute graphics draw call to perform the blit. */
  GPUBatch *quad = GPU_batch_preset_quad();

  GPU_batch_set_shader(quad, shader);

  float w = dst->width_get();
  float h = dst->height_get();

  GPU_shader_uniform_2f(shader, "fullscreen", w, h);
  GPU_shader_uniform_2f(shader, "src_offset", src_x_offset, src_y_offset);
  GPU_shader_uniform_2f(shader, "dst_offset", dst_x_offset, dst_y_offset);
  GPU_shader_uniform_2f(shader, "size", width, height);

  GPU_shader_uniform_1i(shader, "mip", src_mip);
  GPU_batch_texture_bind(quad, "imageTexture", wrap(this));

  /* Cache previous pipeline state. */
  bool depth_write_prev = GPU_depth_mask_get();
  uint stencil_mask_prev = GPU_stencil_mask_get();
  eGPUStencilTest stencil_test_prev = GPU_stencil_test_get();
  eGPUFaceCullTest culling_test_prev = GPU_face_culling_get();
  eGPUBlend blend_prev = GPU_blend_get();
  eGPUDepthTest depth_test_prev = GPU_depth_test_get();
  GPU_scissor_test(false);

  /* Apply state for blit draw call. */
  GPU_stencil_write_mask_set(0xFF);
  GPU_stencil_test(GPU_STENCIL_ALWAYS);
  GPU_face_culling(GPU_CULL_NONE);
  GPU_blend(GPU_BLEND_NONE);
  GPU_depth_mask(false);
  GPU_depth_test(GPU_DEPTH_ALWAYS);

  GPU_batch_draw(quad);

  /* Restore previous pipeline state. */
  GPU_depth_mask(depth_write_prev);
  GPU_stencil_write_mask_set(stencil_mask_prev);
  GPU_stencil_test(stencil_test_prev);
  GPU_face_culling(culling_test_prev);
  GPU_blend(blend_prev);
  GPU_depth_test(depth_test_prev);

  if (restore_fb != nullptr) {
    GPU_framebuffer_bind(restore_fb);
  }
  else {
    GPU_framebuffer_restore();
  }
}

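/* Returns (and lazily creates) a framebuffer wrapping the given slice/mip of this texture,
 * re-configuring the attachment when the requested slice or mip changes. */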
GPUFrameBuffer *gpu::MTLTexture::get_blit_framebuffer(uint dst_slice, uint dst_mip)
{
  /* Check if layer has changed. */
  bool update_attachments = false;
  if (!blit_fb_) {
    blit_fb_ = GPU_framebuffer_create("gpu_blit");
    update_attachments = true;
  }

  /* Check if current blit FB has the correct attachment properties. */
  if (blit_fb_) {
    if (blit_fb_slice_ != dst_slice || blit_fb_mip_ != dst_mip) {
      update_attachments = true;
    }
  }

  if (update_attachments) {
    if (format_flag_ & GPU_FORMAT_DEPTH || format_flag_ & GPU_FORMAT_STENCIL) {
      /* DEPTH TEX */
      GPU_framebuffer_ensure_config(
          &blit_fb_,
          {GPU_ATTACHMENT_TEXTURE_LAYER_MIP(wrap(static_cast<Texture *>(this)),
                                            static_cast<int>(dst_slice),
                                            static_cast<int>(dst_mip)),
           GPU_ATTACHMENT_NONE});
    }
    else {
      /* COLOR TEX */
      GPU_framebuffer_ensure_config(
          &blit_fb_,
          {GPU_ATTACHMENT_NONE,
           GPU_ATTACHMENT_TEXTURE_LAYER_MIP(wrap(static_cast<Texture *>(this)),
                                            static_cast<int>(dst_slice),
                                            static_cast<int>(dst_mip))});
    }
    blit_fb_slice_ = dst_slice;
    blit_fb_mip_ = dst_mip;
  }

  BLI_assert(blit_fb_);
  return blit_fb_;
}

MTLSamplerState gpu::MTLTexture::get_sampler_state()
{
  MTLSamplerState sampler_state;
  sampler_state.state = this->sampler_state;
  /* Add more parameters as needed */
  return sampler_state;
}

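/* Uploads a region of host data into the texture at the given mip. Uses a blit encoder when the
 * source data maps 1:1 onto the destination format; otherwise falls back to a compute-based
 * update kernel that performs component conversion. */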
void gpu::MTLTexture::update_sub(
    int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data)
{
  /* Fetch active context. */
  MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
  BLI_assert(ctx);

  /* Do not update texture view. */
  BLI_assert(resource_mode_ != MTL_TEXTURE_MODE_TEXTURE_VIEW);

  /* Ensure mipmaps. */
  this->ensure_mipmaps(mip);

  /* Ensure texture is baked. */
  this->ensure_baked();

  /* Safety checks. */
#if TRUST_NO_ONE
  BLI_assert(mip >= mip_min_ && mip <= mip_max_);
  BLI_assert(mip < texture_.mipmapLevelCount);
  BLI_assert(texture_.mipmapLevelCount >= mip_max_);
#endif

  /* DEPTH FLAG - Depth formats cannot use direct BLIT - pass off to their own routine which will
   * do a depth-only render. */
  bool is_depth_format = (format_flag_ & GPU_FORMAT_DEPTH);
  if (is_depth_format) {
    switch (type_) {

      case GPU_TEXTURE_2D: {
        update_sub_depth_2d(mip, offset, extent, type, data);
        return;
      }
      default:
        MTL_LOG_ERROR(
            "[Error] gpu::MTLTexture::update_sub not yet supported for other depth "
            "configurations\n");
        return;
    }
  }

  @autoreleasepool {
    /* Determine total size of INPUT Data. */
    int num_channels = to_component_len(format_);
    int input_bytes_per_pixel = num_channels * to_bytesize(type);
    int totalsize = 0;

    /* If unpack row length is used, size of input data uses the unpack row length, rather than
     * the image length. */
    int expected_update_w = ((ctx->pipeline_state.unpack_row_length == 0) ?
                                 extent[0] :
                                 ctx->pipeline_state.unpack_row_length);

    /* Ensure calculated total size isn't larger than remaining image data size. */
    switch (this->dimensions_count()) {
      case 1:
        totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1);
        break;
      case 2:
        totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1) * max_ii(extent[1], 1);
        break;
      case 3:
        totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1) * max_ii(extent[1], 1) *
                    max_ii(extent[2], 1);
        break;
      default:
        BLI_assert(false);
        break;
    }

    /* When unpack row length is used, provided data does not necessarily contain padding for
     * last row, so we only include up to the end of updated data. */
    if (ctx->pipeline_state.unpack_row_length > 0) {
      BLI_assert(ctx->pipeline_state.unpack_row_length >= extent[0]);
      totalsize -= (ctx->pipeline_state.unpack_row_length - extent[0]) * input_bytes_per_pixel;
    }

    /* Check. */
    BLI_assert(totalsize > 0);

    /* Determine expected destination data size. */
    MTLPixelFormat destination_format = gpu_texture_format_to_metal(format_);
    int expected_dst_bytes_per_pixel = get_mtl_format_bytesize(destination_format);
    int destination_num_channels = get_mtl_format_num_components(destination_format);

    /* Prepare specialisation struct (For texture update routine). */
    TextureUpdateRoutineSpecialisation compute_specialisation_kernel = {
        tex_data_format_to_msl_type_str(type),              /* INPUT DATA FORMAT */
        tex_data_format_to_msl_texture_template_type(type), /* TEXTURE DATA FORMAT */
        num_channels,
        destination_num_channels};

    /* Determine whether we can do direct BLIT or not. */
    bool can_use_direct_blit = true;
    if (expected_dst_bytes_per_pixel != input_bytes_per_pixel ||
        num_channels != destination_num_channels) {
      can_use_direct_blit = false;
    }

    if (is_depth_format) {
      if (type_ == GPU_TEXTURE_2D || type_ == GPU_TEXTURE_2D_ARRAY) {
        /* Workaround for crash in validation layer when blitting to depth2D target with
         * dimensions (1, 1, 1). */
        if (extent[0] == 1 && extent[1] == 1 && extent[2] == 1 && totalsize == 4) {
          can_use_direct_blit = false;
        }
      }
    }

    if (format_ == GPU_SRGB8_A8 && !can_use_direct_blit) {
      MTL_LOG_WARNING(
          "SRGB data upload does not work correctly using compute upload. "
          "texname '%s'\n",
          name_);
    }

    /* Safety Checks. */
    if (type == GPU_DATA_UINT_24_8 || type == GPU_DATA_10_11_11_REV) {
      BLI_assert(can_use_direct_blit &&
                 "Special input data type must be a 1-1 mapping with destination texture as it "
                 "cannot easily be split");
    }

    /* Debug and verification. */
    if (!can_use_direct_blit) {
      MTL_LOG_WARNING(
          "gpu::MTLTexture::update_sub supplied bpp is %d bytes (%d components per "
          "pixel), but backing texture bpp is %d bytes (%d components per pixel) "
          "(TODO(Metal): Channel Conversion needed) (w: %d, h: %d, d: %d)\n",
          input_bytes_per_pixel,
          num_channels,
          expected_dst_bytes_per_pixel,
          destination_num_channels,
          w_,
          h_,
          d_);

      /* Check mip compatibility. */
      if (mip != 0) {
        MTL_LOG_ERROR(
            "[Error]: Updating texture layers other than mip=0 when data is mismatched is not "
            "possible in METAL on macOS using texture->write\n");
        return;
      }

      /* Check Format write-ability. */
      if (mtl_format_get_writeable_view_format(destination_format) == MTLPixelFormatInvalid) {
        MTL_LOG_ERROR(
            "[Error]: Updating texture -- destination MTLPixelFormat '%d' does not support write "
            "operations, and no suitable TextureView format exists.\n",
            *(int *)(&destination_format));
        return;
      }
    }

    /* Prepare staging buffer for data. */
    id<MTLBuffer> staging_buffer = nil;
    uint64_t staging_buffer_offset = 0;

    /* Fetch allocation from scratch buffer. */
    MTLTemporaryBuffer allocation =
        ctx->get_scratchbuffer_manager().scratch_buffer_allocate_range_aligned(totalsize, 256);
    memcpy(allocation.data, data, totalsize);
    staging_buffer = allocation.metal_buffer;
    staging_buffer_offset = allocation.buffer_offset;

    /* Common Properties. */
    MTLPixelFormat compatible_write_format = mtl_format_get_writeable_view_format(
        destination_format);

    /* Some texture formats are not writeable so we need to use a texture view. */
    if (compatible_write_format == MTLPixelFormatInvalid) {
      MTL_LOG_ERROR("Cannot use compute update blit with texture-view format: %d\n",
                    *((int *)&compatible_write_format));
      return;
    }
    id<MTLTexture> texture_handle =
        ((compatible_write_format == destination_format)) ?
            texture_ :
            [texture_ newTextureViewWithPixelFormat:compatible_write_format];

    /* Prepare command encoders. */
    id<MTLBlitCommandEncoder> blit_encoder = nil;
    id<MTLComputeCommandEncoder> compute_encoder = nil;
    if (can_use_direct_blit) {
      blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
      BLI_assert(blit_encoder != nil);
    }
    else {
      compute_encoder = ctx->main_command_buffer.ensure_begin_compute_encoder();
      BLI_assert(compute_encoder != nil);
    }

    switch (type_) {

      /* 1D */
      case GPU_TEXTURE_1D:
      case GPU_TEXTURE_1D_ARRAY: {
        if (can_use_direct_blit) {
          /* Use Blit based update. */
          int bytes_per_row = expected_dst_bytes_per_pixel *
                              ((ctx->pipeline_state.unpack_row_length == 0) ?
                                   extent[0] :
                                   ctx->pipeline_state.unpack_row_length);
          int bytes_per_image = bytes_per_row;
          int max_array_index = ((type_ == GPU_TEXTURE_1D_ARRAY) ? extent[1] : 1);
          for (int array_index = 0; array_index < max_array_index; array_index++) {

            int buffer_array_offset = staging_buffer_offset + (bytes_per_image * array_index);
            [blit_encoder
                     copyFromBuffer:staging_buffer
                       sourceOffset:buffer_array_offset
                  sourceBytesPerRow:bytes_per_row
                sourceBytesPerImage:bytes_per_image
                         sourceSize:MTLSizeMake(extent[0], 1, 1)
                          toTexture:texture_handle
                   destinationSlice:((type_ == GPU_TEXTURE_1D_ARRAY) ? (array_index + offset[1]) :
                                                                       0)
                   destinationLevel:mip
                  destinationOrigin:MTLOriginMake(offset[0], 0, 0)];
          }
        }
        else {
          /* Use Compute Based update. */
          if (type_ == GPU_TEXTURE_1D) {
            id<MTLComputePipelineState> pso = texture_update_1d_get_kernel(
                compute_specialisation_kernel);
            TextureUpdateParams params = {mip,
                                          {extent[0], 1, 1},
                                          {offset[0], 0, 0},
                                          ((ctx->pipeline_state.unpack_row_length == 0) ?
                                               extent[0] :
                                               ctx->pipeline_state.unpack_row_length)};
            [compute_encoder setComputePipelineState:pso];
            [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
            [compute_encoder setBuffer:staging_buffer offset:staging_buffer_offset atIndex:1];
            [compute_encoder setTexture:texture_handle atIndex:0];
            [compute_encoder
                      dispatchThreads:MTLSizeMake(extent[0], 1, 1) /* Width, Height, Layer */
                threadsPerThreadgroup:MTLSizeMake(64, 1, 1)];
          }
          else if (type_ == GPU_TEXTURE_1D_ARRAY) {
            id<MTLComputePipelineState> pso = texture_update_1d_array_get_kernel(
                compute_specialisation_kernel);
            TextureUpdateParams params = {mip,
                                          {extent[0], extent[1], 1},
                                          {offset[0], offset[1], 0},
                                          ((ctx->pipeline_state.unpack_row_length == 0) ?
                                               extent[0] :
                                               ctx->pipeline_state.unpack_row_length)};
            [compute_encoder setComputePipelineState:pso];
            [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
            [compute_encoder setBuffer:staging_buffer offset:staging_buffer_offset atIndex:1];
            [compute_encoder setTexture:texture_handle atIndex:0];
            [compute_encoder
                      dispatchThreads:MTLSizeMake(extent[0], extent[1], 1) /* Width, layers */
                threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];
          }
        }
      } break;

      /* 2D */
      case GPU_TEXTURE_2D:
      case GPU_TEXTURE_2D_ARRAY: {
        if (can_use_direct_blit) {
          /* Use Blit encoder update. */
          int bytes_per_row = expected_dst_bytes_per_pixel *
                              ((ctx->pipeline_state.unpack_row_length == 0) ?
                                   extent[0] :
                                   ctx->pipeline_state.unpack_row_length);
          int bytes_per_image = bytes_per_row * extent[1];

          int texture_array_relative_offset = 0;
          int base_slice = (type_ == GPU_TEXTURE_2D_ARRAY) ? offset[2] : 0;
          int final_slice = base_slice + ((type_ == GPU_TEXTURE_2D_ARRAY) ? extent[2] : 1);

          for (int array_slice = base_slice; array_slice < final_slice; array_slice++) {

            if (array_slice > 0) {
              BLI_assert(type_ == GPU_TEXTURE_2D_ARRAY);
              BLI_assert(array_slice < d_);
            }

            [blit_encoder copyFromBuffer:staging_buffer
                            sourceOffset:staging_buffer_offset + texture_array_relative_offset
                       sourceBytesPerRow:bytes_per_row
                     sourceBytesPerImage:bytes_per_image
                              sourceSize:MTLSizeMake(extent[0], extent[1], 1)
                               toTexture:texture_handle
                        destinationSlice:array_slice
                        destinationLevel:mip
                       destinationOrigin:MTLOriginMake(offset[0], offset[1], 0)];

            texture_array_relative_offset += bytes_per_image;
          }
        }
        else {
          /* Use Compute texture update. */
          if (type_ == GPU_TEXTURE_2D) {
            id<MTLComputePipelineState> pso = texture_update_2d_get_kernel(
                compute_specialisation_kernel);
            TextureUpdateParams params = {mip,
                                          {extent[0], extent[1], 1},
                                          {offset[0], offset[1], 0},
                                          ((ctx->pipeline_state.unpack_row_length == 0) ?
                                               extent[0] :
                                               ctx->pipeline_state.unpack_row_length)};
            [compute_encoder setComputePipelineState:pso];
            [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
            [compute_encoder setBuffer:staging_buffer offset:staging_buffer_offset atIndex:1];
            [compute_encoder setTexture:texture_handle atIndex:0];
            [compute_encoder
                      dispatchThreads:MTLSizeMake(extent[0], extent[1], 1) /* Width, Height */
                threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];
          }
          else if (type_ == GPU_TEXTURE_2D_ARRAY) {
            id<MTLComputePipelineState> pso = texture_update_2d_array_get_kernel(
                compute_specialisation_kernel);
            TextureUpdateParams params = {mip,
                                          {extent[0], extent[1], extent[2]},
                                          {offset[0], offset[1], offset[2]},
                                          ((ctx->pipeline_state.unpack_row_length == 0) ?
                                               extent[0] :
                                               ctx->pipeline_state.unpack_row_length)};
            [compute_encoder setComputePipelineState:pso];
            [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
            [compute_encoder setBuffer:staging_buffer offset:staging_buffer_offset atIndex:1];
            [compute_encoder setTexture:texture_handle atIndex:0];
            [compute_encoder dispatchThreads:MTLSizeMake(extent[0],
                                                         extent[1],
                                                         extent[2]) /* Width, Height, Layer */
                       threadsPerThreadgroup:MTLSizeMake(4, 4, 4)];
          }
        }

      } break;

      /* 3D */
      case GPU_TEXTURE_3D: {
        if (can_use_direct_blit) {
          int bytes_per_row = expected_dst_bytes_per_pixel *
                              ((ctx->pipeline_state.unpack_row_length == 0) ?
                                   extent[0] :
                                   ctx->pipeline_state.unpack_row_length);
          int bytes_per_image = bytes_per_row * extent[1];
          [blit_encoder copyFromBuffer:staging_buffer
                          sourceOffset:staging_buffer_offset
                     sourceBytesPerRow:bytes_per_row
                   sourceBytesPerImage:bytes_per_image
                            sourceSize:MTLSizeMake(extent[0], extent[1], extent[2])
                             toTexture:texture_handle
                      destinationSlice:0
                      destinationLevel:mip
                     destinationOrigin:MTLOriginMake(offset[0], offset[1], offset[2])];
        }
        else {
          id<MTLComputePipelineState> pso = texture_update_3d_get_kernel(
              compute_specialisation_kernel);
          TextureUpdateParams params = {mip,
                                        {extent[0], extent[1], extent[2]},
                                        {offset[0], offset[1], offset[2]},
                                        ((ctx->pipeline_state.unpack_row_length == 0) ?
                                             extent[0] :
                                             ctx->pipeline_state.unpack_row_length)};
          [compute_encoder setComputePipelineState:pso];
          [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
          [compute_encoder setBuffer:staging_buffer offset:staging_buffer_offset atIndex:1];
          [compute_encoder setTexture:texture_handle atIndex:0];
          [compute_encoder
                    dispatchThreads:MTLSizeMake(
                                        extent[0], extent[1], extent[2]) /* Width, Height, Depth */
              threadsPerThreadgroup:MTLSizeMake(4, 4, 4)];
        }
      } break;

      /* CUBE */
      case GPU_TEXTURE_CUBE: {
        if (can_use_direct_blit) {
          int bytes_per_row = expected_dst_bytes_per_pixel *
                              ((ctx->pipeline_state.unpack_row_length == 0) ?
                                   extent[0] :
                                   ctx->pipeline_state.unpack_row_length);
          int bytes_per_image = bytes_per_row * extent[1];

          int texture_array_relative_offset = 0;

          /* Iterate over all cube faces in range (offset[2], offset[2] + extent[2]). */
          for (int i = 0; i < extent[2]; i++) {
            int face_index = offset[2] + i;

            [blit_encoder copyFromBuffer:staging_buffer
                            sourceOffset:staging_buffer_offset + texture_array_relative_offset
                       sourceBytesPerRow:bytes_per_row
                     sourceBytesPerImage:bytes_per_image
                              sourceSize:MTLSizeMake(extent[0], extent[1], 1)
                               toTexture:texture_handle
                        destinationSlice:face_index /* = cubeFace+arrayIndex*6 */
                        destinationLevel:mip
                       destinationOrigin:MTLOriginMake(offset[0], offset[1], 0)];
            texture_array_relative_offset += bytes_per_image;
          }
        }
        else {
          MTL_LOG_ERROR(
              "TODO(Metal): Support compute texture update for GPU_TEXTURE_CUBE %d, %d, %d\n",
              w_,
              h_,
              d_);
        }
      } break;

      case GPU_TEXTURE_CUBE_ARRAY: {
        if (can_use_direct_blit) {

          int bytes_per_row = expected_dst_bytes_per_pixel *
                              ((ctx->pipeline_state.unpack_row_length == 0) ?
                                   extent[0] :
                                   ctx->pipeline_state.unpack_row_length);
          int bytes_per_image = bytes_per_row * extent[1];

          /* Upload to all faces between offset[2] (which is zero in most cases) AND extent[2]. */
          int texture_array_relative_offset = 0;
          for (int i = 0; i < extent[2]; i++) {
            int face_index = offset[2] + i;
            [blit_encoder copyFromBuffer:staging_buffer
                            sourceOffset:staging_buffer_offset + texture_array_relative_offset
                       sourceBytesPerRow:bytes_per_row
                     sourceBytesPerImage:bytes_per_image
                              sourceSize:MTLSizeMake(extent[0], extent[1], 1)
                               toTexture:texture_handle
                        destinationSlice:face_index /* = cubeFace+arrayIndex*6. */
                        destinationLevel:mip
                       destinationOrigin:MTLOriginMake(offset[0], offset[1], 0)];
            texture_array_relative_offset += bytes_per_image;
          }
        }
        else {
          MTL_LOG_ERROR(
              "TODO(Metal): Support compute texture update for GPU_TEXTURE_CUBE_ARRAY %d, %d, "
              "%d\n",
              w_,
              h_,
              d_);
        }
      } break;

      case GPU_TEXTURE_BUFFER: {
        /* TODO(Metal): Support Data upload to TEXTURE BUFFER
         * Data uploads generally happen via GPUVertBuf instead. */
        BLI_assert(false);
      } break;

      case GPU_TEXTURE_ARRAY:
        /* Not an actual format - modifier flag for others. */
        return;
    }

    /* Finalize Blit Encoder. */
    if (can_use_direct_blit) {

      /* Textures which use MTLStorageModeManaged need to have updated contents
       * synced back to CPU to avoid an automatic flush overwriting contents. */
      if (texture_.storageMode == MTLStorageModeManaged) {
        [blit_encoder synchronizeResource:texture_buffer_];
      }
    }
    else {
      /* Textures which use MTLStorageModeManaged need to have updated contents
       * synced back to CPU to avoid an automatic flush overwriting contents. */
      if (texture_.storageMode == MTLStorageModeManaged) {
        blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
        [blit_encoder synchronizeResource:texture_buffer_];
      }
    }
  }
}

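/* Grows the texture's mip count to cover `miplvl` (clamped to the maximum level count for the
 * texture's dimensions), flagging the texture for re-baking if the baked resource does not have
 * enough levels. */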
void gpu::MTLTexture::ensure_mipmaps(int miplvl)
{
  /* Do not update texture view. */
  BLI_assert(resource_mode_ != MTL_TEXTURE_MODE_TEXTURE_VIEW);

  /* Clamp level to maximum. */
  int effective_h = (type_ == GPU_TEXTURE_1D_ARRAY) ? 0 : h_;
  int effective_d = (type_ != GPU_TEXTURE_3D) ? 0 : d_;
  int max_dimension = max_iii(w_, effective_h, effective_d);
  int max_miplvl = floor(log2(max_dimension));
  miplvl = min_ii(max_miplvl, miplvl);

  /* Increase mipmap level. */
  if (mipmaps_ < miplvl) {
    mipmaps_ = miplvl;

    /* Check if baked. */
    if (is_baked_ && mipmaps_ > mtl_max_mips_) {
      is_dirty_ = true;
      MTL_LOG_WARNING("Texture requires regenerating due to increase in mip-count\n");
    }
  }
  this->mip_range_set(0, mipmaps_);
}

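/* Generates the full mip chain contents on the GPU using a blit command encoder. Depth formats
 * do not support mipmap generation and are skipped with a warning. */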
void gpu::MTLTexture::generate_mipmap()
{
  /* Fetch Active Context. */
  MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
  BLI_assert(ctx);

  if (!ctx->device) {
    MTL_LOG_ERROR("Cannot Generate mip-maps -- metal device invalid\n");
    BLI_assert(false);
    return;
  }

  /* Ensure mipmaps. */
  this->ensure_mipmaps(9999);

  /* Ensure texture is baked. */
  this->ensure_baked();
  BLI_assert(is_baked_ && texture_ && "MTLTexture is not valid");

  if (mipmaps_ == 1 || mtl_max_mips_ == 1) {
    MTL_LOG_WARNING("Call to generate mipmaps on texture with 'mipmaps_=1'\n");
    return;
  }

  /* Verify if we can perform mipmap generation. */
  if (format_ == GPU_DEPTH_COMPONENT32F || format_ == GPU_DEPTH_COMPONENT24 ||
      format_ == GPU_DEPTH_COMPONENT16 || format_ == GPU_DEPTH32F_STENCIL8 ||
      format_ == GPU_DEPTH24_STENCIL8) {
    MTL_LOG_WARNING("Cannot generate mipmaps for textures using DEPTH formats\n");
    return;
  }

  @autoreleasepool {

    /* Fetch active BlitCommandEncoder. */
    id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
    if (G.debug & G_DEBUG_GPU) {
      [enc insertDebugSignpost:@"Generate MipMaps"];
    }
    [enc generateMipmapsForTexture:texture_];
  }
}

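/* Copies the full contents of this texture into `dst`, which must match in size, format and
 * type. Layered/3D textures use a whole-texture copy; other types go through the blit path. */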
void gpu::MTLTexture::copy_to(Texture *dst)
{
  /* Safety Checks. */
  gpu::MTLTexture *mt_src = this;
  gpu::MTLTexture *mt_dst = static_cast<gpu::MTLTexture *>(dst);
  BLI_assert((mt_dst->w_ == mt_src->w_) && (mt_dst->h_ == mt_src->h_) &&
             (mt_dst->d_ == mt_src->d_));
  BLI_assert(mt_dst->format_ == mt_src->format_);
  BLI_assert(mt_dst->type_ == mt_src->type_);

  UNUSED_VARS_NDEBUG(mt_src);

  /* Fetch active context. */
  MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
  BLI_assert(ctx);

  /* Ensure texture is baked. */
  this->ensure_baked();

  @autoreleasepool {
    /* Setup blit encoder. */
    id<MTLBlitCommandEncoder> blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
    BLI_assert(blit_encoder != nil);

    /* TODO(Metal): Consider supporting multiple mip levels IF the GL implementation
     * follows, currently it does not. */
    int mip = 0;

    /* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
    int extent[3] = {1, 1, 1};
    this->mip_size_get(mip, extent);

    switch (mt_dst->type_) {
      case GPU_TEXTURE_2D_ARRAY:
      case GPU_TEXTURE_CUBE_ARRAY:
      case GPU_TEXTURE_3D: {
        /* Do full texture copy for 3D textures. */
        BLI_assert(mt_dst->d_ == d_);
        [blit_encoder copyFromTexture:this->get_metal_handle_base()
                            toTexture:mt_dst->get_metal_handle_base()];
      } break;
      default: {
        int slice = 0;
        this->blit(blit_encoder,
                   0,
                   0,
                   0,
                   slice,
                   mip,
                   mt_dst,
                   0,
                   0,
                   0,
                   slice,
                   mip,
                   extent[0],
                   extent[1],
                   extent[2]);
      } break;
    }
  }
}

void gpu::MTLTexture::clear(eGPUDataFormat data_format, const void *data)
{
  /* Ensure texture is baked. */
  this->ensure_baked();

  /* Create clear framebuffer. */
  GPUFrameBuffer *prev_fb = GPU_framebuffer_active_get();
  FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(this->get_blit_framebuffer(0, 0));
  fb->bind(true);
  fb->clear_attachment(this->attachment_type(0), data_format, data);
  GPU_framebuffer_bind(prev_fb);
}

static MTLTextureSwizzle swizzle_to_mtl(const char swizzle)
{
  switch (swizzle) {
    default:
    case 'x':
    case 'r':
      return MTLTextureSwizzleRed;
    case 'y':
    case 'g':
      return MTLTextureSwizzleGreen;
    case 'z':
    case 'b':
      return MTLTextureSwizzleBlue;
    case 'w':
    case 'a':
      return MTLTextureSwizzleAlpha;
    case '0':
      return MTLTextureSwizzleZero;
    case '1':
      return MTLTextureSwizzleOne;
  }
}

void gpu::MTLTexture::swizzle_set(const char swizzle_mask[4])
{
  if (memcmp(tex_swizzle_mask_, swizzle_mask, 4) != 0) {
    memcpy(tex_swizzle_mask_, swizzle_mask, 4);

    /* Creating the swizzle mask and flagging as dirty if changed. */
    MTLTextureSwizzleChannels new_swizzle_mask = MTLTextureSwizzleChannelsMake(
        swizzle_to_mtl(swizzle_mask[0]),
        swizzle_to_mtl(swizzle_mask[1]),
        swizzle_to_mtl(swizzle_mask[2]),
        swizzle_to_mtl(swizzle_mask[3]));

    mtl_swizzle_mask_ = new_swizzle_mask;
    texture_view_dirty_flags_ |= TEXTURE_VIEW_SWIZZLE_DIRTY;
  }
}

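/* Sets the mip range used to clamp LOD during sampling, and marks the texture view dirty so the
 * new base/max levels are applied when the view is next baked. */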
void gpu::MTLTexture::mip_range_set(int min, int max)
{
  BLI_assert(min <= max && min >= 0 && max <= mipmaps_);

  /* NOTE:
   * - mip_min_ and mip_max_ are used to Clamp LODs during sampling.
   * - Given functions like Framebuffer::recursive_downsample modifies the mip range
   *   between each layer, we do not want to be re-baking the texture.
   * - For the time being, we are going to just need to generate a FULL mipmap chain
   *   as we do not know ahead of time whether mipmaps will be used.
   *
   * TODO(Metal): Add texture initialization flag to determine whether mipmaps are used
   * or not. Will be important for saving memory for big textures. */
  mip_min_ = min;
  mip_max_ = max;

  if ((type_ == GPU_TEXTURE_1D || type_ == GPU_TEXTURE_1D_ARRAY || type_ == GPU_TEXTURE_BUFFER) &&
      max > 1) {

    MTL_LOG_ERROR(
        "MTLTexture of type TEXTURE_1D, TEXTURE_1D_ARRAY or TEXTURE_BUFFER cannot have a "
        "mipcount greater than 1\n");
    mip_min_ = 0;
    mip_max_ = 0;
    mipmaps_ = 0;
    BLI_assert(false);
  }

  /* Mip range for texture view. */
  mip_texture_base_level_ = mip_min_;
  mip_texture_max_level_ = mip_max_;
  texture_view_dirty_flags_ |= TEXTURE_VIEW_MIP_DIRTY;
}

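/* Reads back the full extent of the given mip into a newly allocated host buffer. The caller
 * takes ownership of the returned memory. */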
void *gpu::MTLTexture::read(int mip, eGPUDataFormat type)
{
  /* Prepare Array for return data. */
  BLI_assert(!(format_flag_ & GPU_FORMAT_COMPRESSED));
  BLI_assert(mip <= mipmaps_);

  /* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
  int extent[3] = {1, 1, 1};
  this->mip_size_get(mip, extent);

  size_t sample_len = extent[0] * extent[1] * extent[2];
  size_t sample_size = to_bytesize(format_, type);
  size_t texture_size = sample_len * sample_size;
  int num_channels = to_component_len(format_);

  void *data = MEM_mallocN(texture_size + 8, "GPU_texture_read");

  /* Ensure texture is baked. */
  if (is_baked_) {
    this->read_internal(
        mip, 0, 0, 0, extent[0], extent[1], extent[2], type, num_channels, texture_size + 8, data);
  }
  else {
    /* Clear return values? */
    MTL_LOG_WARNING("MTLTexture::read - reading from texture with no image data\n");
  }

  return data;
}

/* Fetch the raw buffer data from a texture and copy to CPU host ptr. */
void gpu::MTLTexture::read_internal(int mip,
                                    int x_off,
                                    int y_off,
                                    int z_off,
                                    int width,
                                    int height,
                                    int depth,
                                    eGPUDataFormat desired_output_format,
                                    int num_output_components,
                                    int debug_data_size,
                                    void *r_data)
{
  /* Verify textures are baked. */
  if (!is_baked_) {
    MTL_LOG_WARNING("gpu::MTLTexture::read_internal - Trying to read from a non-baked texture!\n");
    return;
  }
  /* Fetch active context. */
  MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
  BLI_assert(ctx);

  /* Calculate desired output size. */
  int num_channels = to_component_len(format_);
  BLI_assert(num_output_components <= num_channels);
  uint desired_output_bpp = num_output_components * to_bytesize(desired_output_format);

  /* Calculate Metal data output for trivial copy. */
  uint image_bpp = get_mtl_format_bytesize(texture_.pixelFormat);
  uint image_components = get_mtl_format_num_components(texture_.pixelFormat);
  bool is_depth_format = (format_flag_ & GPU_FORMAT_DEPTH);

  /* Verify if we need to use compute read. */
  eGPUDataFormat data_format = to_mtl_internal_data_format(this->format_get());
  bool format_conversion_needed = (data_format != desired_output_format);
  bool can_use_simple_read = (desired_output_bpp == image_bpp) && (!format_conversion_needed) &&
                             (num_output_components == image_components);

  /* Depth must be read using the compute shader -- Some safety checks to verify that params are
   * correct. */
  if (is_depth_format) {
    can_use_simple_read = false;
    /* TODO(Metal): Stencil data write not yet supported, so force components to one. */
    image_components = 1;
    BLI_assert(num_output_components == 1);
    BLI_assert(image_components == 1);
    BLI_assert(data_format == GPU_DATA_FLOAT || data_format == GPU_DATA_UINT_24_8);
    BLI_assert(validate_data_format_mtl(format_, data_format));
  }

  /* SPECIAL Workaround for R11G11B10 textures requesting a read using: GPU_DATA_10_11_11_REV. */
  if (desired_output_format == GPU_DATA_10_11_11_REV) {
    BLI_assert(format_ == GPU_R11F_G11F_B10F);

    /* Override parameters - we'll be able to use simple copy, as bpp will match at 4 bytes. */
    image_bpp = sizeof(int);
    image_components = 1;
    desired_output_bpp = sizeof(int);
    num_output_components = 1;

    data_format = GPU_DATA_INT;
    format_conversion_needed = false;
    can_use_simple_read = true;
  }

  /* Determine size of output data. */
  uint bytes_per_row = desired_output_bpp * width;
  uint bytes_per_image = bytes_per_row * height;
  uint total_bytes = bytes_per_image * depth;

  if (can_use_simple_read) {
    /* DEBUG check that if direct copy is being used, then both the expected output size matches
     * the METAL texture size. */
    BLI_assert(
        ((num_output_components * to_bytesize(desired_output_format)) == desired_output_bpp) &&
        (desired_output_bpp == image_bpp));
  }
  /* DEBUG check that the allocated data size matches the bytes we expect. */
  BLI_assert(total_bytes <= debug_data_size);

  /* Fetch allocation from scratch buffer. */
  id<MTLBuffer> destination_buffer = nil;
  uint destination_offset = 0;
  void *destination_buffer_host_ptr = nullptr;

  /* TODO(Metal): Optimize buffer allocation. */
  MTLResourceOptions bufferOptions = MTLResourceStorageModeManaged;
  destination_buffer = [ctx->device newBufferWithLength:max_ii(total_bytes, 256)
                                                options:bufferOptions];
  destination_offset = 0;
  destination_buffer_host_ptr = (void *)((uint8_t *)([destination_buffer contents]) +
                                         destination_offset);

  /* Prepare specialisation struct (For non-trivial texture read routine). */
  int depth_format_mode = 0;
  if (is_depth_format) {
    depth_format_mode = 1;
    switch (desired_output_format) {
      case GPU_DATA_FLOAT:
        depth_format_mode = 1;
        break;
      case GPU_DATA_UINT_24_8:
        depth_format_mode = 2;
        break;
      case GPU_DATA_UINT:
        depth_format_mode = 4;
        break;
      default:
        BLI_assert(false && "Unhandled depth read format case");
        break;
    }
  }

  TextureReadRoutineSpecialisation compute_specialisation_kernel = {
      tex_data_format_to_msl_texture_template_type(data_format), /* TEXTURE DATA TYPE */
      tex_data_format_to_msl_type_str(desired_output_format),    /* OUTPUT DATA TYPE */
      num_channels,                                              /* TEXTURE COMPONENT COUNT */
      num_output_components,                                     /* OUTPUT DATA COMPONENT COUNT */
      depth_format_mode};

  bool copy_successful = false;
  @autoreleasepool {

    /* TODO(Metal): Verify whether we need some form of barrier here to ensure reads
     * happen after work with associated texture is finished. */
    GPU_finish();

    /* Texture View for SRGB special case. */
    id<MTLTexture> read_texture = texture_;
    if (format_ == GPU_SRGB8_A8) {
      read_texture = [texture_ newTextureViewWithPixelFormat:MTLPixelFormatRGBA8Unorm];
    }

    /* Perform per-texture type read. */
    switch (type_) {
      case GPU_TEXTURE_2D: {
        if (can_use_simple_read) {
          /* Use Blit Encoder READ. */
          id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
          if (G.debug & G_DEBUG_GPU) {
            [enc insertDebugSignpost:@"GPUTextureRead"];
          }
          [enc copyFromTexture:read_texture
                           sourceSlice:0
                           sourceLevel:mip
                          sourceOrigin:MTLOriginMake(x_off, y_off, 0)
                            sourceSize:MTLSizeMake(width, height, 1)
                              toBuffer:destination_buffer
                     destinationOffset:destination_offset
                destinationBytesPerRow:bytes_per_row
              destinationBytesPerImage:bytes_per_image];
          [enc synchronizeResource:destination_buffer];
          copy_successful = true;
        }
        else {

          /* Use Compute READ. */
          id<MTLComputeCommandEncoder> compute_encoder =
              ctx->main_command_buffer.ensure_begin_compute_encoder();
          id<MTLComputePipelineState> pso = texture_read_2d_get_kernel(
              compute_specialisation_kernel);
          TextureReadParams params = {
              mip,
              {width, height, 1},
              {x_off, y_off, 0},
          };
          [compute_encoder setComputePipelineState:pso];
          [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
          [compute_encoder setBuffer:destination_buffer offset:destination_offset atIndex:1];
          [compute_encoder setTexture:read_texture atIndex:0];
          [compute_encoder dispatchThreads:MTLSizeMake(width, height, 1) /* Width, Height, Layer */
                     threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];

          /* Use Blit encoder to synchronize results back to CPU. */
          id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
          if (G.debug & G_DEBUG_GPU) {
            [enc insertDebugSignpost:@"GPUTextureRead-syncResource"];
          }
          [enc synchronizeResource:destination_buffer];
          copy_successful = true;
        }
      } break;

      case GPU_TEXTURE_2D_ARRAY: {
        if (can_use_simple_read) {
          /* Use Blit Encoder READ. */
          id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
          if (G.debug & G_DEBUG_GPU) {
            [enc insertDebugSignpost:@"GPUTextureRead"];
          }
          int base_slice = z_off;
          int final_slice = base_slice + depth;
          int texture_array_relative_offset = 0;

          for (int array_slice = base_slice; array_slice < final_slice; array_slice++) {
            [enc copyFromTexture:read_texture
                             sourceSlice:0
                             sourceLevel:mip
                            sourceOrigin:MTLOriginMake(x_off, y_off, 0)
                              sourceSize:MTLSizeMake(width, height, 1)
                                toBuffer:destination_buffer
                       destinationOffset:destination_offset + texture_array_relative_offset
                  destinationBytesPerRow:bytes_per_row
                destinationBytesPerImage:bytes_per_image];
            [enc synchronizeResource:destination_buffer];

            texture_array_relative_offset += bytes_per_image;
          }
          copy_successful = true;
        }
        else {

          /* Use Compute READ. */
          id<MTLComputeCommandEncoder> compute_encoder =
              ctx->main_command_buffer.ensure_begin_compute_encoder();
          id<MTLComputePipelineState> pso = texture_read_2d_array_get_kernel(
              compute_specialisation_kernel);
          TextureReadParams params = {
              mip,
              {width, height, depth},
              {x_off, y_off, z_off},
          };
          [compute_encoder setComputePipelineState:pso];
          [compute_encoder setBytes:&params length:sizeof(params) atIndex:0];
          [compute_encoder setBuffer:destination_buffer offset:destination_offset atIndex:1];
          [compute_encoder setTexture:read_texture atIndex:0];
          [compute_encoder
                    dispatchThreads:MTLSizeMake(width, height, depth) /* Width, Height, Layer */
              threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];

          /* Use Blit encoder to synchronize results back to CPU. */
          id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
          if (G.debug & G_DEBUG_GPU) {
            [enc insertDebugSignpost:@"GPUTextureRead-syncResource"];
          }
          [enc synchronizeResource:destination_buffer];
          copy_successful = true;
        }
      } break;

      case GPU_TEXTURE_CUBE_ARRAY: {
        if (can_use_simple_read) {
          id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
          if (G.debug & G_DEBUG_GPU) {
            [enc insertDebugSignpost:@"GPUTextureRead"];
          }
          int base_slice = z_off;
          int final_slice = base_slice + depth;
          int texture_array_relative_offset = 0;

          for (int array_slice = base_slice; array_slice < final_slice; array_slice++) {
            [enc copyFromTexture:read_texture
                             sourceSlice:array_slice
                             sourceLevel:mip
                            sourceOrigin:MTLOriginMake(x_off, y_off, 0)
                              sourceSize:MTLSizeMake(width, height, 1)
                                toBuffer:destination_buffer
                       destinationOffset:destination_offset + texture_array_relative_offset
                  destinationBytesPerRow:bytes_per_row
                destinationBytesPerImage:bytes_per_image];
            [enc synchronizeResource:destination_buffer];

            texture_array_relative_offset += bytes_per_image;
          }
          MTL_LOG_INFO("Copying texture data to buffer GPU_TEXTURE_CUBE_ARRAY\n");
          copy_successful = true;
        }
        else {
          MTL_LOG_ERROR("TODO(Metal): unsupported compute copy of texture cube array");
        }
      } break;

      default:
        MTL_LOG_WARNING(
            "[Warning] gpu::MTLTexture::read_internal simple-copy not yet supported for texture "
            "type: %d\n",
            (int)type_);
        break;
    }

    if (copy_successful) {
      /* Ensure GPU copy commands have completed. */
      GPU_finish();

      /* Copy data from Shared Memory into ptr. */
      memcpy(r_data, destination_buffer_host_ptr, total_bytes);
      MTL_LOG_INFO("gpu::MTLTexture::read_internal success! %d bytes read\n", total_bytes);
    }
    else {
      MTL_LOG_WARNING(
          "[Warning] gpu::MTLTexture::read_internal not yet supported for this config -- data "
          "format different (src %d bytes, dst %d bytes) (src format: %d, dst format: %d), or "
          "varying component counts (src %d, dst %d)\n",
          image_bpp,
          desired_output_bpp,
          (int)data_format,
          (int)desired_output_format,
          image_components,
          num_output_components);
    }
  }
}

/* Remove once no longer required -- will just return 0 for now in MTL path. */
uint gpu::MTLTexture::gl_bindcode_get() const
{
  return 0;
}

bool gpu::MTLTexture::init_internal()
{
  if (format_ == GPU_DEPTH24_STENCIL8) {
    /* Apple Silicon requires GPU_DEPTH32F_STENCIL8 instead of GPU_DEPTH24_STENCIL8. */
    format_ = GPU_DEPTH32F_STENCIL8;
  }

  this->prepare_internal();
  return true;
}

bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
{
  /* Zero initialize. */
  this->prepare_internal();

  /* TODO(Metal): Add implementation for GPU Vert buf. */
  return false;
}

bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
{
  BLI_assert(src);

  /* Zero initialize. */
  this->prepare_internal();

  /* Flag as using texture view. */
  resource_mode_ = MTL_TEXTURE_MODE_TEXTURE_VIEW;
  source_texture_ = src;
  mip_texture_base_level_ = mip_offset;
  mip_texture_base_layer_ = layer_offset;

  /* Assign texture as view. */
  const gpu::MTLTexture *mtltex = static_cast<const gpu::MTLTexture *>(unwrap(src));
  texture_ = mtltex->texture_;
  BLI_assert(texture_);
  [texture_ retain];

  /* Flag texture as baked -- we do not need explicit initialization. */
  is_baked_ = true;
  is_dirty_ = false;

  /* Bake mip swizzle view. */
  bake_mip_swizzle_view();
  return true;
}

/* -------------------------------------------------------------------- */

bool gpu::MTLTexture::texture_is_baked()
{
  return is_baked_;
}

/* Prepare texture parameters after initialization, but before baking. */
void gpu::MTLTexture::prepare_internal()
{
  /* Derive implicit usage flags for Depth/Stencil attachments. */
  if (format_flag_ & GPU_FORMAT_DEPTH || format_flag_ & GPU_FORMAT_STENCIL) {
    gpu_image_usage_flags_ |= GPU_TEXTURE_USAGE_ATTACHMENT;
  }

  /* Derive maximum number of mip levels by default.
   * TODO(Metal): This can be removed if max mip counts are specified upfront. */
  if (type_ == GPU_TEXTURE_1D || type_ == GPU_TEXTURE_1D_ARRAY || type_ == GPU_TEXTURE_BUFFER) {
    mtl_max_mips_ = 1;
  }
  else {
    int effective_h = (type_ == GPU_TEXTURE_1D_ARRAY) ? 0 : h_;
    int effective_d = (type_ != GPU_TEXTURE_3D) ? 0 : d_;
    int max_dimension = max_iii(w_, effective_h, effective_d);
    int max_miplvl = max_ii(floor(log2(max_dimension)) + 1, 1);
    mtl_max_mips_ = max_miplvl;
  }
}

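/* Creates the backing MTLTexture resource from the current descriptor state if it does not exist
 * yet, or re-creates it when properties (such as mip count) have changed since baking. */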
void gpu::MTLTexture::ensure_baked()
{
  /* If properties have changed, re-bake. */
  bool copy_previous_contents = false;
  if (is_baked_ && is_dirty_) {
    copy_previous_contents = true;
    id<MTLTexture> previous_texture = texture_;
    [previous_texture retain];

    this->reset();
  }

  if (!is_baked_) {
    MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
    BLI_assert(ctx);

    /* Ensure texture mode is valid. */
    BLI_assert(resource_mode_ != MTL_TEXTURE_MODE_EXTERNAL);
    BLI_assert(resource_mode_ != MTL_TEXTURE_MODE_TEXTURE_VIEW);
    BLI_assert(resource_mode_ != MTL_TEXTURE_MODE_VBO);

    /* Format and mip levels (TODO(Metal): Optimize mipmaps counts, specify up-front). */
    MTLPixelFormat mtl_format = gpu_texture_format_to_metal(format_);

    /* Create texture descriptor. */
    switch (type_) {

      /* 1D */
      case GPU_TEXTURE_1D:
      case GPU_TEXTURE_1D_ARRAY: {
        BLI_assert(w_ > 0);
        texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
        texture_descriptor_.pixelFormat = mtl_format;
        texture_descriptor_.textureType = (type_ == GPU_TEXTURE_1D_ARRAY) ? MTLTextureType1DArray :
                                                                            MTLTextureType1D;
        texture_descriptor_.width = w_;
        texture_descriptor_.height = 1;
        texture_descriptor_.depth = 1;
        texture_descriptor_.arrayLength = (type_ == GPU_TEXTURE_1D_ARRAY) ? h_ : 1;
        texture_descriptor_.mipmapLevelCount = (mtl_max_mips_ > 0) ? mtl_max_mips_ : 1;
        texture_descriptor_.usage =
            MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
            MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
        texture_descriptor_.storageMode = MTLStorageModePrivate;
        texture_descriptor_.sampleCount = 1;
        texture_descriptor_.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        texture_descriptor_.hazardTrackingMode = MTLHazardTrackingModeDefault;
      } break;

      /* 2D */
      case GPU_TEXTURE_2D:
      case GPU_TEXTURE_2D_ARRAY: {
        BLI_assert(w_ > 0 && h_ > 0);
        texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
        texture_descriptor_.pixelFormat = mtl_format;
        texture_descriptor_.textureType = (type_ == GPU_TEXTURE_2D_ARRAY) ? MTLTextureType2DArray :
                                                                            MTLTextureType2D;
        texture_descriptor_.width = w_;
        texture_descriptor_.height = h_;
        texture_descriptor_.depth = 1;
        texture_descriptor_.arrayLength = (type_ == GPU_TEXTURE_2D_ARRAY) ? d_ : 1;
        texture_descriptor_.mipmapLevelCount = (mtl_max_mips_ > 0) ? mtl_max_mips_ : 1;
        texture_descriptor_.usage =
            MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
            MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
        texture_descriptor_.storageMode = MTLStorageModePrivate;
        texture_descriptor_.sampleCount = 1;
        texture_descriptor_.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        texture_descriptor_.hazardTrackingMode = MTLHazardTrackingModeDefault;
      } break;

      /* 3D */
      case GPU_TEXTURE_3D: {
        BLI_assert(w_ > 0 && h_ > 0 && d_ > 0);
        texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
        texture_descriptor_.pixelFormat = mtl_format;
        texture_descriptor_.textureType = MTLTextureType3D;
        texture_descriptor_.width = w_;
        texture_descriptor_.height = h_;
        texture_descriptor_.depth = d_;
        texture_descriptor_.arrayLength = 1;
        texture_descriptor_.mipmapLevelCount = (mtl_max_mips_ > 0) ? mtl_max_mips_ : 1;
        texture_descriptor_.usage =
            MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
            MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
        texture_descriptor_.storageMode = MTLStorageModePrivate;
        texture_descriptor_.sampleCount = 1;
        texture_descriptor_.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        texture_descriptor_.hazardTrackingMode = MTLHazardTrackingModeDefault;
      } break;

      /* CUBE TEXTURES */
      case GPU_TEXTURE_CUBE:
      case GPU_TEXTURE_CUBE_ARRAY: {
        /* NOTE: For a cube-map 'Texture::d_' refers to total number of faces,
         * not just array slices. */
        BLI_assert(w_ > 0 && h_ > 0);
        texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
        texture_descriptor_.pixelFormat = mtl_format;
        texture_descriptor_.textureType = (type_ == GPU_TEXTURE_CUBE_ARRAY) ?
                                              MTLTextureTypeCubeArray :
                                              MTLTextureTypeCube;
        texture_descriptor_.width = w_;
        texture_descriptor_.height = h_;
        texture_descriptor_.depth = 1;
        texture_descriptor_.arrayLength = (type_ == GPU_TEXTURE_CUBE_ARRAY) ? d_ / 6 : 1;
        texture_descriptor_.mipmapLevelCount = (mtl_max_mips_ > 0) ? mtl_max_mips_ : 1;
        texture_descriptor_.usage =
            MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
            MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
        texture_descriptor_.storageMode = MTLStorageModePrivate;
        texture_descriptor_.sampleCount = 1;
        texture_descriptor_.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        texture_descriptor_.hazardTrackingMode = MTLHazardTrackingModeDefault;
      } break;

      /* GPU_TEXTURE_BUFFER */
      case GPU_TEXTURE_BUFFER: {
        texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
        texture_descriptor_.pixelFormat = mtl_format;
        texture_descriptor_.textureType = MTLTextureTypeTextureBuffer;
        texture_descriptor_.width = w_;
        texture_descriptor_.height = 1;
        texture_descriptor_.depth = 1;
        texture_descriptor_.arrayLength = 1;
        texture_descriptor_.mipmapLevelCount = (mtl_max_mips_ > 0) ? mtl_max_mips_ : 1;
        texture_descriptor_.usage =
            MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
            MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimize usage flags. */
        texture_descriptor_.storageMode = MTLStorageModePrivate;
        texture_descriptor_.sampleCount = 1;
        texture_descriptor_.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        texture_descriptor_.hazardTrackingMode = MTLHazardTrackingModeDefault;
      } break;

      default: {
        MTL_LOG_ERROR("[METAL] Error: Cannot create texture with unknown type: %d\n", type_);
        return;
      } break;
    }

    /* Determine Resource Mode. */
    resource_mode_ = MTL_TEXTURE_MODE_DEFAULT;

    /* Create texture. */
    texture_ = [ctx->device newTextureWithDescriptor:texture_descriptor_];

    [texture_descriptor_ release];
    texture_descriptor_ = nullptr;
    texture_.label = [NSString stringWithUTF8String:this->get_name()];
    BLI_assert(texture_);
    is_baked_ = true;
    is_dirty_ = false;
  }

  /* Re-apply previous contents. */
  if (copy_previous_contents) {
    id<MTLTexture> previous_texture;
    /* TODO(Metal): May need to copy previous contents of texture into new texture. */
    /* [previous_texture release]; */
    UNUSED_VARS(previous_texture);
  }
}

1682 void gpu::MTLTexture::reset()
1683 {
1684 
1685  MTL_LOG_INFO("Texture %s reset. Size %d, %d, %d\n", this->get_name(), w_, h_, d_);
1686  /* Delete associated METAL resources. */
1687  if (texture_ != nil) {
1688  [texture_ release];
1689  texture_ = nil;
1690  is_baked_ = false;
1691  is_dirty_ = true;
1692  }
1693 
1694  if (mip_swizzle_view_ != nil) {
1695  [mip_swizzle_view_ release];
1696  mip_swizzle_view_ = nil;
1697  }
1698 
1699  if (texture_buffer_ != nil) {
1700  [texture_buffer_ release];
1701  }
1702 
1703  /* Blit framebuffer. */
1704  if (blit_fb_) {
1705  GPU_framebuffer_free(blit_fb_);
1706  blit_fb_ = nullptr;
1707  }
1708 
1709  BLI_assert(texture_ == nil);
1710  BLI_assert(mip_swizzle_view_ == nil);
1711 }
1712 
1715 } // namespace blender::gpu
@ G_DEBUG_GPU
Definition: BKE_global.h:193
#define BLI_assert(a)
Definition: BLI_assert.h:46
MINLINE int min_ii(int a, int b)
MINLINE int max_ii(int a, int b)
MINLINE int max_iii(int a, int b, int c)
unsigned int uint
Definition: BLI_sys_types.h:67
#define UNUSED_VARS(...)
#define UNUSED_VARS_NDEBUG(...)
GHOST C-API function and type declarations.
GPUBatch
Definition: GPU_batch.h:78
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
Definition: gpu_batch.cc:211
#define GPU_batch_texture_bind(batch, name, tex)
Definition: GPU_batch.h:161
void GPU_batch_draw(GPUBatch *batch)
Definition: gpu_batch.cc:223
struct GPUBatch * GPU_batch_preset_quad(void)
GPUContext * GPU_context_active_get(void)
Definition: gpu_context.cc:142
struct GPUFrameBuffer GPUFrameBuffer
void GPU_framebuffer_restore(void)
GPUFrameBuffer * GPU_framebuffer_active_get(void)
void GPU_framebuffer_free(GPUFrameBuffer *fb)
void GPU_framebuffer_bind(GPUFrameBuffer *fb)
GPUFrameBuffer * GPU_framebuffer_create(const char *name)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei height
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
struct GPUShader GPUShader
Definition: GPU_shader.h:20
void GPU_shader_uniform_1i(GPUShader *sh, const char *name, int value)
Definition: gpu_shader.cc:652
void GPU_shader_uniform_2f(GPUShader *sh, const char *name, float x, float y)
Definition: gpu_shader.cc:663
void GPU_face_culling(eGPUFaceCullTest culling)
Definition: gpu_state.cc:44
eGPUBlend
Definition: GPU_state.h:59
@ GPU_BLEND_NONE
Definition: GPU_state.h:60
void GPU_blend(eGPUBlend blend)
Definition: gpu_state.cc:39
void GPU_scissor_test(bool enable)
Definition: gpu_state.cc:180
uint GPU_stencil_mask_get(void)
Definition: gpu_state.cc:230
void GPU_depth_mask(bool depth)
Definition: gpu_state.cc:107
void GPU_stencil_test(eGPUStencilTest test)
Definition: gpu_state.cc:70
void GPU_stencil_write_mask_set(uint write_mask)
Definition: gpu_state.cc:202
eGPUFaceCullTest
Definition: GPU_state.h:107
@ GPU_CULL_NONE
Definition: GPU_state.h:108
void GPU_finish(void)
Definition: gpu_state.cc:296
void GPU_stencil_reference_set(uint reference)
Definition: gpu_state.cc:197
eGPUBlend GPU_blend_get(void)
Definition: gpu_state.cc:218
eGPUFaceCullTest GPU_face_culling_get(void)
Definition: gpu_state.cc:49
eGPUStencilTest GPU_stencil_test_get(void)
Definition: gpu_state.cc:242
bool GPU_depth_mask_get(void)
Definition: gpu_state.cc:273
eGPUDepthTest
Definition: GPU_state.h:82
@ GPU_DEPTH_ALWAYS
Definition: GPU_state.h:84
eGPUDepthTest GPU_depth_test_get(void)
Definition: gpu_state.cc:236
eGPUStencilTest
Definition: GPU_state.h:92
@ GPU_STENCIL_ALWAYS
Definition: GPU_state.h:94
void GPU_depth_test(eGPUDepthTest test)
Definition: gpu_state.cc:65
struct GPUTexture GPUTexture
Definition: GPU_texture.h:17
eGPUDataFormat
Definition: GPU_texture.h:170
@ GPU_DATA_UINT_24_8
Definition: GPU_texture.h:175
@ GPU_DATA_INT
Definition: GPU_texture.h:172
@ GPU_DATA_10_11_11_REV
Definition: GPU_texture.h:176
@ GPU_DATA_UINT
Definition: GPU_texture.h:173
@ GPU_DATA_FLOAT
Definition: GPU_texture.h:171
@ GPU_TEXTURE_USAGE_SHADER_READ
Definition: GPU_texture.h:182
@ GPU_TEXTURE_USAGE_ATTACHMENT
Definition: GPU_texture.h:184
eGPUTextureFormat
Definition: GPU_texture.h:83
@ GPU_DEPTH32F_STENCIL8
Definition: GPU_texture.h:119
@ GPU_SRGB8_A8
Definition: GPU_texture.h:121
@ GPU_DEPTH24_STENCIL8
Definition: GPU_texture.h:120
@ GPU_DEPTH_COMPONENT24
Definition: GPU_texture.h:166
@ GPU_DEPTH_COMPONENT32F
Definition: GPU_texture.h:165
@ GPU_DEPTH_COMPONENT16
Definition: GPU_texture.h:167
@ GPU_R11F_G11F_B10F
Definition: GPU_texture.h:118
struct GPUVertBuf GPUVertBuf
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Texture
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition: btQuadWord.h:119
id< MTLComputeCommandEncoder > ensure_begin_compute_encoder()
id< MTLBlitCommandEncoder > ensure_begin_blit_encoder()
MTLContextGlobalShaderPipelineState pipeline_state
Definition: mtl_context.hh:600
id< MTLDevice > device
Definition: mtl_context.hh:604
MTLScratchBufferManager & get_scratchbuffer_manager()
Definition: mtl_context.hh:708
MTLCommandBufferManager main_command_buffer
Definition: mtl_context.hh:611
static MTLContext * get()
Definition: mtl_context.hh:629
MTLTemporaryBuffer scratch_buffer_allocate_range_aligned(uint64_t alloc_size, uint alignment)
Definition: mtl_memory.mm:662
uint gl_bindcode_get() const override
void swizzle_set(const char swizzle_mask[4]) override
MTLTexture(const char *name)
Definition: mtl_texture.mm:67
void generate_mipmap() override
Definition: mtl_texture.mm:901
void mip_range_set(int min, int max) override
void * read(int mip, eGPUDataFormat type) override
void update_sub(int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data) override
Definition: mtl_texture.mm:396
bool init_internal() override
void clear(eGPUDataFormat format, const void *data) override
void copy_to(Texture *dst) override
Definition: mtl_texture.mm:945
virtual void texture_unbind(Texture *tex)=0
eGPUTextureFormat format_
bool init_2D(int w, int h, int layers, int mip_len, eGPUTextureFormat format)
Definition: gpu_texture.cc:70
CCL_NAMESPACE_BEGIN struct Options options
const char * label
SyclQueue void void * src
SyclQueue void * dest
GPUBatch * quad
BLI_INLINE float fb(float length, float L)
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
ccl_global float * buffer
ccl_gpu_kernel_postfix ccl_global float int int int int float bool int offset
ccl_gpu_kernel_postfix ccl_global float int int int int float bool reset
clear internal cached data and reset random seed
format
Definition: logImageCore.h:38
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:33
#define G(x, y, z)
#define MTL_LOG_INFO(info,...)
Definition: mtl_debug.hh:47
#define MTL_LOG_WARNING(info,...)
Definition: mtl_debug.hh:36
#define MTL_LOG_ERROR(info,...)
Definition: mtl_debug.hh:24
MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format)
std::string tex_data_format_to_msl_type_str(eGPUDataFormat type)
Definition: mtl_texture.hh:427
bool validate_data_format_mtl(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
static GPUContext * wrap(Context *ctx)
std::string tex_data_format_to_msl_texture_template_type(eGPUDataFormat type)
Definition: mtl_texture.hh:452
static Context * unwrap(GPUContext *ctx)
MTLPixelFormat mtl_format_get_writeable_view_format(MTLPixelFormat format)
Definition: mtl_texture.hh:497
static MTLTextureSwizzle swizzle_to_mtl(const char swizzle)
int get_mtl_format_num_components(MTLPixelFormat tex_format)
eGPUDataFormat to_mtl_internal_data_format(eGPUTextureFormat tex_format)
Definition: mtl_texture.hh:531
static size_t to_bytesize(GPUIndexBufType type)
int to_component_len(eGPUTextureFormat format)
int get_mtl_format_bytesize(MTLPixelFormat tex_format)
T length(const vec_base< T, Size > &a)
T floor(const T &a)
#define min(a, b)
Definition: sort.c:35
unsigned char uint8_t
Definition: stdint.h:78
unsigned __int64 uint64_t
Definition: stdint.h:90
id< MTLBuffer > metal_buffer
Definition: mtl_memory.hh:183
float max