Blender V3.3
mtl_command_buffer.mm
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "DNA_userdef_types.h"

#include "mtl_backend.hh"
#include "mtl_common.hh"
#include "mtl_context.hh"
#include "mtl_debug.hh"
#include "mtl_framebuffer.hh"

#include <fstream>

using namespace blender;
using namespace blender::gpu;

namespace blender::gpu {

/* Global sync event used across MTLContext's.
 * This resolves flickering artifacts from command buffer
 * dependencies not being honored for work submitted between
 * different GPUContext's. */
id<MTLEvent> MTLCommandBufferManager::sync_event = nil;
uint64_t MTLCommandBufferManager::event_signal_val = 0;

/* Counter for active command buffers. */
int MTLCommandBufferManager::num_active_cmd_bufs = 0;

/* -------------------------------------------------------------------- */

void MTLCommandBufferManager::prepare(bool supports_render)
{
  render_pass_state_.reset_state();
}

void MTLCommandBufferManager::register_encoder_counters()
{
  encoder_count_++;
  empty_ = false;
}

id<MTLCommandBuffer> MTLCommandBufferManager::ensure_begin()
{
  if (active_command_buffer_ == nil) {

    /* Verify number of active command buffers is below limit.
     * Exceeding this limit will mean we either have a leak/GPU hang
     * or we should increase the command buffer limit during MTLQueue creation. */
    BLI_assert(MTLCommandBufferManager::num_active_cmd_bufs < MTL_MAX_COMMAND_BUFFERS);

    if (G.debug & G_DEBUG_GPU) {
      /* Debug: Enable Advanced Errors for GPU work execution. */
      MTLCommandBufferDescriptor *desc = [[MTLCommandBufferDescriptor alloc] init];
      desc.errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
      desc.retainedReferences = YES;
      active_command_buffer_ = [context_.queue commandBufferWithDescriptor:desc];
    }
    else {
      active_command_buffer_ = [context_.queue commandBuffer];
    }
    [active_command_buffer_ retain];
    MTLCommandBufferManager::num_active_cmd_bufs++;

    /* Ensure command buffers execute in submission order across multiple MTLContext's. */
    if (this->sync_event != nil) {
      [active_command_buffer_ encodeWaitForEvent:this->sync_event value:this->event_signal_val];
    }

    /* Ensure we begin a new Scratch Buffer if we are on a new frame. */
    MTLScratchBufferManager &mem = context_.memory_manager;
    mem.ensure_increment_scratch_buffer();

    /* Reset command buffer heuristics. */
    this->reset_counters();
  }
  BLI_assert(active_command_buffer_ != nil);
  return active_command_buffer_;
}
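
/* Usage sketch (illustrative): command buffer creation is lazy and the encoder helpers below
 * call ensure_begin() themselves, so direct use is only needed when encoding raw Metal work.
 * Here `cmd` is assumed to be the active context's MTLCommandBufferManager:
 *
 *   id<MTLCommandBuffer> cmd_buf = cmd.ensure_begin();
 *   // ... encode work against cmd_buf; it is committed later via submit(). ...
 */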

/* If wait is true, the CPU will stall until GPU work has completed. */
bool MTLCommandBufferManager::submit(bool wait)
{
  /* Skip submission if command buffer is empty. */
  if (empty_ || active_command_buffer_ == nil) {
    return false;
  }

  /* Ensure current encoders are finished. */
  this->end_active_command_encoder();
  BLI_assert(active_command_encoder_type_ == MTL_NO_COMMAND_ENCODER);

  /* Flush active ScratchBuffer associated with parent MTLContext. */
  context_.memory_manager.flush_active_scratch_buffer();

  /*** Submit Command Buffer. ***/
  /* Strict ordering ensures command buffers are guaranteed to execute after a previous
   * one has completed. Resolves flickering when command buffers are submitted from
   * different MTLContext's. */
  if (MTLCommandBufferManager::sync_event == nil) {
    MTLCommandBufferManager::sync_event = [context_.device newEvent];
    BLI_assert(MTLCommandBufferManager::sync_event);
    [MTLCommandBufferManager::sync_event retain];
  }
  BLI_assert(MTLCommandBufferManager::sync_event != nil);
  MTLCommandBufferManager::event_signal_val++;

  [active_command_buffer_ encodeSignalEvent:MTLCommandBufferManager::sync_event
                                      value:MTLCommandBufferManager::event_signal_val];

  /* Command buffer lifetime tracking. */
  /* Increment the current MTLSafeFreeList reference counter to flag MTLBuffers freed within
   * the current command buffer lifetime as used.
   * This ensures that in-use resources are not prematurely de-referenced and returned to the
   * available buffer pool while they are in-use by the GPU. */
  MTLSafeFreeList *cmd_free_buffer_list =
      MTLContext::get_global_memory_manager().get_current_safe_list();
  BLI_assert(cmd_free_buffer_list);
  cmd_free_buffer_list->increment_reference();

  id<MTLCommandBuffer> cmd_buffer_ref = active_command_buffer_;
  [cmd_buffer_ref retain];

  [cmd_buffer_ref addCompletedHandler:^(id<MTLCommandBuffer> cb) {
    /* Upon command buffer completion, decrement MTLSafeFreeList reference count
     * to allow buffers no longer in use by this CommandBuffer to be freed. */
    cmd_free_buffer_list->decrement_reference();

    /* Release command buffer after the completion callback has been handled. */
    [cmd_buffer_ref release];

    /* Decrement active command buffer count. */
    MTLCommandBufferManager::num_active_cmd_bufs--;
  }];

  /* Submit command buffer to GPU. */
  [active_command_buffer_ commit];

  if (wait || (G.debug & G_DEBUG_GPU)) {
    /* Wait until current GPU work has finished executing. */
    [active_command_buffer_ waitUntilCompleted];

    /* Command buffer execution debugging can return an error message if
     * execution has failed or encountered GPU-side errors. */
    if (G.debug & G_DEBUG_GPU) {

      NSError *error = [active_command_buffer_ error];
      if (error != nil) {
        NSLog(@"%@", error);
        BLI_assert(false);

        @autoreleasepool {
          const char *stringAsChar = [[NSString stringWithFormat:@"%@", error] UTF8String];

          std::ofstream outfile;
          outfile.open("command_buffer_error.txt", std::fstream::out | std::fstream::app);
          outfile << stringAsChar;
          outfile.close();
        }
      }
    }
  }

  /* Release the previous frame's command buffer and reset the active command buffer. */
  if (last_submitted_command_buffer_ != nil) {

    BLI_assert(MTLBackend::get()->is_inside_render_boundary());
    [last_submitted_command_buffer_ autorelease];
    last_submitted_command_buffer_ = nil;
  }
  last_submitted_command_buffer_ = active_command_buffer_;
  active_command_buffer_ = nil;

  return true;
}
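
/* Ordering sketch: the shared MTLEvent forms a submission chain across contexts. If
 * event_signal_val has just been incremented to N, the pairing is effectively:
 *
 *   // submit() of the current command buffer (any MTLContext):
 *   [cmd_buf_k encodeSignalEvent:sync_event value:N];
 *   // ensure_begin() of the next command buffer (possibly another MTLContext):
 *   [cmd_buf_k_plus_1 encodeWaitForEvent:sync_event value:N];
 *
 * so command buffer K+1 cannot start executing on the GPU until command buffer K has
 * signaled, regardless of which GPUContext submitted it. */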

/* -------------------------------------------------------------------- */

/* Fetch/query current encoder. */
bool MTLCommandBufferManager::is_inside_render_pass()
{
  return (active_command_encoder_type_ == MTL_RENDER_COMMAND_ENCODER);
}

bool MTLCommandBufferManager::is_inside_blit()
{
  return (active_command_encoder_type_ == MTL_BLIT_COMMAND_ENCODER);
}

bool MTLCommandBufferManager::is_inside_compute()
{
  return (active_command_encoder_type_ == MTL_COMPUTE_COMMAND_ENCODER);
}

id<MTLRenderCommandEncoder> MTLCommandBufferManager::get_active_render_command_encoder()
{
  /* Calling code should check if inside a render pass. Otherwise nil is returned. */
  return active_render_command_encoder_;
}

id<MTLBlitCommandEncoder> MTLCommandBufferManager::get_active_blit_command_encoder()
{
  /* Calling code should check if a blit encoder is active. Otherwise nil is returned. */
  return active_blit_command_encoder_;
}

id<MTLComputeCommandEncoder> MTLCommandBufferManager::get_active_compute_command_encoder()
{
  /* Calling code should check if a compute encoder is active. Otherwise nil is returned. */
  return active_compute_command_encoder_;
}

MTLFrameBuffer *MTLCommandBufferManager::get_active_framebuffer()
{
  /* If outside of a RenderPass, nullptr will be returned. */
  if (this->is_inside_render_pass()) {
    return active_frame_buffer_;
  }
  return nullptr;
}

/* Encoder and Pass management. */
/* End the currently active MTLCommandEncoder. */
bool MTLCommandBufferManager::end_active_command_encoder()
{

  /* End active encoder if one is active. */
  if (active_command_encoder_type_ != MTL_NO_COMMAND_ENCODER) {

    switch (active_command_encoder_type_) {
      case MTL_RENDER_COMMAND_ENCODER: {
        /* Verify a RenderCommandEncoder is active and end it. */
        BLI_assert(active_render_command_encoder_ != nil);

        /* Complete encoding. */
        [active_render_command_encoder_ endEncoding];
        [active_render_command_encoder_ release];
        active_render_command_encoder_ = nil;
        active_command_encoder_type_ = MTL_NO_COMMAND_ENCODER;

        /* Reset associated framebuffer flag. */
        active_frame_buffer_ = nullptr;
        active_pass_descriptor_ = nullptr;
        return true;
      }

      case MTL_BLIT_COMMAND_ENCODER: {
        /* Verify a BlitCommandEncoder is active and end it. */
        BLI_assert(active_blit_command_encoder_ != nil);
        [active_blit_command_encoder_ endEncoding];
        [active_blit_command_encoder_ release];
        active_blit_command_encoder_ = nil;
        active_command_encoder_type_ = MTL_NO_COMMAND_ENCODER;
        return true;
      }

      case MTL_COMPUTE_COMMAND_ENCODER: {
        /* Verify a ComputeCommandEncoder is active and end it. */
        BLI_assert(active_compute_command_encoder_ != nil);
        [active_compute_command_encoder_ endEncoding];
        [active_compute_command_encoder_ release];
        active_compute_command_encoder_ = nil;
        active_command_encoder_type_ = MTL_NO_COMMAND_ENCODER;
        return true;
      }

      default: {
        BLI_assert(false && "Invalid command encoder type");
        return false;
      }
    };
  }
  else {
    /* MTL_NO_COMMAND_ENCODER. */
    BLI_assert(active_render_command_encoder_ == nil);
    BLI_assert(active_blit_command_encoder_ == nil);
    BLI_assert(active_compute_command_encoder_ == nil);
    return false;
  }
}

id<MTLRenderCommandEncoder> MTLCommandBufferManager::ensure_begin_render_command_encoder(
    MTLFrameBuffer *ctx_framebuffer, bool force_begin, bool *new_pass)
{
  /* Ensure valid framebuffer. */
  BLI_assert(ctx_framebuffer != nullptr);

  /* Ensure active command buffer. */
  id<MTLCommandBuffer> cmd_buf = this->ensure_begin();
  BLI_assert(cmd_buf);

  /* Begin a new command encoder if the currently active one is
   * incompatible or requires updating. */
  if (active_command_encoder_type_ != MTL_RENDER_COMMAND_ENCODER ||
      active_frame_buffer_ != ctx_framebuffer || force_begin) {
    this->end_active_command_encoder();

    /* Determine if this is a re-bind of the same framebuffer. */
    bool is_rebind = (active_frame_buffer_ == ctx_framebuffer);

    /* Generate RenderPassDescriptor from bound framebuffer. */
    BLI_assert(ctx_framebuffer);
    active_frame_buffer_ = ctx_framebuffer;
    active_pass_descriptor_ = active_frame_buffer_->bake_render_pass_descriptor(
        is_rebind && (!active_frame_buffer_->get_pending_clear()));

    /* Determine if there is a visibility buffer assigned to the context. */
    gpu::MTLBuffer *visibility_buffer = context_.get_visibility_buffer();
    this->active_pass_descriptor_.visibilityResultBuffer =
        (visibility_buffer) ? visibility_buffer->get_metal_buffer() : nil;
    context_.clear_visibility_dirty();

    /* Ensure we have already cleaned up our previous render command encoder. */
    BLI_assert(active_render_command_encoder_ == nil);

    /* Create a new RenderCommandEncoder based on the descriptor (and begin encoding). */
    active_render_command_encoder_ = [cmd_buf
        renderCommandEncoderWithDescriptor:active_pass_descriptor_];
    [active_render_command_encoder_ retain];
    active_command_encoder_type_ = MTL_RENDER_COMMAND_ENCODER;

    /* Update command buffer encoder heuristics. */
    this->register_encoder_counters();

    /* Apply initial state. */
    /* Update Viewport and Scissor State. */
    active_frame_buffer_->apply_state();

    /* FLAG FRAMEBUFFER AS CLEARED -- A clear only lasts as long as one has been specified.
     * After this, attachments revert to Load actions, matching GL behavior. */
    active_frame_buffer_->mark_cleared();

    /* Reset RenderPassState to ensure resource bindings are re-applied. */
    render_pass_state_.reset_state();

    /* Return true as a new pass was started. */
    *new_pass = true;
  }
  else {
    /* No new pass. */
    *new_pass = false;
  }

  BLI_assert(active_render_command_encoder_ != nil);
  return active_render_command_encoder_;
}
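
/* Usage sketch (illustrative): framebuffer bind/draw paths typically request the encoder as
 * follows, where `cmd` is the context's MTLCommandBufferManager and `mtl_fb` is the framebuffer
 * being bound (both names are placeholders):
 *
 *   bool new_pass = false;
 *   id<MTLRenderCommandEncoder> enc = cmd.ensure_begin_render_command_encoder(
 *       mtl_fb, false, &new_pass);
 *   if (new_pass) {
 *     // Encoder state did not carry over: re-apply PSO, buffers, textures and samplers.
 *   }
 */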

id<MTLBlitCommandEncoder> MTLCommandBufferManager::ensure_begin_blit_encoder()
{
  /* Ensure active command buffer. */
  id<MTLCommandBuffer> cmd_buf = this->ensure_begin();
  BLI_assert(cmd_buf);

  /* Ensure no existing command encoder of a different type is active. */
  if (active_command_encoder_type_ != MTL_BLIT_COMMAND_ENCODER) {
    this->end_active_command_encoder();
  }

  /* Begin new Blit Encoder. */
  if (active_blit_command_encoder_ == nil) {
    active_blit_command_encoder_ = [cmd_buf blitCommandEncoder];
    BLI_assert(active_blit_command_encoder_ != nil);
    [active_blit_command_encoder_ retain];
    active_command_encoder_type_ = MTL_BLIT_COMMAND_ENCODER;

    /* Update command buffer encoder heuristics. */
    this->register_encoder_counters();
  }
  BLI_assert(active_blit_command_encoder_ != nil);
  return active_blit_command_encoder_;
}
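
/* Usage sketch (illustrative): blit work such as a buffer-to-buffer copy, assuming valid `src`
 * and `dst` MTLBuffer handles and a byte count `size`:
 *
 *   id<MTLBlitCommandEncoder> blit = cmd.ensure_begin_blit_encoder();
 *   [blit copyFromBuffer:src sourceOffset:0 toBuffer:dst destinationOffset:0 size:size];
 *
 * The encoder remains open for further blits until a different encoder type is required or the
 * command buffer is submitted. */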

id<MTLComputeCommandEncoder> MTLCommandBufferManager::ensure_begin_compute_encoder()
{
  /* Ensure active command buffer. */
  id<MTLCommandBuffer> cmd_buf = this->ensure_begin();
  BLI_assert(cmd_buf);

  /* Ensure no existing command encoder of a different type is active. */
  if (active_command_encoder_type_ != MTL_COMPUTE_COMMAND_ENCODER) {
    this->end_active_command_encoder();
  }

  /* Begin new Compute Encoder. */
  if (active_compute_command_encoder_ == nil) {
    active_compute_command_encoder_ = [cmd_buf computeCommandEncoder];
    BLI_assert(active_compute_command_encoder_ != nil);
    [active_compute_command_encoder_ retain];
    active_command_encoder_type_ = MTL_COMPUTE_COMMAND_ENCODER;

    /* Update command buffer encoder heuristics. */
    this->register_encoder_counters();
  }
  BLI_assert(active_compute_command_encoder_ != nil);
  return active_compute_command_encoder_;
}
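
/* Usage sketch (illustrative): dispatching compute work, assuming a pre-built
 * id<MTLComputePipelineState> `pso` and already-bound resources:
 *
 *   id<MTLComputeCommandEncoder> enc = cmd.ensure_begin_compute_encoder();
 *   [enc setComputePipelineState:pso];
 *   [enc dispatchThreadgroups:MTLSizeMake(group_count, 1, 1)
 *       threadsPerThreadgroup:MTLSizeMake(64, 1, 1)];
 */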

/* -------------------------------------------------------------------- */

/* Rendering Heuristics. */
void MTLCommandBufferManager::register_draw_counters(int vertex_submission)
{
  current_draw_call_count_++;
  vertex_submitted_count_ += vertex_submission;
  empty_ = false;
}

/* Reset workload counters. */
void MTLCommandBufferManager::reset_counters()
{
  empty_ = true;
  current_draw_call_count_ = 0;
  encoder_count_ = 0;
  vertex_submitted_count_ = 0;
}

/* Workload evaluation. */
bool MTLCommandBufferManager::do_break_submission()
{
  /* Skip if no active command buffer. */
  if (active_command_buffer_ == nil) {
    return false;
  }

  /* Use an optimized heuristic to split heavy command buffer submissions to better saturate the
   * hardware and also reduce stalling from individual large submissions. */
  if (GPU_type_matches(GPU_DEVICE_INTEL, GPU_OS_ANY, GPU_DRIVER_ANY) ||
      GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)) {
    return ((current_draw_call_count_ > 30000) || (vertex_submitted_count_ > 100000000) ||
            (encoder_count_ > 25));
  }
  else {
    /* Apple Silicon is less efficient when splitting submissions. */
    return false;
  }
}
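
/* Usage sketch (illustrative): a caller can poll this heuristic between draws and split the
 * workload by submitting without waiting, then continue encoding into a fresh command buffer:
 *
 *   if (cmd.do_break_submission()) {
 *     cmd.submit(false);   // Flush accumulated work to the GPU without stalling the CPU.
 *     cmd.ensure_begin();  // Subsequent encoding starts a new command buffer.
 *   }
 */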

/* -------------------------------------------------------------------- */

/* Debug. */
void MTLCommandBufferManager::push_debug_group(const char *name, int index)
{
  id<MTLCommandBuffer> cmd = this->ensure_begin();
  if (cmd != nil) {
    [cmd pushDebugGroup:[NSString stringWithFormat:@"%s_%d", name, index]];
  }
}

void MTLCommandBufferManager::pop_debug_group()
{
  id<MTLCommandBuffer> cmd = this->ensure_begin();
  if (cmd != nil) {
    [cmd popDebugGroup];
  }
}
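
/* Usage sketch (illustrative): debug groups nest, so wrapping a logical pass produces readable
 * captures in Xcode's GPU frame debugger. The label and index below are placeholders:
 *
 *   cmd.push_debug_group("shadow_pass", sample_index);
 *   // ... encode draws ...
 *   cmd.pop_debug_group();
 */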

/* Workload Synchronization. */
bool MTLCommandBufferManager::insert_memory_barrier(eGPUBarrier barrier_bits,
                                                    eGPUStageBarrierBits before_stages,
                                                    eGPUStageBarrierBits after_stages)
{
  /* Only supporting Metal on 10.14 onward anyway - this check avoids availability warnings. */
  if (@available(macOS 10.14, *)) {

    /* Resolve scope. */
    MTLBarrierScope scope = 0;
    if (barrier_bits & GPU_BARRIER_SHADER_IMAGE_ACCESS ||
        barrier_bits & GPU_BARRIER_TEXTURE_FETCH) {
      scope = scope | MTLBarrierScopeTextures | MTLBarrierScopeRenderTargets;
    }
    if (barrier_bits & GPU_BARRIER_SHADER_STORAGE ||
        barrier_bits & GPU_BARRIER_VERTEX_ATTRIB_ARRAY ||
        barrier_bits & GPU_BARRIER_ELEMENT_ARRAY) {
      scope = scope | MTLBarrierScopeBuffers;
    }

    if (scope != 0) {
      /* Issue barrier based on the active encoder. */
      switch (active_command_encoder_type_) {
        case MTL_NO_COMMAND_ENCODER:
        case MTL_BLIT_COMMAND_ENCODER: {
          /* No barrier to be inserted. */
          return false;
        }

        /* Rendering. */
        case MTL_RENDER_COMMAND_ENCODER: {
          /* Currently flagging both stages -- the bits above could be used to filter on stage
           * type -- though a full barrier is safe for now. */
          MTLRenderStages before_stage_flags = 0;
          MTLRenderStages after_stage_flags = 0;
          if (before_stages & GPU_BARRIER_STAGE_VERTEX &&
              !(before_stages & GPU_BARRIER_STAGE_FRAGMENT)) {
            before_stage_flags = before_stage_flags | MTLRenderStageVertex;
          }
          if (before_stages & GPU_BARRIER_STAGE_FRAGMENT) {
            before_stage_flags = before_stage_flags | MTLRenderStageFragment;
          }
          if (after_stages & GPU_BARRIER_STAGE_VERTEX) {
            after_stage_flags = after_stage_flags | MTLRenderStageVertex;
          }
          if (after_stages & GPU_BARRIER_STAGE_FRAGMENT) {
            after_stage_flags = after_stage_flags | MTLRenderStageFragment;
          }

          id<MTLRenderCommandEncoder> rec = this->get_active_render_command_encoder();
          BLI_assert(rec != nil);
          [rec memoryBarrierWithScope:scope
                          afterStages:after_stage_flags
                         beforeStages:before_stage_flags];
          return true;
        }

        /* Compute. */
        case MTL_COMPUTE_COMMAND_ENCODER: {
          id<MTLComputeCommandEncoder> rec = this->get_active_compute_command_encoder();
          BLI_assert(rec != nil);
          [rec memoryBarrierWithScope:scope];
          return true;
        }
      }
    }
  }
  /* No barrier support. */
  return false;
}
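
/* Mapping sketch (derived from the code above): a call such as
 *
 *   cmd.insert_memory_barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS,
 *                             GPU_BARRIER_STAGE_VERTEX,
 *                             GPU_BARRIER_STAGE_FRAGMENT);
 *
 * resolves to scope = MTLBarrierScopeTextures | MTLBarrierScopeRenderTargets with
 * beforeStages = MTLRenderStageVertex and afterStages = MTLRenderStageFragment when a render
 * encoder is active, to a plain memoryBarrierWithScope: under a compute encoder, and is a
 * no-op (returns false) for blit or no encoder. */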

/* -------------------------------------------------------------------- */

/* Reset binding state when a new RenderCommandEncoder is bound, to ensure
 * pipeline resources are re-applied to the new Encoder.
 * NOTE: In Metal, state is only persistent within an MTLCommandEncoder,
 * not globally. */
void MTLRenderPassState::reset_state()
{
  /* Reset cached pipeline state. */
  this->bound_pso = nil;
  this->bound_ds_state = nil;

  /* Clear shader binding. */
  this->last_bound_shader_state.set(nullptr, 0);

  /* Other states. */
  MTLFrameBuffer *fb = this->cmd.get_active_framebuffer();
  this->last_used_stencil_ref_value = 0;
  this->last_scissor_rect = {0,
                             0,
                             (uint)((fb != nullptr) ? fb->get_width() : 0),
                             (uint)((fb != nullptr) ? fb->get_height() : 0)};

  /* Reset cached resource binding state. */
  for (int ubo = 0; ubo < MTL_MAX_UNIFORM_BUFFER_BINDINGS; ubo++) {
    this->cached_vertex_buffer_bindings[ubo].is_bytes = false;
    this->cached_vertex_buffer_bindings[ubo].metal_buffer = nil;
    this->cached_vertex_buffer_bindings[ubo].offset = -1;

    this->cached_fragment_buffer_bindings[ubo].is_bytes = false;
    this->cached_fragment_buffer_bindings[ubo].metal_buffer = nil;
    this->cached_fragment_buffer_bindings[ubo].offset = -1;
  }

  /* Reset cached texture and sampler state binding state. */
  for (int tex = 0; tex < MTL_MAX_TEXTURE_SLOTS; tex++) {
    this->cached_vertex_texture_bindings[tex].metal_texture = nil;
    this->cached_vertex_sampler_state_bindings[tex].sampler_state = nil;
    this->cached_vertex_sampler_state_bindings[tex].is_arg_buffer_binding = false;

    this->cached_fragment_texture_bindings[tex].metal_texture = nil;
    this->cached_fragment_sampler_state_bindings[tex].sampler_state = nil;
    this->cached_fragment_sampler_state_bindings[tex].is_arg_buffer_binding = false;
  }
}
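
/* Note: this reset exists because Metal binding state lives on the encoder, not on the command
 * buffer or device. A sketch of the consequence for callers (encoder names are placeholders):
 *
 *   [enc_a setVertexBuffer:buf offset:0 atIndex:1];  // Bound on encoder A only.
 *   // Encoder B (a new render pass) starts with no bindings carried over:
 *   [enc_b setVertexBuffer:buf offset:0 atIndex:1];  // Must be re-issued.
 *
 * Clearing the cached_* arrays above ensures the redundancy checks in the bind_* helpers below
 * never skip these mandatory re-binds. */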

/* Bind Texture to current RenderCommandEncoder. */
void MTLRenderPassState::bind_vertex_texture(id<MTLTexture> tex, uint slot)
{
  if (this->cached_vertex_texture_bindings[slot].metal_texture != tex) {
    id<MTLRenderCommandEncoder> rec = this->cmd.get_active_render_command_encoder();
    BLI_assert(rec != nil);
    [rec setVertexTexture:tex atIndex:slot];
    this->cached_vertex_texture_bindings[slot].metal_texture = tex;
  }
}

void MTLRenderPassState::bind_fragment_texture(id<MTLTexture> tex, uint slot)
{
  if (this->cached_fragment_texture_bindings[slot].metal_texture != tex) {
    id<MTLRenderCommandEncoder> rec = this->cmd.get_active_render_command_encoder();
    BLI_assert(rec != nil);
    [rec setFragmentTexture:tex atIndex:slot];
    this->cached_fragment_texture_bindings[slot].metal_texture = tex;
  }
}

void MTLRenderPassState::bind_vertex_sampler(MTLSamplerBinding &sampler_binding,
                                             bool use_argument_buffer_for_samplers,
                                             uint slot)
{
  /* TODO(Metal): Implement RenderCommandEncoder vertex sampler binding utility. This will be
   * implemented alongside MTLShader. */
}

void MTLRenderPassState::bind_fragment_sampler(MTLSamplerBinding &sampler_binding,
                                               bool use_argument_buffer_for_samplers,
                                               uint slot)
{
  /* TODO(Metal): Implement RenderCommandEncoder fragment sampler binding utility. This will be
   * implemented alongside MTLShader. */
}

void MTLRenderPassState::bind_vertex_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index)
{
  /* TODO(Metal): Implement RenderCommandEncoder vertex buffer binding utility. This will be
   * implemented alongside the full MTLMemoryManager. */
}

void MTLRenderPassState::bind_fragment_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index)
{
  /* TODO(Metal): Implement RenderCommandEncoder fragment buffer binding utility. This will be
   * implemented alongside the full MTLMemoryManager. */
}

void MTLRenderPassState::bind_vertex_bytes(void *bytes, uint length, uint index)
{
  /* TODO(Metal): Implement RenderCommandEncoder vertex bytes binding utility. This will be
   * implemented alongside the full MTLMemoryManager. */
}

void MTLRenderPassState::bind_fragment_bytes(void *bytes, uint length, uint index)
{
  /* TODO(Metal): Implement RenderCommandEncoder fragment bytes binding utility. This will be
   * implemented alongside the full MTLMemoryManager. */
}

}  // blender::gpu