void MetalDevice::set_error(const string &error)
{
  std::lock_guard<std::mutex> lock(s_error_mutex);

  fprintf(stderr,
          "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
  fprintf(stderr,
          "https://docs.blender.org/manual/en/latest/render/cycles/gpu_rendering.html\n\n");
}
MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
    : Device(info, stats, profiler), texture_info(this, "texture_info", MEM_GLOBAL)
{
  auto usable_devices = MetalInfo::get_usable_devices();
  assert(mtlDevId < usable_devices.size());
  mtlDevice = usable_devices[mtlDevId];
  device_vendor = MetalInfo::get_device_vendor(mtlDevice);
  assert(device_vendor != METAL_GPU_UNKNOWN);
  metal_printf("Creating new Cycles device for Metal: %s\n", info.description.c_str());
  default_storage_mode = MTLResourceStorageModeManaged;

  if (@available(macos 11.0, *)) {
    if ([mtlDevice hasUnifiedMemory]) {
      default_storage_mode = MTLResourceStorageModeShared;
    }
  }
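
  /* On unified-memory devices the shared storage mode gives the CPU and GPU a
   * single copy of each buffer, so the explicit synchronization that managed
   * storage requires (didModifyRange / synchronizeResource) can be skipped. */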
  texture_bindings_2d = [mtlDevice newBufferWithLength:4096 options:default_storage_mode];
  texture_bindings_3d = [mtlDevice newBufferWithLength:4096 options:default_storage_mode];

  stats.mem_alloc(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
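
  /* texture_bindings_2d/3d are argument buffers holding one texture handle per
   * texture slot. load_texture_info() rewrites them through mtlTextureArgEncoder,
   * and tex_alloc() grows them once the slot count outgrows these initial
   * 4096 bytes. */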
  switch (device_vendor) {
    case METAL_GPU_INTEL: {
      max_threads_per_threadgroup = 64;
      break;
    }
    case METAL_GPU_AMD: {
      max_threads_per_threadgroup = 128;
      break;
    }
    case METAL_GPU_APPLE: {
      max_threads_per_threadgroup = 512;

      /* Intersection kernels specialize quickly on Apple GPUs, so enable that
       * level by default. */
      kernel_specialization_level = PSO_SPECIALIZED_INTERSECT;
      break;
    }
  }
  if (auto metalrt = getenv("CYCLES_METALRT")) {
    use_metalrt = (atoi(metalrt) != 0);
  }

  if (getenv("CYCLES_DEBUG_METAL_CAPTURE_KERNEL")) {
    capture_enabled = true;
  }
  if (auto envstr = getenv("CYCLES_METAL_SPECIALIZATION_LEVEL")) {
    kernel_specialization_level = (MetalPipelineType)atoi(envstr);
  }
  metal_printf("kernel_specialization_level = %s\n",
               kernel_type_as_string(
                   (MetalPipelineType)min((int)kernel_specialization_level, (int)PSO_NUM - 1)));
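
  /* Specialization levels: PSO_GENERIC pipelines work for any scene, while
   * PSO_SPECIALIZED_INTERSECT and PSO_SPECIALIZED_SHADE variants are rebuilt by
   * optimize_for_scene() with the current scene's constants baked in (see
   * make_source), trading compile time for faster kernels. */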
  MTLArgumentDescriptor *arg_desc_params = [[MTLArgumentDescriptor alloc] init];
  arg_desc_params.dataType = MTLDataTypePointer;
  arg_desc_params.access = MTLArgumentAccessReadOnly;
  mtlBufferKernelParamsEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_params ]];

  MTLArgumentDescriptor *arg_desc_texture = [[MTLArgumentDescriptor alloc] init];
  arg_desc_texture.dataType = MTLDataTypeTexture;
  arg_desc_texture.access = MTLArgumentAccessReadOnly;
  mtlTextureArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_texture ]];

  mtlGeneralCommandQueue = [mtlDevice newCommandQueue];
  if (@available(macos 12.0, *)) {
    MTLArgumentDescriptor *arg_desc_as = [[MTLArgumentDescriptor alloc] init];
    arg_desc_as.dataType = MTLDataTypeInstanceAccelerationStructure;
    arg_desc_as.access = MTLArgumentAccessReadOnly;
    mtlASArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_as ]];
    [arg_desc_as release];
  }
  NSMutableArray *ancillary_desc = [[NSMutableArray alloc] init];

  int index = 0;
  MTLArgumentDescriptor *arg_desc_tex = [[MTLArgumentDescriptor alloc] init];
  arg_desc_tex.dataType = MTLDataTypePointer;
  arg_desc_tex.access = MTLArgumentAccessReadOnly;

  arg_desc_tex.index = index++;
  [ancillary_desc addObject:[arg_desc_tex copy]];
  arg_desc_tex.index = index++;
  [ancillary_desc addObject:[arg_desc_tex copy]];

  [arg_desc_tex release];
  if (@available(macos 12.0, *)) {
    MTLArgumentDescriptor *arg_desc_as = [[MTLArgumentDescriptor alloc] init];
    arg_desc_as.dataType = MTLDataTypeInstanceAccelerationStructure;
    arg_desc_as.access = MTLArgumentAccessReadOnly;

    MTLArgumentDescriptor *arg_desc_ift = [[MTLArgumentDescriptor alloc] init];
    arg_desc_ift.dataType = MTLDataTypeIntersectionFunctionTable;
    arg_desc_ift.access = MTLArgumentAccessReadOnly;

    arg_desc_as.index = index++;
    [ancillary_desc addObject:[arg_desc_as copy]];
    arg_desc_ift.index = index++;
    [ancillary_desc addObject:[arg_desc_ift copy]];
    arg_desc_ift.index = index++;
    [ancillary_desc addObject:[arg_desc_ift copy]];
    arg_desc_ift.index = index++;
    [ancillary_desc addObject:[arg_desc_ift copy]];

    [arg_desc_ift release];
    [arg_desc_as release];
  }
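
  /* Layout of the ancillary argument buffer built above: slots 0-1 hold
   * pointers to the 2D and 3D texture binding arrays; when MetalRT is
   * available, slot 2 holds the instance acceleration structure and slots 3-5
   * hold intersection function tables. */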
  mtlAncillaryArgEncoder = [mtlDevice newArgumentEncoderWithArguments:ancillary_desc];

  for (int i = 0; i < ancillary_desc.count; i++) {
    [ancillary_desc[i] release];
  }
  [ancillary_desc release];

  [arg_desc_params release];
  [arg_desc_texture release];
}
MetalDevice::~MetalDevice()
{
  for (auto &tex : texture_slot_map) {
    if (tex) {
      [tex release];
      tex = nil;
    }
  }
  flush_delayed_free_list();
  if (texture_bindings_2d) {
    stats.mem_free(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
    [texture_bindings_2d release];
    [texture_bindings_3d release];
  }
  [mtlTextureArgEncoder release];
  [mtlBufferKernelParamsEncoder release];
  [mtlASArgEncoder release];
  [mtlAncillaryArgEncoder release];
  [mtlGeneralCommandQueue release];
}
bool MetalDevice::support_device(const uint kernel_features)

bool MetalDevice::check_peer_access(Device *peer_device)

bool MetalDevice::use_adaptive_compilation()
void MetalDevice::make_source(MetalPipelineType pso_type, const uint kernel_features)
{
  string global_defines;
  if (use_adaptive_compilation()) {
    global_defines += "#define __KERNEL_FEATURES__ " + to_string(kernel_features) + "\n";
  }

  if (use_metalrt) {
    global_defines += "#define __METALRT__\n";
    if (motion_blur) {
      global_defines += "#define __METALRT_MOTION__\n";
    }
  }

# ifdef WITH_CYCLES_DEBUG
  global_defines += "#define __KERNEL_DEBUG__\n";
# endif

  switch (device_vendor) {
    case METAL_GPU_INTEL:
      global_defines += "#define __KERNEL_METAL_INTEL__\n";
      break;
    case METAL_GPU_AMD:
      global_defines += "#define __KERNEL_METAL_AMD__\n";
      break;
    case METAL_GPU_APPLE:
      global_defines += "#define __KERNEL_METAL_APPLE__\n";
      break;
  }
  string &source = this->source[pso_type];
  source = "\n#include \"kernel/device/metal/kernel.metal\"\n";
  string baked_constants;

  if (pso_type != PSO_GENERIC) {
    const double starttime = time_dt();

# define KERNEL_STRUCT_BEGIN(name, parent) \
  string_replace_same_length(source, "kernel_data." #parent ".", "kernel_data_" #parent "_");

# define KERNEL_STRUCT_MEMBER(parent, _type, name) \
  baked_constants += string(#parent "." #name "=") + \
                     to_string(_type(launch_params.data.parent.name)) + "\n";
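
    /* The two macros above patch the kernel source and capture current values:
     * KERNEL_STRUCT_BEGIN rewrites "kernel_data.<parent>." references to
     * "kernel_data_<parent>_" (string_replace_same_length keeps offsets
     * stable), and KERNEL_STRUCT_MEMBER appends "parent.name=value" lines to
     * baked_constants so the specialized build can be identified later. */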
    global_defines += "#define __KERNEL_USE_DATA_CONSTANTS__\n";

    metal_printf("KernelData patching took %.1f ms\n", (time_dt() - starttime) * 1000.0);
  }
  source = global_defines + source;
  metal_printf("================\n%s================\n%s================\n",
               global_defines.c_str(),
               baked_constants.c_str());
  MD5Hash md5;
  md5.append(baked_constants);
  source_md5[pso_type] = md5.get_hex();
}
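
/* The hash covers both the kernel source and the baked constants, so a
 * specialized pipeline is only reused when the scene constants still match. */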
bool MetalDevice::load_kernels(const uint _kernel_features)
{
  kernel_features = _kernel_features;

  if (!support_device(kernel_features)) {
    return false;
  }

  /* Only the generic kernels are loaded here; specialized variants are
   * requested later from optimize_for_scene(). */
  bool result = compile_and_load(PSO_GENERIC);

  reserve_local_memory(kernel_features);
  return result;
}
bool MetalDevice::compile_and_load(MetalPipelineType pso_type)
{
  make_source(pso_type, kernel_features);

  if (!MetalDeviceKernels::should_load_kernels(this, pso_type)) {
    /* A full set of matching pipelines is already cached or in flight. */
    metal_printf("%s kernels already requested\n", kernel_type_as_string(pso_type));
    return true;
  }

  MTLCompileOptions *options = [[MTLCompileOptions alloc] init];
# if defined(MAC_OS_VERSION_13_0)
  if (@available(macos 13.0, *)) {
    if (device_vendor == METAL_GPU_INTEL) {
      [options setOptimizationLevel:MTLLibraryOptimizationLevelSize];
    }
  }
# endif
  if (@available(macOS 12.0, *)) {
    options.languageVersion = MTLLanguageVersion2_4;
  }

  if (getenv("CYCLES_METAL_PROFILING") || getenv("CYCLES_METAL_DEBUG")) {
    /* Debug dump of the generated kernel source. */
  }

  const double starttime = time_dt();

  NSError *error = nil;
  mtlLibrary[pso_type] = [mtlDevice newLibraryWithSource:@(source[pso_type].c_str())
                                                 options:options
                                                   error:&error];
  if (!mtlLibrary[pso_type]) {
    NSString *err = [error localizedDescription];
    set_error(string_printf("Failed to compile library:\n%s", [err UTF8String]));
  }
  metal_printf("Front-end compilation finished in %.1f seconds (%s)\n",
               time_dt() - starttime,
               kernel_type_as_string(pso_type));

  [options release];

  return MetalDeviceKernels::load(this, pso_type);
}
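
/* newLibraryWithSource performs only the front-end compile (Metal source to
 * library); building the actual compute pipeline states is deferred to
 * MetalDeviceKernels::load(). */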
void MetalDevice::reserve_local_memory(const uint kernel_features)

void MetalDevice::init_host_memory()
void MetalDevice::load_texture_info()
{
  if (need_texture_info) {
    /* Unset the flag before copying, so concurrent allocations can re-set it. */
    need_texture_info = false;
    texture_info.copy_to_device();

    int num_textures = texture_info.size();

    for (int tex = 0; tex < num_textures; tex++) {
      uint64_t offset = tex * sizeof(void *);
      id<MTLTexture> metal_texture = texture_slot_map[tex];
      if (!metal_texture) {
        [mtlTextureArgEncoder setArgumentBuffer:texture_bindings_2d offset:offset];
        [mtlTextureArgEncoder setTexture:nil atIndex:0];
        [mtlTextureArgEncoder setArgumentBuffer:texture_bindings_3d offset:offset];
        [mtlTextureArgEncoder setTexture:nil atIndex:0];
      }
      else {
        MTLTextureType type = metal_texture.textureType;
        [mtlTextureArgEncoder setArgumentBuffer:texture_bindings_2d offset:offset];
        [mtlTextureArgEncoder setTexture:(type == MTLTextureType2D ? metal_texture : nil)
                                 atIndex:0];
        [mtlTextureArgEncoder setArgumentBuffer:texture_bindings_3d offset:offset];
        [mtlTextureArgEncoder setTexture:(type == MTLTextureType3D ? metal_texture : nil)
                                 atIndex:0];
      }
    }
    if (default_storage_mode == MTLResourceStorageModeManaged) {
      [texture_bindings_2d didModifyRange:NSMakeRange(0, num_textures * sizeof(void *))];
      [texture_bindings_3d didModifyRange:NSMakeRange(0, num_textures * sizeof(void *))];
    }
  }
}
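
/* Managed buffers keep separate CPU and GPU copies; didModifyRange marks the
 * CPU-side writes so Metal uploads just that range before the next GPU use.
 * Shared buffers need no such step. */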
void MetalDevice::erase_allocation(device_memory &mem)
{
  auto it = metal_mem_map.find(&mem);
  if (it != metal_mem_map.end()) {
    MetalMem *mmem = it->second.get();

    /* Blank out the launch-params pointer that referenced this allocation. */
    if (mmem->pointer_index >= 0) {
      device_ptr *pointers = (device_ptr *)&launch_params;
      pointers[mmem->pointer_index] = 0;
    }
    metal_mem_map.erase(it);
  }
}
MetalDevice::MetalMem *MetalDevice::generic_alloc(device_memory &mem)
{
  id<MTLBuffer> metal_buffer = nil;
  MTLResourceOptions options = default_storage_mode;

  /* "RenderBuffers" is always allocated as managed storage. */
  if (strstr(mem.name, "RenderBuffers")) {
    options = MTLResourceStorageModeManaged;
  }
  if (mem.type == MEM_DEVICE_ONLY) {
    /* Device-only allocations never need a host-visible copy. */
    options = MTLResourceStorageModePrivate;
  }
  metal_buffer = [mtlDevice newBufferWithLength:mem.memory_size() options:options];
  if (!metal_buffer) {
    set_error("System is out of GPU memory");
    return nullptr;
  }
  metal_buffer.label = [[NSString alloc] initWithFormat:@"%s", mem.name];
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);

  assert(metal_mem_map.count(&mem) == 0);
  MetalMem *mmem = new MetalMem;
  metal_mem_map[&mem] = std::unique_ptr<MetalMem>(mmem);

  mmem->mtlBuffer = metal_buffer;
  if (options != MTLResourceStorageModePrivate) {
    mmem->hostPtr = [metal_buffer contents];
  }
  else {
    mmem->hostPtr = nullptr;
  }
  if (metal_buffer.storageMode == MTLStorageModeShared) {
    mmem->use_UMA = true;
  }
  else {
    mmem->use_UMA = false;
  }
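
  /* With use_UMA set, the same MTLBuffer backs both the host and device view
   * of the allocation, so host-to-device copies can be skipped entirely. */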
void MetalDevice::generic_copy_to(device_memory &mem)
{
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  MetalMem &mmem = *metal_mem_map.at(&mem);

  if (mmem.mtlBuffer.storageMode == MTLStorageModeManaged) {
    [mmem.mtlBuffer didModifyRange:NSMakeRange(0, mem.memory_size())];
  }
}
void MetalDevice::generic_free(device_memory &mem)
{
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  MetalMem &mmem = *metal_mem_map.at(&mem);
  size_t size = mmem.size;
  /* Non-shared buffers are freed immediately; for UMA (shared) allocations,
   * wait until the host side has also released its reference. */
  bool free_mtlBuffer = false;
  if (mmem.use_UMA) {
    if (mem.shared_pointer == nullptr) {
      free_mtlBuffer = true;
    }
  }
  else {
    free_mtlBuffer = true;
  }
  if (free_mtlBuffer) {
    mmem.use_UMA = false;

    /* Release deferred until flush_delayed_free_list(), after any in-flight
     * command buffers have completed. */
    delayed_free_list.push_back(mmem.mtlBuffer);
    mmem.mtlBuffer = nil;
  }

  erase_allocation(mem);
}
  assert(!"mem_alloc not supported for textures.");

  generic_copy_to(mem);
void MetalDevice::mem_copy_from(device_memory &mem, size_t y, size_t w, size_t h, size_t elem)
{
  bool subcopy = (w >= 0 && h >= 0);
  const size_t offset = subcopy ? (elem * y * w) : 0;
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  MetalMem &mmem = *metal_mem_map.at(&mem);

  if ([mmem.mtlBuffer storageMode] == MTLStorageModeManaged) {
    id<MTLCommandBuffer> cmdBuffer = [mtlGeneralCommandQueue commandBuffer];
    id<MTLBlitCommandEncoder> blitEncoder = [cmdBuffer blitCommandEncoder];
    [blitEncoder synchronizeResource:mmem.mtlBuffer];
    [blitEncoder endEncoding];
    [cmdBuffer commit];
    [cmdBuffer waitUntilCompleted];
  }
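
  /* synchronizeResource is the read-back counterpart of didModifyRange: it
   * copies GPU-side modifications of a managed buffer into its CPU-visible
   * copy, and waitUntilCompleted blocks until that blit has finished. */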
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  MetalMem &mmem = *metal_mem_map.at(&mem);
  memset(mmem.hostPtr, 0, size);
  if ([mmem.mtlBuffer storageMode] == MTLStorageModeManaged) {
    [mmem.mtlBuffer didModifyRange:NSMakeRange(0, size)];
  }
void MetalDevice::optimize_for_scene(Scene *scene)
{
  MetalPipelineType specialization_level = kernel_specialization_level;

  if (specialization_level < PSO_SPECIALIZED_INTERSECT) {
    return;
  }

  /* Intersection kernels are fast to specialize, so build them synchronously. */
  compile_and_load(PSO_SPECIALIZED_INTERSECT);
  if (specialization_level < PSO_SPECIALIZED_SHADE) {
    return;
  }

  auto specialize_shade_fn = ^() {
    compile_and_load(PSO_SPECIALIZED_SHADE);
    async_compile_and_load = false;
  };

  bool async_specialize_shade = true;
  /* Block during profiling runs so kernel timings are not skewed. */
  if (getenv("CYCLES_METAL_PROFILING") != nullptr) {
    async_specialize_shade = false;
  }

  if (async_specialize_shade) {
    if (!async_compile_and_load) {
      async_compile_and_load = true;
      dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
                     specialize_shade_fn);
    }
    else {
      metal_printf(
          "Async PSO_SPECIALIZED_SHADE load request already in progress - dropping request\n");
    }
  }
  else {
    specialize_shade_fn();
  }
}
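
/* Shade specialization can take a while, so it normally runs as a block on a
 * global dispatch queue. The async_compile_and_load flag ensures only one
 * request is in flight at a time; duplicate requests are dropped. */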
void MetalDevice::const_copy_to(const char *name, void *host, size_t size)
{
  if (strcmp(name, "data") == 0) {
    memcpy(&launch_params.data, host, size);
    return;
  }
  auto update_launch_pointers =
      [&](size_t offset, void *data, size_t data_size, size_t pointers_size) {
        memcpy((uint8_t *)&launch_params + offset, data, data_size);

        MetalMem **mmem = (MetalMem **)data;
        int pointer_count = pointers_size / sizeof(device_ptr);
        int pointer_index = offset / sizeof(device_ptr);
        for (int i = 0; i < pointer_count; i++) {
          if (mmem[i]) {
            mmem[i]->pointer_index = pointer_index + i;
          }
        }
      };
  /* Update data storage pointers in launch parameters. */
  if (strcmp(name, "integrator_state") == 0) {
    /* IntegratorStateGPU is contiguous pointers. */
    const size_t pointer_block_size = offsetof(IntegratorStateGPU, sort_partition_divisor);
    update_launch_pointers(
        offsetof(KernelParamsMetal, integrator_state), host, size, pointer_block_size);
  }
# define KERNEL_DATA_ARRAY(data_type, tex_name) \
  else if (strcmp(name, #tex_name) == 0) \
  { \
    update_launch_pointers(offsetof(KernelParamsMetal, tex_name), host, size, size); \
  }
# include "kernel/data_arrays.h"
# undef KERNEL_DATA_ARRAY
}
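
/* KERNEL_DATA_ARRAY is an X-macro: including the data-array list header while
 * it is defined expands to one "else if" branch per array. For an array named
 * e.g. "tri_verts" the expansion is:
 *
 *   else if (strcmp(name, "tri_verts") == 0) {
 *     update_launch_pointers(offsetof(KernelParamsMetal, tri_verts), host, size, size);
 *   }
 */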
  generic_copy_to(mem);

  generic_copy_to(mem);
  if (slot >= texture_info.size()) {
    texture_info.resize(round_up(slot + 1, 128));
  }

  texture_info[slot] = mem.info;
  need_texture_info = true;
817 "Texture exceeds maximum allowed size of 16384 x 16384 (requested: %zu x %zu)",
  MTLStorageMode storage_mode = MTLStorageModeManaged;
  if (@available(macos 10.15, *)) {
    if ([mtlDevice hasUnifiedMemory] && device_vendor == METAL_GPU_APPLE) {
      storage_mode = MTLStorageModeShared;
    }
  }

  string bind_name = mem.name;
  MTLPixelFormat format;
  switch (mem.data_type) {
    case TYPE_UCHAR: {
      MTLPixelFormat formats[] = {MTLPixelFormatR8Unorm,
                                  MTLPixelFormatRG8Unorm,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA8Unorm};
      format = formats[mem.data_elements - 1];
    } break;
    case TYPE_UINT16: {
      MTLPixelFormat formats[] = {MTLPixelFormatR16Unorm,
                                  MTLPixelFormatRG16Unorm,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA16Unorm};
      format = formats[mem.data_elements - 1];
    } break;
    case TYPE_UINT: {
      MTLPixelFormat formats[] = {MTLPixelFormatR32Uint,
                                  MTLPixelFormatRG32Uint,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA32Uint};
      format = formats[mem.data_elements - 1];
    } break;
    case TYPE_INT: {
      MTLPixelFormat formats[] = {MTLPixelFormatR32Sint,
                                  MTLPixelFormatRG32Sint,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA32Sint};
      format = formats[mem.data_elements - 1];
    } break;
    case TYPE_FLOAT: {
      MTLPixelFormat formats[] = {MTLPixelFormatR32Float,
                                  MTLPixelFormatRG32Float,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA32Float};
      format = formats[mem.data_elements - 1];
    } break;
    case TYPE_HALF: {
      MTLPixelFormat formats[] = {MTLPixelFormatR16Float,
                                  MTLPixelFormatRG16Float,
                                  MTLPixelFormatInvalid,
                                  MTLPixelFormatRGBA16Float};
      format = formats[mem.data_elements - 1];
    } break;
    default:
      assert(0);
      return;
  }
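
  /* Each table is indexed by (data_elements - 1): 1, 2 and 4 component
   * variants map to R, RG and RGBA formats. The 3-component entry is
   * MTLPixelFormatInvalid because Metal has no packed RGB texture format;
   * the assert just below catches any attempt to use it. */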
  assert(format != MTLPixelFormatInvalid);

  id<MTLTexture> mtlTexture = nil;
  size_t src_pitch = mem.data_width * datatype_size(mem.data_type) * mem.data_elements;
  if (mem.data_depth > 1) {
    /* 3D texture. */
    MTLTextureDescriptor *desc;

    desc = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:format
                                                              width:mem.data_width
                                                             height:mem.data_height
                                                          mipmapped:NO];

    desc.storageMode = storage_mode;
    desc.usage = MTLTextureUsageShaderRead;

    desc.textureType = MTLTextureType3D;
    desc.depth = mem.data_depth;

    mtlTexture = [mtlDevice newTextureWithDescriptor:desc];

    /* Upload one 2D slice at a time. */
    const size_t imageBytes = src_pitch * mem.data_height;
    for (size_t d = 0; d < mem.data_depth; d++) {
      const size_t offset = d * imageBytes;
      [mtlTexture replaceRegion:MTLRegionMake3D(0, 0, d, mem.data_width, mem.data_height, 1)
                    mipmapLevel:0
                      withBytes:(uint8_t *)mem.host_pointer + offset
                    bytesPerRow:src_pitch];
    }
  }
  else if (mem.data_height > 0) {
    /* 2D texture. */
    MTLTextureDescriptor *desc;

    desc = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:format
                                                              width:mem.data_width
                                                             height:mem.data_height
                                                          mipmapped:NO];

    desc.storageMode = storage_mode;
    desc.usage = MTLTextureUsageShaderRead;

    mtlTexture = [mtlDevice newTextureWithDescriptor:desc];

    [mtlTexture replaceRegion:MTLRegionMake2D(0, 0, mem.data_width, mem.data_height)
                  mipmapLevel:0
                    withBytes:mem.host_pointer
                  bytesPerRow:src_pitch];
  }
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  MetalMem *mmem = new MetalMem;
  metal_mem_map[&mem] = std::unique_ptr<MetalMem>(mmem);
  mmem->mtlTexture = mtlTexture;
  if (slot >= texture_info.size()) {
    /* Allocate some slots in advance, to reduce re-allocations. */
    texture_info.resize(slot + 128);
    texture_slot_map.resize(slot + 128);
  }

  ssize_t min_buffer_length = sizeof(void *) * texture_info.size();
  if (!texture_bindings_2d || (texture_bindings_2d.length < min_buffer_length)) {
    if (texture_bindings_2d) {
      delayed_free_list.push_back(texture_bindings_2d);
      delayed_free_list.push_back(texture_bindings_3d);

      stats.mem_free(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
    }
    texture_bindings_2d = [mtlDevice newBufferWithLength:min_buffer_length
                                                 options:default_storage_mode];
    texture_bindings_3d = [mtlDevice newBufferWithLength:min_buffer_length
                                                 options:default_storage_mode];

    stats.mem_alloc(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
  }
  if (@available(macos 10.14, *)) {
    /* Optimize the texture for GPU access. */
    id<MTLCommandBuffer> commandBuffer = [mtlGeneralCommandQueue commandBuffer];
    id<MTLBlitCommandEncoder> blitCommandEncoder = [commandBuffer blitCommandEncoder];
    [blitCommandEncoder optimizeContentsForGPUAccess:mtlTexture];
    [blitCommandEncoder endEncoding];
    [commandBuffer commit];
  }
  texture_slot_map[slot] = mtlTexture;
  texture_info[slot] = mem.info;
  need_texture_info = true;

  texture_info[slot].data = uint64_t(slot) | (uint64_t(sampler_index) << 32);
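
  /* The packed .data field lets the kernel recover both halves cheaply:
   *   const uint slot = data & 0xFFFFFFFF;  // texture slot
   *   const uint sampler = data >> 32;      // index into the sampler array
   */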
  if (metal_mem_map.count(&mem)) {
    std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
    MetalMem &mmem = *metal_mem_map.at(&mem);

    assert(texture_slot_map[mem.slot] == mmem.mtlTexture);
    texture_slot_map[mem.slot] = nil;

    if (mmem.mtlTexture) {
      delayed_free_list.push_back(mmem.mtlTexture);
      mmem.mtlTexture = nil;
    }
    erase_allocation(mem);
  }
unique_ptr<DeviceQueue> MetalDevice::gpu_queue_create()
{
  return make_unique<MetalDeviceQueue>(this);
}

bool MetalDevice::should_use_graphics_interop()
void MetalDevice::flush_delayed_free_list()
{
  std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
  for (auto &it : delayed_free_list) {
    [it release];
  }
  delayed_free_list.clear();
}
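
/* Deferred release: buffers and textures freed by the host are parked in
 * delayed_free_list and only released here, once no in-flight command buffer
 * can still reference them. */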
  BVHMetal *bvh_metal = static_cast<BVHMetal *>(bvh);
  bvh_metal->motion_blur = motion_blur;
  if (bvh_metal->build(progress, mtlDevice, mtlGeneralCommandQueue, refit)) {
    if (@available(macos 11.0, *)) {
      bvhMetalRT = bvh_metal;
    }
  }