Blender  V3.3
path_state.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: Apache-2.0
2  * Copyright 2011-2022 Blender Foundation */
3 
4 #pragma once
5 
7 
9 
10 /* Initialize queues, so that this path is considered terminated.
11  * Used for early outputs in the camera ray initialization, as well as initialization of split
12  * states for shadow catcher. */
14 {
15  INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;
16 #ifndef __KERNEL_GPU__
17  INTEGRATOR_STATE_WRITE(&state->shadow, shadow_path, queued_kernel) = 0;
18  INTEGRATOR_STATE_WRITE(&state->ao, shadow_path, queued_kernel) = 0;
19 #endif
20 }
21 
22 /* Minimalistic initialization of the path state, which is needed for early outputs in the
23  * integrator initialization to work. */
26  const int x,
27  const int y)
28 {
29  const uint render_pixel_index = (uint)tile->offset + x + y * tile->stride;
30 
32 
34 }
35 
36 /* Initialize the rest of the path state needed to continue the path integration. */
39  const int sample,
40  const uint rng_hash)
41 {
43  INTEGRATOR_STATE_WRITE(state, path, bounce) = 0;
44  INTEGRATOR_STATE_WRITE(state, path, diffuse_bounce) = 0;
45  INTEGRATOR_STATE_WRITE(state, path, glossy_bounce) = 0;
46  INTEGRATOR_STATE_WRITE(state, path, transmission_bounce) = 0;
47  INTEGRATOR_STATE_WRITE(state, path, transparent_bounce) = 0;
48  INTEGRATOR_STATE_WRITE(state, path, volume_bounce) = 0;
49  INTEGRATOR_STATE_WRITE(state, path, volume_bounds_bounce) = 0;
50  INTEGRATOR_STATE_WRITE(state, path, rng_hash) = rng_hash;
51  INTEGRATOR_STATE_WRITE(state, path, rng_offset) = PRNG_BASE_NUM;
54  INTEGRATOR_STATE_WRITE(state, path, mis_ray_pdf) = 0.0f;
55  INTEGRATOR_STATE_WRITE(state, path, min_ray_pdf) = FLT_MAX;
56  INTEGRATOR_STATE_WRITE(state, path, continuation_probability) = 1.0f;
57  INTEGRATOR_STATE_WRITE(state, path, throughput) = make_float3(1.0f, 1.0f, 1.0f);
58 
59 #ifdef __MNEE__
60  INTEGRATOR_STATE_WRITE(state, path, mnee) = 0;
61 #endif
62 
63  INTEGRATOR_STATE_WRITE(state, isect, object) = OBJECT_NONE;
64  INTEGRATOR_STATE_WRITE(state, isect, prim) = PRIM_NONE;
65 
66  if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
67  INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 0, object) = OBJECT_NONE;
69  state, volume_stack, 0, shader) = kernel_data.background.volume_shader;
70  INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 1, object) = OBJECT_NONE;
71  INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 1, shader) = SHADER_NONE;
72  }
73 
74 #ifdef __DENOISING_FEATURES__
75  if (kernel_data.kernel_features & KERNEL_FEATURE_DENOISING) {
77  INTEGRATOR_STATE_WRITE(state, path, denoising_feature_throughput) = one_float3();
78  }
79 #endif
80 }
81 
83 {
84  uint32_t flag = INTEGRATOR_STATE(state, path, flag);
85 
86  /* ray through transparent keeps same flags from previous ray and is
87  * not counted as a regular bounce, transparent has separate max */
88  if (label & LABEL_TRANSPARENT) {
89  uint32_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce) + 1;
90 
91  flag |= PATH_RAY_TRANSPARENT;
92  if (transparent_bounce >= kernel_data.integrator.transparent_max_bounce) {
94  }
95 
96  if (!kernel_data.integrator.transparent_shadows)
97  flag |= PATH_RAY_MIS_SKIP;
98 
99  INTEGRATOR_STATE_WRITE(state, path, flag) = flag;
100  INTEGRATOR_STATE_WRITE(state, path, transparent_bounce) = transparent_bounce;
101  /* Random number generator next bounce. */
102  INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;
103  return;
104  }
105 
106  uint32_t bounce = INTEGRATOR_STATE(state, path, bounce) + 1;
107  if (bounce >= kernel_data.integrator.max_bounce) {
109  }
110 
112 
113 #ifdef __VOLUME__
114  if (label & LABEL_VOLUME_SCATTER) {
115  /* volume scatter */
116  flag |= PATH_RAY_VOLUME_SCATTER;
118  if (!(flag & PATH_RAY_ANY_PASS)) {
119  flag |= PATH_RAY_VOLUME_PASS;
120  }
121 
122  const int volume_bounce = INTEGRATOR_STATE(state, path, volume_bounce) + 1;
123  INTEGRATOR_STATE_WRITE(state, path, volume_bounce) = volume_bounce;
124  if (volume_bounce >= kernel_data.integrator.max_volume_bounce) {
126  }
127  }
128  else
129 #endif
130  {
131  /* surface reflection/transmission */
132  if (label & LABEL_REFLECT) {
133  flag |= PATH_RAY_REFLECT;
135 
136  if (label & LABEL_DIFFUSE) {
137  const int diffuse_bounce = INTEGRATOR_STATE(state, path, diffuse_bounce) + 1;
138  INTEGRATOR_STATE_WRITE(state, path, diffuse_bounce) = diffuse_bounce;
139  if (diffuse_bounce >= kernel_data.integrator.max_diffuse_bounce) {
141  }
142  }
143  else {
144  const int glossy_bounce = INTEGRATOR_STATE(state, path, glossy_bounce) + 1;
145  INTEGRATOR_STATE_WRITE(state, path, glossy_bounce) = glossy_bounce;
146  if (glossy_bounce >= kernel_data.integrator.max_glossy_bounce) {
148  }
149  }
150  }
151  else {
153 
154  flag |= PATH_RAY_TRANSMIT;
155 
158  }
159 
160  const int transmission_bounce = INTEGRATOR_STATE(state, path, transmission_bounce) + 1;
161  INTEGRATOR_STATE_WRITE(state, path, transmission_bounce) = transmission_bounce;
162  if (transmission_bounce >= kernel_data.integrator.max_transmission_bounce) {
164  }
165  }
166 
167  /* diffuse/glossy/singular */
168  if (label & LABEL_DIFFUSE) {
170  }
171  else if (label & LABEL_GLOSSY) {
172  flag |= PATH_RAY_GLOSSY;
173  }
174  else {
177  }
178 
179  /* Render pass categories. */
180  if (!(flag & PATH_RAY_ANY_PASS) && !(flag & PATH_RAY_TRANSPARENT_BACKGROUND)) {
181  flag |= PATH_RAY_SURFACE_PASS;
182  }
183  }
184 
185  INTEGRATOR_STATE_WRITE(state, path, flag) = flag;
186  INTEGRATOR_STATE_WRITE(state, path, bounce) = bounce;
187 
188  /* Random number generator next bounce. */
189  INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;
190 }
191 
#ifdef __VOLUME__
/* Advance the path state across a volume boundary mesh.
 *
 * Passing through a volume bounding mesh is not counted as a regular or
 * transparent bounce; only a dedicated bounds-bounce counter is incremented
 * as a sanity limit, in case repeated self-intersection gets the path stuck.
 *
 * Returns false when the VOLUME_BOUNDS_MAX cap is exceeded and the path
 * should be terminated, true when the path may continue. */
ccl_device_inline bool path_state_volume_next(IntegratorState state)
{
  const uint32_t bounds_bounce = INTEGRATOR_STATE(state, path, volume_bounds_bounce) + 1;
  INTEGRATOR_STATE_WRITE(state, path, volume_bounds_bounce) = bounds_bounce;

  if (bounds_bounce > VOLUME_BOUNDS_MAX) {
    /* Sanity cap hit: most likely stuck on a self-intersecting boundary. */
    return false;
  }

  /* Advance the RNG dimension offset for the next bounce. */
  INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;
  return true;
}
#endif
209 
211 {
212  const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
213 
214  uint32_t visibility = path_flag & PATH_RAY_ALL_VISIBILITY;
215 
216  /* For visibility, diffuse/glossy are for reflection only. */
217  if (visibility & PATH_RAY_TRANSMIT) {
218  visibility &= ~(PATH_RAY_DIFFUSE | PATH_RAY_GLOSSY);
219  }
220 
221  /* todo: this is not supported as its own ray visibility yet. */
222  if (path_flag & PATH_RAY_VOLUME_SCATTER) {
223  visibility |= PATH_RAY_DIFFUSE;
224  }
225 
226  visibility = SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility);
227 
228  return visibility;
229 }
230 
233  const uint32_t path_flag)
234 {
235  if (path_flag & PATH_RAY_TRANSPARENT) {
236  const uint32_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
237  /* Do at least specified number of bounces without RR. */
238  if (transparent_bounce <= kernel_data.integrator.transparent_min_bounce) {
239  return 1.0f;
240  }
241  }
242  else {
243  const uint32_t bounce = INTEGRATOR_STATE(state, path, bounce);
244  /* Do at least specified number of bounces without RR. */
245  if (bounce <= kernel_data.integrator.min_bounce) {
246  return 1.0f;
247  }
248  }
249 
250  /* Probabilistic termination: use sqrt() to roughly match typical view
251  * transform and do path termination a bit later on average. */
252  return min(sqrtf(reduce_max(fabs(INTEGRATOR_STATE(state, path, throughput)))), 1.0f);
253 }
254 
256 {
257  if (!kernel_data.integrator.ao_bounces) {
258  return false;
259  }
260 
261  const int bounce = INTEGRATOR_STATE(state, path, bounce) -
262  INTEGRATOR_STATE(state, path, transmission_bounce) -
263  (INTEGRATOR_STATE(state, path, glossy_bounce) > 0) + 1;
264  return (bounce > kernel_data.integrator.ao_bounces);
265 }
266 
267 /* Random Number Sampling Utility Functions
268  *
269  * For each random number in each step of the path we must have a unique
270  * dimension to avoid using the same sequence twice.
271  *
272  * For branches in the path we must be careful not to reuse the same number
273  * in a sequence and offset accordingly.
274  */
275 
276 /* RNG State loaded onto stack. */
277 typedef struct RNGState {
280  int sample;
282 
284  ccl_private RNGState *rng_state)
285 {
286  rng_state->rng_hash = INTEGRATOR_STATE(state, path, rng_hash);
287  rng_state->rng_offset = INTEGRATOR_STATE(state, path, rng_offset);
288  rng_state->sample = INTEGRATOR_STATE(state, path, sample);
289 }
290 
292  ccl_private RNGState *rng_state)
293 {
294  rng_state->rng_hash = INTEGRATOR_STATE(state, shadow_path, rng_hash);
295  rng_state->rng_offset = INTEGRATOR_STATE(state, shadow_path, rng_offset);
296  rng_state->sample = INTEGRATOR_STATE(state, shadow_path, sample);
297 }
298 
300  ccl_private const RNGState *rng_state,
301  int dimension)
302 {
303  return path_rng_1D(
304  kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension);
305 }
306 
308  ccl_private const RNGState *rng_state,
309  int dimension,
310  ccl_private float *fx,
311  ccl_private float *fy)
312 {
313  path_rng_2D(
314  kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension, fx, fy);
315 }
316 
318  ccl_private const RNGState *rng_state,
319  uint hash)
320 {
321  /* Use a hash instead of dimension, this is not great but avoids adding
322  * more dimensions to each bounce which reduces quality of dimensions we
323  * are already using. */
324  return path_rng_1D(
325  kg, cmj_hash_simple(rng_state->rng_hash, hash), rng_state->sample, rng_state->rng_offset);
326 }
327 
329  ccl_private const RNGState *rng_state,
330  int branch,
331  int num_branches,
332  int dimension)
333 {
334  return path_rng_1D(kg,
335  rng_state->rng_hash,
336  rng_state->sample * num_branches + branch,
337  rng_state->rng_offset + dimension);
338 }
339 
341  ccl_private const RNGState *rng_state,
342  int branch,
343  int num_branches,
344  int dimension,
345  ccl_private float *fx,
346  ccl_private float *fy)
347 {
348  path_rng_2D(kg,
349  rng_state->rng_hash,
350  rng_state->sample * num_branches + branch,
351  rng_state->rng_offset + dimension,
352  fx,
353  fy);
354 }
355 
356 /* Utility functions to get light termination value,
357  * since it might not be needed in many cases.
358  */
360  ccl_private const RNGState *state)
361 {
362  if (kernel_data.integrator.light_inv_rr_threshold > 0.0f) {
364  }
365  return 0.0f;
366 }
367 
unsigned int uint
Definition: BLI_sys_types.h:67
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
__forceinline int reduce_max(const avxi &v)
Definition: avxi.h:692
#define kernel_assert(cond)
Definition: cpu/compat.h:34
#define ccl_restrict
Definition: cuda/compat.h:50
#define ccl_private
Definition: cuda/compat.h:48
#define ccl_device_inline
Definition: cuda/compat.h:34
#define ccl_global
Definition: cuda/compat.h:43
#define CCL_NAMESPACE_END
Definition: cuda/compat.h:9
const char * label
#define kernel_data
const KernelGlobalsCPU *ccl_restrict KernelGlobals
const uint64_t render_pixel_index
ccl_global const KernelWorkTile * tile
const int state
ccl_device_inline uint cmj_hash_simple(uint i, uint p)
#define VOLUME_BOUNDS_MAX
Definition: kernel/types.h:35
#define SHADER_NONE
Definition: kernel/types.h:39
@ PRNG_BOUNCE_NUM
Definition: kernel/types.h:172
@ PRNG_LIGHT_TERMINATE
Definition: kernel/types.h:168
@ PRNG_BASE_NUM
Definition: kernel/types.h:162
#define PRIM_NONE
Definition: kernel/types.h:41
@ PATH_RAY_VOLUME_PASS
Definition: kernel/types.h:270
@ PATH_RAY_TERMINATE_AFTER_TRANSPARENT
Definition: kernel/types.h:245
@ PATH_RAY_SINGULAR
Definition: kernel/types.h:199
@ PATH_RAY_REFLECT
Definition: kernel/types.h:195
@ PATH_RAY_TRANSPARENT
Definition: kernel/types.h:200
@ PATH_RAY_TRANSMIT
Definition: kernel/types.h:196
@ PATH_RAY_VOLUME_SCATTER
Definition: kernel/types.h:201
@ PATH_RAY_MIS_SKIP
Definition: kernel/types.h:225
@ PATH_RAY_DENOISING_FEATURES
Definition: kernel/types.h:266
@ PATH_RAY_GLOSSY
Definition: kernel/types.h:198
@ PATH_RAY_SURFACE_PASS
Definition: kernel/types.h:269
@ PATH_RAY_ALL_VISIBILITY
Definition: kernel/types.h:217
@ PATH_RAY_DIFFUSE
Definition: kernel/types.h:197
@ PATH_RAY_TRANSPARENT_BACKGROUND
Definition: kernel/types.h:235
@ PATH_RAY_CAMERA
Definition: kernel/types.h:194
@ PATH_RAY_ANY_PASS
Definition: kernel/types.h:271
@ PATH_RAY_TERMINATE_ON_NEXT_SURFACE
Definition: kernel/types.h:238
@ PATH_RAY_DIFFUSE_ANCESTOR
Definition: kernel/types.h:229
@ KERNEL_FEATURE_VOLUME
@ KERNEL_FEATURE_DENOISING
#define OBJECT_NONE
Definition: kernel/types.h:40
@ LABEL_TRANSMIT
Definition: kernel/types.h:317
@ LABEL_TRANSMIT_TRANSPARENT
Definition: kernel/types.h:324
@ LABEL_VOLUME_SCATTER
Definition: kernel/types.h:323
@ LABEL_DIFFUSE
Definition: kernel/types.h:319
@ LABEL_SINGULAR
Definition: kernel/types.h:321
@ LABEL_GLOSSY
Definition: kernel/types.h:320
@ LABEL_REFLECT
Definition: kernel/types.h:318
@ LABEL_TRANSPARENT
Definition: kernel/types.h:322
#define SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility)
Definition: kernel/types.h:306
ccl_device_inline float2 fabs(const float2 &a)
Definition: math_float2.h:222
ccl_device_inline float3 one_float3()
Definition: math_float3.h:89
#define sqrtf(x)
Definition: metal/compat.h:243
#define make_float3(x, y, z)
Definition: metal/compat.h:204
#define hash
Definition: noise.c:153
ccl_device_inline void path_state_init(IntegratorState state, ccl_global const KernelWorkTile *ccl_restrict tile, const int x, const int y)
Definition: path_state.h:24
ccl_device_inline float path_state_continuation_probability(KernelGlobals kg, ConstIntegratorState state, const uint32_t path_flag)
Definition: path_state.h:231
ccl_device_inline bool path_state_ao_bounce(KernelGlobals kg, ConstIntegratorState state)
Definition: path_state.h:255
ccl_device_inline void shadow_path_state_rng_load(ConstIntegratorShadowState state, ccl_private RNGState *rng_state)
Definition: path_state.h:291
ccl_device_inline void path_state_init_integrator(KernelGlobals kg, IntegratorState state, const int sample, const uint rng_hash)
Definition: path_state.h:37
ccl_device_inline float path_state_rng_1D(KernelGlobals kg, ccl_private const RNGState *rng_state, int dimension)
Definition: path_state.h:299
ccl_device_inline void path_branched_rng_2D(KernelGlobals kg, ccl_private const RNGState *rng_state, int branch, int num_branches, int dimension, ccl_private float *fx, ccl_private float *fy)
Definition: path_state.h:340
ccl_device_inline float path_branched_rng_1D(KernelGlobals kg, ccl_private const RNGState *rng_state, int branch, int num_branches, int dimension)
Definition: path_state.h:328
ccl_device_inline float path_state_rng_light_termination(KernelGlobals kg, ccl_private const RNGState *state)
Definition: path_state.h:359
struct RNGState RNGState
ccl_device_inline float path_state_rng_1D_hash(KernelGlobals kg, ccl_private const RNGState *rng_state, uint hash)
Definition: path_state.h:317
ccl_device_inline void path_state_rng_load(ConstIntegratorState state, ccl_private RNGState *rng_state)
Definition: path_state.h:283
ccl_device_inline uint path_state_ray_visibility(ConstIntegratorState state)
Definition: path_state.h:210
ccl_device_inline void path_state_next(KernelGlobals kg, IntegratorState state, int label)
Definition: path_state.h:82
CCL_NAMESPACE_BEGIN ccl_device_inline void path_state_init_queues(IntegratorState state)
Definition: path_state.h:13
ccl_device_inline void path_state_rng_2D(KernelGlobals kg, ccl_private const RNGState *rng_state, int dimension, ccl_private float *fx, ccl_private float *fy)
Definition: path_state.h:307
CCL_NAMESPACE_BEGIN ccl_device_forceinline float path_rng_1D(KernelGlobals kg, uint rng_hash, int sample, int dimension)
Definition: pattern.h:42
ccl_device_forceinline void path_rng_2D(KernelGlobals kg, uint rng_hash, int sample, int dimension, ccl_private float *fx, ccl_private float *fy)
Definition: pattern.h:76
#define min(a, b)
Definition: sort.c:35
const IntegratorShadowStateCPU *ccl_restrict ConstIntegratorShadowState
Definition: state.h:150
#define INTEGRATOR_STATE_ARRAY_WRITE(state, nested_struct, array_index, member)
Definition: state.h:159
IntegratorStateCPU *ccl_restrict IntegratorState
Definition: state.h:147
#define INTEGRATOR_STATE_WRITE(state, nested_struct, member)
Definition: state.h:155
const IntegratorStateCPU *ccl_restrict ConstIntegratorState
Definition: state.h:148
#define INTEGRATOR_STATE(state, nested_struct, member)
Definition: state.h:154
unsigned int uint32_t
Definition: stdint.h:80
uint rng_hash
Definition: path_state.h:278
int sample
Definition: path_state.h:280
uint rng_offset
Definition: path_state.h:279