Blender V3.3
workbench_shader.cc
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2020 Blender Foundation. */

#include "DRW_render.h"

#include <string>

#include "workbench_engine.h"
#include "workbench_private.h"

/* Maximum number of variations. */
#define MAX_LIGHTING 3

enum eWORKBENCH_TextureType {
  TEXTURE_SH_NONE = 0,
  TEXTURE_SH_SINGLE,
  TEXTURE_SH_TILED,
  TEXTURE_SH_MAX,
};
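/* Lazily-built cache of every shader variation used by the Workbench engine. Entries
 * start out as nullptr, are created on first request and are released again in
 * workbench_shader_free(). */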
static struct {
  struct GPUShader
      *opaque_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX][TEXTURE_SH_MAX];
  struct GPUShader *transp_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX]
                                           [MAX_LIGHTING][TEXTURE_SH_MAX];

  struct GPUShader *opaque_composite_sh[MAX_LIGHTING];
  struct GPUShader *oit_resolve_sh;
  struct GPUShader *outline_sh;
  struct GPUShader *merge_infront_sh;

  struct GPUShader *shadow_depth_pass_sh[2];
  struct GPUShader *shadow_depth_fail_sh[2][2];

  struct GPUShader *cavity_sh[2][2];

  struct GPUShader *dof_prepare_sh;
  struct GPUShader *dof_downsample_sh;
  struct GPUShader *dof_blur1_sh;
  struct GPUShader *dof_blur2_sh;
  struct GPUShader *dof_resolve_sh;

  struct GPUShader *aa_accum_sh;
  struct GPUShader *smaa_sh[3];

  struct GPUShader *volume_sh[2][2][3][2];

} e_data = {{{{nullptr}}}};

/* -------------------------------------------------------------------- */
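/* Helpers translating each enum variation into the suffix used to build the
 * shader create-info names below. */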
static const char *workbench_lighting_mode_to_str(int light)
{
  switch (light) {
    default:
      BLI_assert_msg(0, "Error: Unknown lighting mode.");
      ATTR_FALLTHROUGH;
    case V3D_LIGHTING_STUDIO:
      return "_studio";
    case V3D_LIGHTING_MATCAP:
      return "_matcap";
    case V3D_LIGHTING_FLAT:
      return "_flat";
  }
  return "";
}

static const char *workbench_datatype_mode_to_str(eWORKBENCH_DataType datatype)
{
  switch (datatype) {
    default:
      BLI_assert_msg(0, "Error: Unknown data mode.");
      ATTR_FALLTHROUGH;
    case WORKBENCH_DATATYPE_MESH:
      return "_mesh";
    case WORKBENCH_DATATYPE_HAIR:
      return "_hair";
    case WORKBENCH_DATATYPE_POINTCLOUD:
      return "_ptcloud";
  }
}

static const char *workbench_volume_interp_to_str(eWORKBENCH_VolumeInterpType interp_type)
{
  switch (interp_type) {
    default:
      BLI_assert_msg(0, "Error: Unknown interpolation mode.");
      ATTR_FALLTHROUGH;
    case WORKBENCH_VOLUME_INTERP_LINEAR:
      return "_linear";
    case WORKBENCH_VOLUME_INTERP_CUBIC:
      return "_cubic";
    case WORKBENCH_VOLUME_INTERP_CLOSEST:
      return "_closest";
  }
}

static const char *workbench_texture_type_to_str(eWORKBENCH_TextureType tex_type)
{
  switch (tex_type) {
    default:
      BLI_assert_msg(0, "Error: Unknown texture mode.");
      ATTR_FALLTHROUGH;
    case TEXTURE_SH_NONE:
      return "_tex_none";
    case TEXTURE_SH_TILED:
      return "_tex_tile";
    case TEXTURE_SH_SINGLE:
      return "_tex_single";
  }
}

static eWORKBENCH_TextureType workbench_texture_type_get(bool textured, bool tiled)
{
  return textured ? (tiled ? TEXTURE_SH_TILED : TEXTURE_SH_SINGLE) : TEXTURE_SH_NONE;
}

/* -------------------------------------------------------------------- */
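/* Prepass shaders are cached in e_data and compiled on first use from a create-info
 * name assembled out of the suffixes above, e.g. "workbench_opaque_mesh_tex_none_no_clip"
 * or "workbench_transp_studio_hair_tex_single_clip". */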
static GPUShader *workbench_shader_get_ex(WORKBENCH_PrivateData *wpd,
                                          bool transp,
                                          eWORKBENCH_DataType datatype,
                                          bool textured,
                                          bool tiled)
{
  eWORKBENCH_TextureType tex_type = workbench_texture_type_get(textured, tiled);
  int light = wpd->shading.light;
  BLI_assert(light < MAX_LIGHTING);
  struct GPUShader **shader =
      (transp) ? &e_data.transp_prepass_sh_cache[wpd->sh_cfg][datatype][light][tex_type] :
                 &e_data.opaque_prepass_sh_cache[wpd->sh_cfg][datatype][tex_type];

  if (*shader == nullptr) {
    std::string create_info_name = "workbench";
    create_info_name += (transp) ? "_transp" : "_opaque";
    if (transp) {
      create_info_name += workbench_lighting_mode_to_str(light);
    }
    create_info_name += workbench_datatype_mode_to_str(datatype);
    create_info_name += workbench_texture_type_to_str(tex_type);
    create_info_name += (wpd->sh_cfg == GPU_SHADER_CFG_CLIPPED) ? "_clip" : "_no_clip";

    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return *shader;
}
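/* Thin public wrappers around workbench_shader_get_ex() covering each
 * opaque/transparent and textured/untextured combination. */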
GPUShader *workbench_shader_opaque_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype)
{
  return workbench_shader_get_ex(wpd, false, datatype, false, false);
}

GPUShader *workbench_shader_opaque_image_get(WORKBENCH_PrivateData *wpd,
                                             eWORKBENCH_DataType datatype,
                                             bool tiled)
{
  return workbench_shader_get_ex(wpd, false, datatype, true, tiled);
}

GPUShader *workbench_shader_transparent_get(WORKBENCH_PrivateData *wpd,
                                            eWORKBENCH_DataType datatype)
{
  return workbench_shader_get_ex(wpd, true, datatype, false, false);
}

GPUShader *workbench_shader_transparent_image_get(WORKBENCH_PrivateData *wpd,
                                                  eWORKBENCH_DataType datatype,
                                                  bool tiled)
{
  return workbench_shader_get_ex(wpd, true, datatype, true, tiled);
}

GPUShader *workbench_shader_composite_get(WORKBENCH_PrivateData *wpd)
{
  int light = wpd->shading.light;
  struct GPUShader **shader = &e_data.opaque_composite_sh[light];
  BLI_assert(light < MAX_LIGHTING);

  if (*shader == nullptr) {
    std::string create_info_name = "workbench_composite";
    create_info_name += workbench_lighting_mode_to_str(light);
    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return *shader;
}

GPUShader *workbench_shader_merge_infront_get(WORKBENCH_PrivateData *UNUSED(wpd))
{
  if (e_data.merge_infront_sh == nullptr) {
    e_data.merge_infront_sh = GPU_shader_create_from_info_name("workbench_merge_infront");
  }
  return e_data.merge_infront_sh;
}

GPUShader *workbench_shader_transparent_resolve_get(WORKBENCH_PrivateData *UNUSED(wpd))
{
  if (e_data.oit_resolve_sh == nullptr) {
    e_data.oit_resolve_sh = GPU_shader_create_from_info_name("workbench_transparent_resolve");
  }
  return e_data.oit_resolve_sh;
}
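/* Stencil shadow-volume shaders: depth-pass or depth-fail method, with variants for
 * manifold geometry and end caps (see the "_pass"/"_fail", "_manifold" and "_caps"
 * suffixes below). */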
static GPUShader *workbench_shader_shadow_pass_get_ex(bool depth_pass, bool manifold, bool cap)
{
  struct GPUShader **shader = (depth_pass) ? &e_data.shadow_depth_pass_sh[manifold] :
                                             &e_data.shadow_depth_fail_sh[manifold][cap];

  if (*shader == nullptr) {
    std::string create_info_name = "workbench_shadow";
    create_info_name += (depth_pass) ? "_pass" : "_fail";
    create_info_name += (manifold) ? "_manifold" : "_no_manifold";
    create_info_name += (cap) ? "_caps" : "_no_caps";
#if DEBUG_SHADOW_VOLUME
    create_info_name += "_debug";
#endif
    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return *shader;
}

GPUShader *workbench_shader_shadow_pass_get(bool manifold)
{
  return workbench_shader_shadow_pass_get_ex(true, manifold, false);
}

GPUShader *workbench_shader_shadow_fail_get(bool manifold, bool cap)
{
  return workbench_shader_shadow_pass_get_ex(false, manifold, cap);
}
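/* Screen-space cavity/curvature effect. At least one of the two options must be
 * enabled by the caller. */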
GPUShader *workbench_shader_cavity_get(bool cavity, bool curvature)
{
  BLI_assert(cavity || curvature);
  struct GPUShader **shader = &e_data.cavity_sh[cavity][curvature];

  if (*shader == nullptr) {
    std::string create_info_name = "workbench_effect";
    create_info_name += (cavity) ? "_cavity" : "";
    create_info_name += (curvature) ? "_curvature" : "";
    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return *shader;
}

GPUShader *workbench_shader_outline_get(void)
{
  if (e_data.outline_sh == nullptr) {
    e_data.outline_sh = GPU_shader_create_from_info_name("workbench_effect_outline");
  }
  return e_data.outline_sh;
}
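/* Depth of field shaders are created together on first request and handed back
 * through the output pointers. */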
void workbench_shader_depth_of_field_get(GPUShader **prepare_sh,
                                         GPUShader **downsample_sh,
                                         GPUShader **blur1_sh,
                                         GPUShader **blur2_sh,
                                         GPUShader **resolve_sh)
{
  if (e_data.dof_prepare_sh == nullptr) {
    e_data.dof_prepare_sh = GPU_shader_create_from_info_name("workbench_effect_dof_prepare");
    e_data.dof_downsample_sh = GPU_shader_create_from_info_name("workbench_effect_dof_downsample");
#if 0 /* TODO(fclem): finish COC min_max optimization */
    e_data.dof_flatten_v_sh = GPU_shader_create_from_info_name("workbench_effect_dof_flatten_v");
    e_data.dof_flatten_h_sh = GPU_shader_create_from_info_name("workbench_effect_dof_flatten_h");
    e_data.dof_dilate_v_sh = GPU_shader_create_from_info_name("workbench_effect_dof_dilate_v");
    e_data.dof_dilate_h_sh = GPU_shader_create_from_info_name("workbench_effect_dof_dilate_h");
#endif
    e_data.dof_blur1_sh = GPU_shader_create_from_info_name("workbench_effect_dof_blur1");
    e_data.dof_blur2_sh = GPU_shader_create_from_info_name("workbench_effect_dof_blur2");
    e_data.dof_resolve_sh = GPU_shader_create_from_info_name("workbench_effect_dof_resolve");
  }

  *prepare_sh = e_data.dof_prepare_sh;
  *downsample_sh = e_data.dof_downsample_sh;
  *blur1_sh = e_data.dof_blur1_sh;
  *blur2_sh = e_data.dof_blur2_sh;
  *resolve_sh = e_data.dof_resolve_sh;
}
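/* Anti-aliasing: the TAA accumulation shader plus the SMAA shader for the requested
 * stage (0, 1 or 2). */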
GPUShader *workbench_shader_antialiasing_accumulation_get(void)
{
  if (e_data.aa_accum_sh == nullptr) {
    e_data.aa_accum_sh = GPU_shader_create_from_info_name("workbench_taa");
  }
  return e_data.aa_accum_sh;
}

GPUShader *workbench_shader_antialiasing_get(int stage)
{
  BLI_assert(stage < 3);
  GPUShader **shader = &e_data.smaa_sh[stage];

  if (*shader == nullptr) {
    std::string create_info_name = "workbench_smaa_stage_";
    create_info_name += std::to_string(stage);
    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return e_data.smaa_sh[stage];
}
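/* Volume shaders vary on slicing, color-band (coba) usage, interpolation type and
 * smoke vs. generic object volumes, e.g. "workbench_volume_smoke_linear_coba_slice". */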
GPUShader *workbench_shader_volume_get(bool slice,
                                       bool coba,
                                       eWORKBENCH_VolumeInterpType interp_type,
                                       bool smoke)
{
  GPUShader **shader = &e_data.volume_sh[slice][coba][interp_type][smoke];

  if (*shader == nullptr) {
    std::string create_info_name = "workbench_volume";
    create_info_name += (smoke) ? "_smoke" : "_object";
    create_info_name += workbench_volume_interp_to_str(interp_type);
    create_info_name += (coba) ? "_coba" : "_no_coba";
    create_info_name += (slice) ? "_slice" : "_no_slice";
    *shader = GPU_shader_create_from_info_name(create_info_name.c_str());
  }
  return *shader;
}

/* -------------------------------------------------------------------- */

void workbench_shader_free(void)
{
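  /* The multi-dimensional caches are walked as flat arrays of GPUShader pointers so a
   * single loop per cache frees every variation. */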
  for (int j = 0; j < sizeof(e_data.opaque_prepass_sh_cache) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.opaque_prepass_sh_cache[0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.transp_prepass_sh_cache) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.transp_prepass_sh_cache[0][0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.opaque_composite_sh); j++) {
    struct GPUShader **sh_array = &e_data.opaque_composite_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.shadow_depth_pass_sh); j++) {
    struct GPUShader **sh_array = &e_data.shadow_depth_pass_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.shadow_depth_fail_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.shadow_depth_fail_sh[0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.cavity_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.cavity_sh[0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.smaa_sh); j++) {
    struct GPUShader **sh_array = &e_data.smaa_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.volume_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.volume_sh[0][0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }

  DRW_SHADER_FREE_SAFE(e_data.oit_resolve_sh);
  DRW_SHADER_FREE_SAFE(e_data.outline_sh);
  DRW_SHADER_FREE_SAFE(e_data.merge_infront_sh);

  DRW_SHADER_FREE_SAFE(e_data.dof_prepare_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_downsample_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_blur1_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_blur2_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_resolve_sh);

  DRW_SHADER_FREE_SAFE(e_data.aa_accum_sh);
}