Blender  V3.3
draw_cache_impl_mesh.cc
Go to the documentation of this file.
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2017 Blender Foundation. All rights reserved. */
3 
10 #include <optional>
11 
12 #include "MEM_guardedalloc.h"
13 
14 #include "BLI_bitmap.h"
15 #include "BLI_buffer.h"
16 #include "BLI_edgehash.h"
17 #include "BLI_index_range.hh"
18 #include "BLI_listbase.h"
19 #include "BLI_map.hh"
20 #include "BLI_math_bits.h"
21 #include "BLI_math_vector.h"
22 #include "BLI_span.hh"
23 #include "BLI_string.h"
24 #include "BLI_string_ref.hh"
25 #include "BLI_task.h"
26 #include "BLI_utildefines.h"
27 
28 #include "DNA_mesh_types.h"
29 #include "DNA_meshdata_types.h"
30 #include "DNA_object_types.h"
31 #include "DNA_scene_types.h"
32 
33 #include "BKE_attribute.h"
34 #include "BKE_customdata.h"
35 #include "BKE_deform.h"
36 #include "BKE_editmesh.h"
37 #include "BKE_editmesh_cache.h"
38 #include "BKE_editmesh_tangent.h"
39 #include "BKE_mesh.h"
40 #include "BKE_mesh_runtime.h"
41 #include "BKE_mesh_tangent.h"
42 #include "BKE_modifier.h"
43 #include "BKE_object_deform.h"
44 #include "BKE_paint.h"
45 #include "BKE_pbvh.h"
46 #include "BKE_subdiv_modifier.h"
47 
48 #include "atomic_ops.h"
49 
50 #include "bmesh.h"
51 
52 #include "GPU_batch.h"
53 #include "GPU_material.h"
54 
55 #include "DRW_render.h"
56 
57 #include "ED_mesh.h"
58 #include "ED_uvedit.h"
59 
60 #include "draw_cache_extract.hh"
61 #include "draw_cache_inline.h"
62 #include "draw_subdivision.h"
63 
64 #include "draw_cache_impl.h" /* own include */
65 
67 
69 using blender::Map;
70 using blender::Span;
72 
73 /* ---------------------------------------------------------------------- */
77 /* clang-format off */
78 
79 #define BUFFER_INDEX(buff_name) ((offsetof(MeshBufferList, buff_name) - offsetof(MeshBufferList, vbo)) / sizeof(void *))
80 #define BUFFER_LEN (sizeof(MeshBufferList) / sizeof(void *))
81 
82 #define _BATCH_MAP1(a) batches_that_use_buffer(BUFFER_INDEX(a))
83 #define _BATCH_MAP2(a, b) _BATCH_MAP1(a) | _BATCH_MAP1(b)
84 #define _BATCH_MAP3(a, b, c) _BATCH_MAP2(a, b) | _BATCH_MAP1(c)
85 #define _BATCH_MAP4(a, b, c, d) _BATCH_MAP3(a, b, c) | _BATCH_MAP1(d)
86 #define _BATCH_MAP5(a, b, c, d, e) _BATCH_MAP4(a, b, c, d) | _BATCH_MAP1(e)
87 #define _BATCH_MAP6(a, b, c, d, e, f) _BATCH_MAP5(a, b, c, d, e) | _BATCH_MAP1(f)
88 #define _BATCH_MAP7(a, b, c, d, e, f, g) _BATCH_MAP6(a, b, c, d, e, f) | _BATCH_MAP1(g)
89 #define _BATCH_MAP8(a, b, c, d, e, f, g, h) _BATCH_MAP7(a, b, c, d, e, f, g) | _BATCH_MAP1(h)
90 #define _BATCH_MAP9(a, b, c, d, e, f, g, h, i) _BATCH_MAP8(a, b, c, d, e, f, g, h) | _BATCH_MAP1(i)
91 #define _BATCH_MAP10(a, b, c, d, e, f, g, h, i, j) _BATCH_MAP9(a, b, c, d, e, f, g, h, i) | _BATCH_MAP1(j)
92 
93 #define BATCH_MAP(...) VA_NARGS_CALL_OVERLOAD(_BATCH_MAP, __VA_ARGS__)
94 
95 /* clang-format on */
96 
97 #define TRIS_PER_MAT_INDEX BUFFER_LEN
98 
99 static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
100 {
101  switch (buffer_index) {
102  case BUFFER_INDEX(vbo.pos_nor):
108  case BUFFER_INDEX(vbo.lnor):
110  case BUFFER_INDEX(vbo.edge_fac):
111  return MBC_WIRE_EDGES;
112  case BUFFER_INDEX(vbo.weights):
113  return MBC_SURFACE_WEIGHTS;
114  case BUFFER_INDEX(vbo.uv):
118  case BUFFER_INDEX(vbo.tan):
119  return MBC_SURFACE_PER_MAT;
120  case BUFFER_INDEX(vbo.sculpt_data):
121  return MBC_SCULPT_OVERLAYS;
122  case BUFFER_INDEX(vbo.orco):
123  return MBC_SURFACE_PER_MAT;
124  case BUFFER_INDEX(vbo.edit_data):
126  case BUFFER_INDEX(vbo.edituv_data):
129  case BUFFER_INDEX(vbo.edituv_stretch_area):
131  case BUFFER_INDEX(vbo.edituv_stretch_angle):
133  case BUFFER_INDEX(vbo.mesh_analysis):
134  return MBC_EDIT_MESH_ANALYSIS;
135  case BUFFER_INDEX(vbo.fdots_pos):
137  case BUFFER_INDEX(vbo.fdots_nor):
138  return MBC_EDIT_FACEDOTS;
139  case BUFFER_INDEX(vbo.fdots_uv):
140  return MBC_EDITUV_FACEDOTS;
141  case BUFFER_INDEX(vbo.fdots_edituv_data):
142  return MBC_EDITUV_FACEDOTS;
143  case BUFFER_INDEX(vbo.skin_roots):
144  return MBC_SKIN_ROOTS;
145  case BUFFER_INDEX(vbo.vert_idx):
147  case BUFFER_INDEX(vbo.edge_idx):
149  case BUFFER_INDEX(vbo.poly_idx):
151  case BUFFER_INDEX(vbo.fdot_idx):
153  case BUFFER_INDEX(vbo.attr[0]):
154  case BUFFER_INDEX(vbo.attr[1]):
155  case BUFFER_INDEX(vbo.attr[2]):
156  case BUFFER_INDEX(vbo.attr[3]):
157  case BUFFER_INDEX(vbo.attr[4]):
158  case BUFFER_INDEX(vbo.attr[5]):
159  case BUFFER_INDEX(vbo.attr[6]):
160  case BUFFER_INDEX(vbo.attr[7]):
161  case BUFFER_INDEX(vbo.attr[8]):
162  case BUFFER_INDEX(vbo.attr[9]):
163  case BUFFER_INDEX(vbo.attr[10]):
164  case BUFFER_INDEX(vbo.attr[11]):
165  case BUFFER_INDEX(vbo.attr[12]):
166  case BUFFER_INDEX(vbo.attr[13]):
167  case BUFFER_INDEX(vbo.attr[14]):
169  case BUFFER_INDEX(ibo.tris):
172  case BUFFER_INDEX(ibo.lines):
174  case BUFFER_INDEX(ibo.lines_loose):
175  return MBC_LOOSE_EDGES;
176  case BUFFER_INDEX(ibo.points):
178  case BUFFER_INDEX(ibo.fdots):
180  case BUFFER_INDEX(ibo.lines_paint_mask):
181  return MBC_WIRE_LOOPS;
182  case BUFFER_INDEX(ibo.lines_adjacency):
183  return MBC_EDGE_DETECTION;
184  case BUFFER_INDEX(ibo.edituv_tris):
186  case BUFFER_INDEX(ibo.edituv_lines):
188  case BUFFER_INDEX(ibo.edituv_points):
189  return MBC_EDITUV_VERTS;
190  case BUFFER_INDEX(ibo.edituv_fdots):
191  return MBC_EDITUV_FACEDOTS;
192  case TRIS_PER_MAT_INDEX:
193  return MBC_SURFACE_PER_MAT;
194  }
195  return (DRWBatchFlag)0;
196 }
197 
199 static void mesh_batch_cache_clear(Mesh *me);
200 
201 static void mesh_batch_cache_discard_batch(MeshBatchCache *cache, const DRWBatchFlag batch_map)
202 {
203  for (int i = 0; i < MBC_BATCH_LEN; i++) {
204  DRWBatchFlag batch_requested = (DRWBatchFlag)(1u << i);
205  if (batch_map & batch_requested) {
206  GPU_BATCH_DISCARD_SAFE(((GPUBatch **)&cache->batch)[i]);
207  cache->batch_ready &= ~batch_requested;
208  }
209  }
210 
211  if (batch_map & MBC_SURFACE_PER_MAT) {
213  }
214 }
215 
216 /* Return true is all layers in _b_ are inside _a_. */
218 {
219  return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
220 }
221 
223 {
224  return *((uint32_t *)&a) == *((uint32_t *)&b);
225 }
226 
228 {
229  uint32_t *a_p = (uint32_t *)a;
230  uint32_t *b_p = (uint32_t *)&b;
231  atomic_fetch_and_or_uint32(a_p, *b_p);
232 }
233 
235 {
236  *((uint32_t *)a) = 0;
237 }
238 
239 static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
240 {
241  cd_used->edit_uv = 1;
242 }
243 
244 static void mesh_cd_calc_active_uv_layer(const Object *object,
245  const Mesh *me,
246  DRW_MeshCDMask *cd_used)
247 {
248  const Mesh *me_final = editmesh_final_or_this(object, me);
249  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
250  int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
251  if (layer != -1) {
252  cd_used->uv |= (1 << layer);
253  }
254 }
255 
256 static void mesh_cd_calc_active_mask_uv_layer(const Object *object,
257  const Mesh *me,
258  DRW_MeshCDMask *cd_used)
259 {
260  const Mesh *me_final = editmesh_final_or_this(object, me);
261  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
262  int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
263  if (layer != -1) {
264  cd_used->uv |= (1 << layer);
265  }
266 }
267 
269  const Mesh *me,
270  struct GPUMaterial **gpumat_array,
271  int gpumat_array_len,
272  DRW_Attributes *attributes)
273 {
274  const Mesh *me_final = editmesh_final_or_this(object, me);
275  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
276  const CustomData *cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
277  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
278  const CustomData *cd_edata = mesh_cd_edata_get_from_mesh(me_final);
279 
280  /* Create a mesh with final customdata domains
281  * we can query with attribute API. */
282  Mesh me_query = blender::dna::shallow_zero_initialize();
283 
285  ID_ME, cd_vdata, cd_edata, cd_ldata, cd_pdata, nullptr, &me_query.id);
286 
287  /* See: DM_vertex_attributes_from_gpu for similar logic */
288  DRW_MeshCDMask cd_used;
289  mesh_cd_layers_type_clear(&cd_used);
290 
291  const CustomDataLayer *default_color = BKE_id_attributes_render_color_get(&me_query.id);
292  const StringRefNull default_color_name = default_color ? default_color->name : "";
293 
294  for (int i = 0; i < gpumat_array_len; i++) {
295  GPUMaterial *gpumat = gpumat_array[i];
296  if (gpumat) {
297  ListBase gpu_attrs = GPU_material_attributes(gpumat);
298  LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
299  const char *name = gpu_attr->name;
300  eCustomDataType type = static_cast<eCustomDataType>(gpu_attr->type);
301  int layer = -1;
302  std::optional<eAttrDomain> domain;
303 
304  if (gpu_attr->is_default_color) {
305  name = default_color_name.c_str();
306  }
307 
308  if (type == CD_AUTO_FROM_NAME) {
309  /* We need to deduce what exact layer is used.
310  *
311  * We do it based on the specified name.
312  */
313  if (name[0] != '\0') {
314  layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
315  type = CD_MTFACE;
316 
317 #if 0 /* Tangents are always from UV's - this will never happen. */
318  if (layer == -1) {
319  layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
320  type = CD_TANGENT;
321  }
322 #endif
323  if (layer == -1) {
324  /* Try to match a generic attribute, we use the first attribute domain with a
325  * matching name. */
326  if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
327  domain = ATTR_DOMAIN_POINT;
328  }
329  else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
330  domain = ATTR_DOMAIN_CORNER;
331  }
332  else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
333  domain = ATTR_DOMAIN_FACE;
334  }
335  else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
336  domain = ATTR_DOMAIN_EDGE;
337  }
338  else {
339  layer = -1;
340  }
341  }
342 
343  if (layer == -1) {
344  continue;
345  }
346  }
347  else {
348  /* Fall back to the UV layer, which matches old behavior. */
349  type = CD_MTFACE;
350  }
351  }
352 
353  switch (type) {
354  case CD_MTFACE: {
355  if (layer == -1) {
356  layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
358  }
359  if (layer != -1) {
360  cd_used.uv |= (1 << layer);
361  }
362  break;
363  }
364  case CD_TANGENT: {
365  if (layer == -1) {
366  layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
368 
369  /* Only fallback to orco (below) when we have no UV layers, see: T56545 */
370  if (layer == -1 && name[0] != '\0') {
371  layer = CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
372  }
373  }
374  if (layer != -1) {
375  cd_used.tan |= (1 << layer);
376  }
377  else {
378  /* no UV layers at all => requesting orco */
379  cd_used.tan_orco = 1;
380  cd_used.orco = 1;
381  }
382  break;
383  }
384 
385  case CD_ORCO: {
386  cd_used.orco = 1;
387  break;
388  }
389  case CD_PROP_BYTE_COLOR:
390  case CD_PROP_COLOR:
391  case CD_PROP_FLOAT3:
392  case CD_PROP_BOOL:
393  case CD_PROP_INT8:
394  case CD_PROP_INT32:
395  case CD_PROP_FLOAT:
396  case CD_PROP_FLOAT2: {
397  if (layer != -1 && domain.has_value()) {
398  drw_attributes_add_request(attributes, name, type, layer, *domain);
399  }
400  break;
401  }
402  default:
403  break;
404  }
405  }
406  }
407  }
408  return cd_used;
409 }
410 
413 /* ---------------------------------------------------------------------- */
419 {
420  MEM_SAFE_FREE(wstate->defgroup_sel);
423 
424  memset(wstate, 0, sizeof(*wstate));
425 
426  wstate->defgroup_active = -1;
427 }
428 
430 static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst,
431  const struct DRW_MeshWeightState *wstate_src)
432 {
433  MEM_SAFE_FREE(wstate_dst->defgroup_sel);
434  MEM_SAFE_FREE(wstate_dst->defgroup_locked);
435  MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
436 
437  memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
438 
439  if (wstate_src->defgroup_sel) {
440  wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
441  }
442  if (wstate_src->defgroup_locked) {
443  wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
444  }
445  if (wstate_src->defgroup_unlocked) {
446  wstate_dst->defgroup_unlocked = static_cast<bool *>(
447  MEM_dupallocN(wstate_src->defgroup_unlocked));
448  }
449 }
450 
451 static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
452 {
453  return ((!array1 && !array2) ||
454  (array1 && array2 && memcmp(array1, array2, size * sizeof(bool)) == 0));
455 }
456 
459  const struct DRW_MeshWeightState *b)
460 {
461  return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
462  a->flags == b->flags && a->alert_mode == b->alert_mode &&
463  a->defgroup_sel_count == b->defgroup_sel_count &&
464  drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
465  drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
466  drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
467 }
468 
470  Mesh *me,
471  const ToolSettings *ts,
472  bool paint_mode,
473  struct DRW_MeshWeightState *wstate)
474 {
475  /* Extract complete vertex weight group selection state and mode flags. */
476  memset(wstate, 0, sizeof(*wstate));
477 
478  wstate->defgroup_active = me->vertex_group_active_index - 1;
480 
481  wstate->alert_mode = ts->weightuser;
482 
483  if (paint_mode && ts->multipaint) {
484  /* Multi-paint needs to know all selected bones, not just the active group.
485  * This is actually a relatively expensive operation, but caching would be difficult. */
487  ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
488 
489  if (wstate->defgroup_sel_count > 1) {
492 
495  wstate->defgroup_len,
496  wstate->defgroup_sel,
497  wstate->defgroup_sel,
498  &wstate->defgroup_sel_count);
499  }
500  }
501  /* With only one selected bone Multi-paint reverts to regular mode. */
502  else {
503  wstate->defgroup_sel_count = 0;
504  MEM_SAFE_FREE(wstate->defgroup_sel);
505  }
506  }
507 
508  if (paint_mode && ts->wpaint_lock_relative) {
509  /* Set of locked vertex groups for the lock relative mode. */
512 
513  /* Check that a deform group is active, and none of selected groups are locked. */
515  wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
517  wstate->defgroup_locked,
518  wstate->defgroup_sel,
519  wstate->defgroup_sel_count)) {
521 
522  /* Compute the set of locked and unlocked deform vertex groups. */
524  wstate->defgroup_locked,
525  wstate->defgroup_unlocked,
526  wstate->defgroup_locked, /* out */
527  wstate->defgroup_unlocked);
528  }
529  else {
532  }
533  }
534 }
535 
538 /* ---------------------------------------------------------------------- */
543 {
544  atomic_fetch_and_or_uint32((uint32_t *)(&cache->batch_requested), *(uint32_t *)&new_flag);
545 }
546 
547 /* GPUBatch cache management. */
548 
549 static bool mesh_batch_cache_valid(Object *object, Mesh *me)
550 {
551  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
552 
553  if (cache == nullptr) {
554  return false;
555  }
556 
557  if (object->sculpt && object->sculpt->pbvh) {
558  if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh)) {
559  return false;
560  }
561 
562  if (BKE_pbvh_is_drawing(object->sculpt->pbvh) &&
564  return false;
565  }
566  }
567 
568  if (cache->is_editmode != (me->edit_mesh != nullptr)) {
569  return false;
570  }
571 
572  if (cache->is_dirty) {
573  return false;
574  }
575 
576  if (cache->mat_len != mesh_render_mat_len_get(object, me)) {
577  return false;
578  }
579 
580  return true;
581 }
582 
583 static void mesh_batch_cache_init(Object *object, Mesh *me)
584 {
585  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
586 
587  if (!cache) {
588  me->runtime.batch_cache = MEM_cnew<MeshBatchCache>(__func__);
589  cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
590  }
591  else {
592  memset(cache, 0, sizeof(*cache));
593  }
594 
595  cache->is_editmode = me->edit_mesh != nullptr;
596 
597  if (object->sculpt && object->sculpt->pbvh) {
598  cache->pbvh_is_drawing = BKE_pbvh_is_drawing(object->sculpt->pbvh);
599  }
600 
601  if (cache->is_editmode == false) {
602  // cache->edge_len = mesh_render_edges_len_get(me);
603  // cache->tri_len = mesh_render_looptri_len_get(me);
604  // cache->poly_len = mesh_render_polys_len_get(me);
605  // cache->vert_len = mesh_render_verts_len_get(me);
606  }
607 
608  cache->mat_len = mesh_render_mat_len_get(object, me);
609  cache->surface_per_mat = static_cast<GPUBatch **>(
610  MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__));
611  cache->tris_per_mat = static_cast<GPUIndexBuf **>(
612  MEM_callocN(sizeof(*cache->tris_per_mat) * cache->mat_len, __func__));
613 
614  cache->is_dirty = false;
615  cache->batch_ready = (DRWBatchFlag)0;
616  cache->batch_requested = (DRWBatchFlag)0;
617 
619 }
620 
622 {
623  if (!mesh_batch_cache_valid(object, me)) {
625  mesh_batch_cache_init(object, me);
626  }
627 }
628 
630 {
631  return static_cast<MeshBatchCache *>(me->runtime.batch_cache);
632 }
633 
635  const struct DRW_MeshWeightState *wstate)
636 {
637  if (!drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
638  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
639  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.weights);
640  }
642 
643  cache->batch_ready &= ~MBC_SURFACE_WEIGHTS;
644 
646  }
647 }
648 
650 {
653  for (int i = 0; i < cache->mat_len; i++) {
655  }
656 }
657 
658 /* Free batches with material-mapped looptris.
659  * NOTE: The updating of the indices buffers (#tris_per_mat) is handled in the extractors.
660  * No need to discard they here. */
662 {
664  for (int i = 0; i < cache->mat_len; i++) {
666  }
667  cache->batch_ready &= ~MBC_SURFACE;
668 }
669 
671 {
672  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
673  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
674  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
675  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
676  }
677  DRWBatchFlag batch_map = BATCH_MAP(vbo.uv, vbo.tan, vbo.orco);
678  mesh_batch_cache_discard_batch(cache, batch_map);
680 }
681 
683 {
684  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
685  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_angle);
686  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_stretch_area);
687  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
688  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
689  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
690  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
691  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
692  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
693  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
694  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
695  }
696  DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_stretch_angle,
697  vbo.edituv_stretch_area,
698  vbo.uv,
699  vbo.edituv_data,
700  vbo.fdots_uv,
701  vbo.fdots_edituv_data,
702  ibo.edituv_tris,
703  ibo.edituv_lines,
704  ibo.edituv_points,
705  ibo.edituv_fdots);
706  mesh_batch_cache_discard_batch(cache, batch_map);
707 
708  cache->tot_area = 0.0f;
709  cache->tot_uv_area = 0.0f;
710 
711  cache->batch_ready &= ~MBC_EDITUV;
712 
713  /* We discarded the vbo.uv so we need to reset the cd_used flag. */
714  cache->cd_used.uv = 0;
715  cache->cd_used.edit_uv = 0;
716 }
717 
719 {
720  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
721  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
722  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
723  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
724  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
725  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
726  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
727  }
728  DRWBatchFlag batch_map = BATCH_MAP(vbo.edituv_data,
729  vbo.fdots_edituv_data,
730  ibo.edituv_tris,
731  ibo.edituv_lines,
732  ibo.edituv_points,
733  ibo.edituv_fdots);
734  mesh_batch_cache_discard_batch(cache, batch_map);
735 }
736 
738 {
739  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
740  if (cache == nullptr) {
741  return;
742  }
743  DRWBatchFlag batch_map;
744  switch (mode) {
746  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
747  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edit_data);
748  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_nor);
749  }
750  batch_map = BATCH_MAP(vbo.edit_data, vbo.fdots_nor);
751  mesh_batch_cache_discard_batch(cache, batch_map);
752 
753  /* Because visible UVs depends on edit mode selection, discard topology. */
755  break;
757  /* Paint mode selection flag is packed inside the nor attribute.
758  * Note that it can be slow if auto smooth is enabled. (see T63946) */
759  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
760  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.lines_paint_mask);
761  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.pos_nor);
762  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.lnor);
763  }
764  batch_map = BATCH_MAP(ibo.lines_paint_mask, vbo.pos_nor, vbo.lnor);
765  mesh_batch_cache_discard_batch(cache, batch_map);
766  break;
768  cache->is_dirty = true;
769  break;
773  break;
776  break;
778  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
779  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
780  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
781  }
782  batch_map = BATCH_MAP(vbo.edituv_data, vbo.fdots_edituv_data);
783  mesh_batch_cache_discard_batch(cache, batch_map);
784  break;
785  default:
786  BLI_assert(0);
787  }
788 }
789 
791 {
792  GPUVertBuf **vbos = (GPUVertBuf **)&mbuflist->vbo;
793  GPUIndexBuf **ibos = (GPUIndexBuf **)&mbuflist->ibo;
794  for (int i = 0; i < sizeof(mbuflist->vbo) / sizeof(void *); i++) {
795  GPU_VERTBUF_DISCARD_SAFE(vbos[i]);
796  }
797  for (int i = 0; i < sizeof(mbuflist->ibo) / sizeof(void *); i++) {
798  GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
799  }
800 }
801 
803 {
805 
808  mbc->loose_geom.edge_len = 0;
809  mbc->loose_geom.vert_len = 0;
810 
813  mbc->poly_sorted.visible_tri_len = 0;
814 }
815 
817 {
818  if (cache->subdiv_cache) {
820  MEM_freeN(cache->subdiv_cache);
821  cache->subdiv_cache = nullptr;
822  }
823 }
824 
825 static void mesh_batch_cache_clear(Mesh *me)
826 {
827  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
828  if (!cache) {
829  return;
830  }
831  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
833  }
834 
835  for (int i = 0; i < cache->mat_len; i++) {
837  }
838  MEM_SAFE_FREE(cache->tris_per_mat);
839 
840  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
841  GPUBatch **batch = (GPUBatch **)&cache->batch;
843  }
844 
848  cache->mat_len = 0;
849 
850  cache->batch_ready = (DRWBatchFlag)0;
852 
854 }
855 
857 {
860 }
861 
864 /* ---------------------------------------------------------------------- */
868 static void texpaint_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
869 {
870  DRW_MeshCDMask cd_needed;
871  mesh_cd_layers_type_clear(&cd_needed);
872  mesh_cd_calc_active_uv_layer(object, me, &cd_needed);
873 
874  BLI_assert(cd_needed.uv != 0 &&
875  "No uv layer available in texpaint, but batches requested anyway!");
876 
877  mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
878  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
879 }
880 
882  const Mesh &mesh,
883  DRW_Attributes &attributes)
884 {
885  const Mesh *me_final = editmesh_final_or_this(&object, &mesh);
886  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
887  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
888 
889  /* Necessary because which attributes are active/default is stored in #CustomData. */
890  Mesh me_query = blender::dna::shallow_zero_initialize();
892  ID_ME, cd_vdata, nullptr, cd_ldata, nullptr, nullptr, &me_query.id);
893 
894  auto request_color_attribute = [&](const char *name) {
895  int layer_index;
897  if (drw_custom_data_match_attribute(cd_vdata, name, &layer_index, &type)) {
898  drw_attributes_add_request(&attributes, name, type, layer_index, ATTR_DOMAIN_POINT);
899  }
900  else if (drw_custom_data_match_attribute(cd_ldata, name, &layer_index, &type)) {
901  drw_attributes_add_request(&attributes, name, type, layer_index, ATTR_DOMAIN_CORNER);
902  }
903  };
904 
906  request_color_attribute(active->name);
907  }
908  if (const CustomDataLayer *render = BKE_id_attributes_render_color_get(&me_query.id)) {
909  request_color_attribute(render->name);
910  }
911 }
912 
914 {
917  return DRW_batch_request(&cache->batch.all_verts);
918 }
919 
921 {
924  return DRW_batch_request(&cache->batch.all_edges);
925 }
926 
928 {
931 
932  return cache->batch.surface;
933 }
934 
936 {
939  if (cache->no_loose_wire) {
940  return nullptr;
941  }
942 
943  return DRW_batch_request(&cache->batch.loose_edges);
944 }
945 
947 {
950  return DRW_batch_request(&cache->batch.surface_weights);
951 }
952 
954 {
957  /* Even if is_manifold is not correct (not updated),
958  * the default (not manifold) is just the worst case. */
959  if (r_is_manifold) {
960  *r_is_manifold = cache->is_manifold;
961  }
962  return DRW_batch_request(&cache->batch.edge_detection);
963 }
964 
966 {
969  return DRW_batch_request(&cache->batch.wire_edges);
970 }
971 
973 {
977 }
978 
980  Mesh *me,
981  struct GPUMaterial **gpumat_array,
982  uint gpumat_array_len)
983 {
985  DRW_Attributes attrs_needed;
986  drw_attributes_clear(&attrs_needed);
988  object, me, gpumat_array, gpumat_array_len, &attrs_needed);
989 
990  BLI_assert(gpumat_array_len == cache->mat_len);
991 
992  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
993  ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
994  drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
996  return cache->surface_per_mat;
997 }
998 
1000 {
1001  MeshBatchCache *cache = mesh_batch_cache_get(me);
1002  texpaint_request_active_uv(cache, object, me);
1004  return cache->surface_per_mat;
1005 }
1006 
1008 {
1009  MeshBatchCache *cache = mesh_batch_cache_get(me);
1010  texpaint_request_active_uv(cache, object, me);
1012  return cache->batch.surface;
1013 }
1014 
1016 {
1017  MeshBatchCache *cache = mesh_batch_cache_get(me);
1018 
1019  DRW_Attributes attrs_needed{};
1020  request_active_and_default_color_attributes(*object, *me, attrs_needed);
1021 
1022  ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
1023  drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
1024 
1026  return cache->batch.surface;
1027 }
1028 
1030 {
1031  MeshBatchCache *cache = mesh_batch_cache_get(me);
1032 
1033  DRW_Attributes attrs_needed{};
1034  request_active_and_default_color_attributes(*object, *me, attrs_needed);
1035 
1036  ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
1037  drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
1038 
1040  return cache->batch.surface;
1041 }
1042 
1043 int DRW_mesh_material_count_get(const Object *object, const Mesh *me)
1044 {
1045  return mesh_render_mat_len_get(object, me);
1046 }
1047 
1049 {
1050  MeshBatchCache *cache = mesh_batch_cache_get(me);
1051 
1052  cache->cd_needed.sculpt_overlays = 1;
1055 
1056  return cache->batch.sculpt_overlays;
1057 }
1058 
1061 /* ---------------------------------------------------------------------- */
1066 {
1067  MeshBatchCache *cache = mesh_batch_cache_get(me);
1068  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
1070 
1071  DRW_vbo_request(nullptr, &cache->final.buff.vbo.pos_nor);
1072  return cache->final.buff.vbo.pos_nor;
1073 }
1074 
1077 /* ---------------------------------------------------------------------- */
1082 {
1083  MeshBatchCache *cache = mesh_batch_cache_get(me);
1085  return DRW_batch_request(&cache->batch.edit_triangles);
1086 }
1087 
1089 {
1090  MeshBatchCache *cache = mesh_batch_cache_get(me);
1092  return DRW_batch_request(&cache->batch.edit_edges);
1093 }
1094 
1096 {
1097  MeshBatchCache *cache = mesh_batch_cache_get(me);
1099  return DRW_batch_request(&cache->batch.edit_vertices);
1100 }
1101 
1103 {
1104  MeshBatchCache *cache = mesh_batch_cache_get(me);
1106  return DRW_batch_request(&cache->batch.edit_vnor);
1107 }
1108 
1110 {
1111  MeshBatchCache *cache = mesh_batch_cache_get(me);
1113  return DRW_batch_request(&cache->batch.edit_lnor);
1114 }
1115 
1117 {
1118  MeshBatchCache *cache = mesh_batch_cache_get(me);
1120  return DRW_batch_request(&cache->batch.edit_fdots);
1121 }
1122 
1124 {
1125  MeshBatchCache *cache = mesh_batch_cache_get(me);
1127  return DRW_batch_request(&cache->batch.edit_skin_roots);
1128 }
1129 
1132 /* ---------------------------------------------------------------------- */
1137 {
1138  MeshBatchCache *cache = mesh_batch_cache_get(me);
1141 }
1142 
1144 {
1145  MeshBatchCache *cache = mesh_batch_cache_get(me);
1148 }
1149 
1151 {
1152  MeshBatchCache *cache = mesh_batch_cache_get(me);
1155 }
1156 
1158 {
1159  MeshBatchCache *cache = mesh_batch_cache_get(me);
1162 }
1163 
1166 /* ---------------------------------------------------------------------- */
1170 static void edituv_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
1171 {
1172  DRW_MeshCDMask cd_needed;
1173  mesh_cd_layers_type_clear(&cd_needed);
1174  mesh_cd_calc_active_uv_layer(object, me, &cd_needed);
1175  mesh_cd_calc_edit_uv_layer(me, &cd_needed);
1176 
1177  BLI_assert(cd_needed.edit_uv != 0 &&
1178  "No uv layer available in edituv, but batches requested anyway!");
1179 
1180  mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
1181  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
1182 }
1183 
1185  Mesh *me,
1186  float **tot_area,
1187  float **tot_uv_area)
1188 {
1189  MeshBatchCache *cache = mesh_batch_cache_get(me);
1190  edituv_request_active_uv(cache, object, me);
1192 
1193  if (tot_area != nullptr) {
1194  *tot_area = &cache->tot_area;
1195  }
1196  if (tot_uv_area != nullptr) {
1197  *tot_uv_area = &cache->tot_uv_area;
1198  }
1200 }
1201 
1203 {
1204  MeshBatchCache *cache = mesh_batch_cache_get(me);
1205  edituv_request_active_uv(cache, object, me);
1208 }
1209 
1211 {
1212  MeshBatchCache *cache = mesh_batch_cache_get(me);
1213  edituv_request_active_uv(cache, object, me);
1215  return DRW_batch_request(&cache->batch.edituv_faces);
1216 }
1217 
1219 {
1220  MeshBatchCache *cache = mesh_batch_cache_get(me);
1221  edituv_request_active_uv(cache, object, me);
1223  return DRW_batch_request(&cache->batch.edituv_edges);
1224 }
1225 
1227 {
1228  MeshBatchCache *cache = mesh_batch_cache_get(me);
1229  edituv_request_active_uv(cache, object, me);
1231  return DRW_batch_request(&cache->batch.edituv_verts);
1232 }
1233 
1235 {
1236  MeshBatchCache *cache = mesh_batch_cache_get(me);
1237  edituv_request_active_uv(cache, object, me);
1239  return DRW_batch_request(&cache->batch.edituv_fdots);
1240 }
1241 
1243 {
1244  MeshBatchCache *cache = mesh_batch_cache_get(me);
1245  edituv_request_active_uv(cache, object, me);
1247  return DRW_batch_request(&cache->batch.wire_loops_uvs);
1248 }
1249 
1251 {
1252  MeshBatchCache *cache = mesh_batch_cache_get(me);
1253  texpaint_request_active_uv(cache, object, me);
1255  return DRW_batch_request(&cache->batch.wire_loops);
1256 }
1257 
1260 /* ---------------------------------------------------------------------- */
1265 {
1266  MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
1267 
1268  if (cache == nullptr) {
1269  return;
1270  }
1271 
1272  if (mesh_cd_layers_type_equal(cache->cd_used_over_time, cache->cd_used)) {
1273  cache->lastmatch = ctime;
1274  }
1275 
1276  if (drw_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
1277  cache->lastmatch = ctime;
1278  }
1279 
1280  if (ctime - cache->lastmatch > U.vbotimeout) {
1282  }
1283 
1286 }
1287 
1289  MeshBufferList *mbuflist,
1290  DRW_Attributes *attr_used)
1291 {
1292  for (int i = 0; i < attr_used->num_requests; i++) {
1293  DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
1294  }
1295 }
1296 
1297 #ifdef DEBUG
1298 /* Sanity check function to test if all requested batches are available. */
1299 static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, Mesh *me)
1300 {
1301  MeshBatchCache *cache = mesh_batch_cache_get(me);
1302  /* Make sure all requested batches have been setup. */
1303  /* NOTE: The next line creates a different scheduling than during release builds what can lead to
1304  * some issues (See T77867 where we needed to disable this function in order to debug what was
1305  * happening in release builds). */
1306  BLI_task_graph_work_and_wait(task_graph);
1307  for (int i = 0; i < MBC_BATCH_LEN; i++) {
1308  BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], (GPUPrimType)0));
1309  }
1310  for (int i = 0; i < MBC_VBO_LEN; i++) {
1311  BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.buff.vbo)[i]));
1312  }
1313  for (int i = 0; i < MBC_IBO_LEN; i++) {
1314  BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.buff.ibo)[i]));
1315  }
1316  for (int i = 0; i < MBC_VBO_LEN; i++) {
1317  BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.buff.vbo)[i]));
1318  }
1319  for (int i = 0; i < MBC_IBO_LEN; i++) {
1320  BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.buff.ibo)[i]));
1321  }
1322  for (int i = 0; i < MBC_VBO_LEN; i++) {
1323  BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.buff.vbo)[i]));
1324  }
1325  for (int i = 0; i < MBC_IBO_LEN; i++) {
1327  }
1328 }
1329 #endif
1330 
1332  Object *ob,
1333  Mesh *me,
1334  const Scene *scene,
1335  const bool is_paint_mode,
1336  const bool use_hide)
1337 {
1338  BLI_assert(task_graph);
1339  const ToolSettings *ts = nullptr;
1340  if (scene) {
1341  ts = scene->toolsettings;
1342  }
1343  MeshBatchCache *cache = mesh_batch_cache_get(me);
1344  bool cd_uv_update = false;
1345 
1346  /* Early out */
1347  if (cache->batch_requested == 0) {
1348 #ifdef DEBUG
1349  drw_mesh_batch_cache_check_available(task_graph, me);
1350 #endif
1351  return;
1352  }
1353 
1354 #ifdef DEBUG
1355  /* Map the index of a buffer to a flag containing all batches that use it. */
1356  Map<int, DRWBatchFlag> batches_that_use_buffer_local;
1357 
1358  auto assert_deps_valid = [&](DRWBatchFlag batch_flag, Span<int> used_buffer_indices) {
1359  for (const int buffer_index : used_buffer_indices) {
1360  batches_that_use_buffer_local.add_or_modify(
1361  buffer_index,
1362  [&](DRWBatchFlag *value) { *value = batch_flag; },
1363  [&](DRWBatchFlag *value) { *value |= batch_flag; });
1364  BLI_assert(batches_that_use_buffer(buffer_index) & batch_flag);
1365  }
1366  };
1367 #else
1368  auto assert_deps_valid = [&](DRWBatchFlag UNUSED(batch_flag),
1369  Span<int> UNUSED(used_buffer_indices)) {};
1370 
1371 #endif
1372 
1373  /* Sanity check. */
1374  if ((me->edit_mesh != nullptr) && (ob->mode & OB_MODE_EDIT)) {
1376  }
1377 
1378  const bool is_editmode = (me->edit_mesh != nullptr) &&
1379  (BKE_object_get_editmesh_eval_final(ob) != nullptr) &&
1381 
1382  /* This could be set for paint mode too, currently it's only used for edit-mode. */
1383  const bool is_mode_active = is_editmode && DRW_object_is_in_edit_mode(ob);
1384 
1385  DRWBatchFlag batch_requested = cache->batch_requested;
1386  cache->batch_requested = (DRWBatchFlag)0;
1387 
1388  if (batch_requested & MBC_SURFACE_WEIGHTS) {
1389  /* Check vertex weights. */
1390  if ((cache->batch.surface_weights != nullptr) && (ts != nullptr)) {
1391  struct DRW_MeshWeightState wstate;
1392  BLI_assert(ob->type == OB_MESH);
1393  drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
1394  mesh_batch_cache_check_vertex_group(cache, &wstate);
1395  drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
1396  drw_mesh_weight_state_clear(&wstate);
1397  }
1398  }
1399 
1400  if (batch_requested &
1403  /* Modifiers will only generate an orco layer if the mesh is deformed. */
1404  if (cache->cd_needed.orco != 0) {
1405  /* Orco is always extracted from final mesh. */
1406  Mesh *me_final = (me->edit_mesh) ? BKE_object_get_editmesh_eval_final(ob) : me;
1407  if (CustomData_get_layer(&me_final->vdata, CD_ORCO) == nullptr) {
1408  /* Skip orco calculation */
1409  cache->cd_needed.orco = 0;
1410  }
1411  }
1412 
1413  ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
1414 
1415  /* Verify that all surface batches have needed attribute layers.
1416  */
1417  /* TODO(fclem): We could be a bit smarter here and only do it per
1418  * material. */
1419  bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
1420  bool attr_overlap = drw_attributes_overlap(&cache->attr_used, &cache->attr_needed);
1421  if (cd_overlap == false || attr_overlap == false) {
1422  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1423  if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
1424  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
1425  cd_uv_update = true;
1426  }
1427  if ((cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
1428  cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
1429  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.tan);
1430  }
1431  if (cache->cd_used.orco != cache->cd_needed.orco) {
1432  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.orco);
1433  }
1434  if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
1435  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.sculpt_data);
1436  }
1437  if (!drw_attributes_overlap(&cache->attr_used, &cache->attr_needed)) {
1438  for (int i = 0; i < GPU_MAX_ATTR; i++) {
1439  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
1440  }
1441  }
1442  }
1443  /* We can't discard batches at this point as they have been
1444  * referenced for drawing. Just clear them in place. */
1445  for (int i = 0; i < cache->mat_len; i++) {
1447  }
1449  cache->batch_ready &= ~(MBC_SURFACE);
1450 
1451  mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
1452  drw_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
1453  }
1456 
1457  drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
1459  }
1460 
1461  if (batch_requested & MBC_EDITUV) {
1462  /* Discard UV batches if sync_selection changes */
1463  const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1464  if (cd_uv_update || (cache->is_uvsyncsel != is_uvsyncsel)) {
1465  cache->is_uvsyncsel = is_uvsyncsel;
1466  FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1467  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.edituv_data);
1468  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_uv);
1469  GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.fdots_edituv_data);
1470  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_tris);
1471  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_lines);
1472  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_points);
1473  GPU_INDEXBUF_DISCARD_SAFE(mbc->buff.ibo.edituv_fdots);
1474  }
1475  /* We only clear the batches as they may already have been
1476  * referenced. */
1484  cache->batch_ready &= ~MBC_EDITUV;
1485  }
1486  }
1487 
1488  /* Second chance to early out */
1489  if ((batch_requested & ~cache->batch_ready) == 0) {
1490 #ifdef DEBUG
1491  drw_mesh_batch_cache_check_available(task_graph, me);
1492 #endif
1493  return;
1494  }
1495 
1496  /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-PBVH).
1497  * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1498  * Normal updates should be part of the brush loop and only run during the stroke when the
1499  * brush needs to sample the surface. The drawing code should only update the normals
1500  * per redraw when smooth shading is enabled. */
1501  const bool do_update_sculpt_normals = ob->sculpt && ob->sculpt->pbvh;
1502  if (do_update_sculpt_normals) {
1503  Mesh *mesh = static_cast<Mesh *>(ob->data);
1505  }
1506 
1507  cache->batch_ready |= batch_requested;
1508 
1509  bool do_cage = false, do_uvcage = false;
1510  if (is_editmode && is_mode_active) {
1511  Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(ob);
1512  Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(ob);
1513 
1514  do_cage = editmesh_eval_final != editmesh_eval_cage;
1515  do_uvcage = !(editmesh_eval_final->runtime.is_original &&
1516  editmesh_eval_final->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH);
1517  }
1518 
1519  const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(me);
1520 
1521  MeshBufferList *mbuflist = &cache->final.buff;
1522 
1523  /* Initialize batches and request VBO's & IBO's. */
1524  assert_deps_valid(MBC_SURFACE,
1525  {BUFFER_INDEX(ibo.tris),
1526  BUFFER_INDEX(vbo.lnor),
1527  BUFFER_INDEX(vbo.pos_nor),
1528  BUFFER_INDEX(vbo.uv),
1529  BUFFER_INDEX(vbo.attr[0]),
1530  BUFFER_INDEX(vbo.attr[1]),
1531  BUFFER_INDEX(vbo.attr[2]),
1532  BUFFER_INDEX(vbo.attr[3]),
1533  BUFFER_INDEX(vbo.attr[4]),
1534  BUFFER_INDEX(vbo.attr[5]),
1535  BUFFER_INDEX(vbo.attr[6]),
1536  BUFFER_INDEX(vbo.attr[7]),
1537  BUFFER_INDEX(vbo.attr[8]),
1538  BUFFER_INDEX(vbo.attr[9]),
1539  BUFFER_INDEX(vbo.attr[10]),
1540  BUFFER_INDEX(vbo.attr[11]),
1541  BUFFER_INDEX(vbo.attr[12]),
1542  BUFFER_INDEX(vbo.attr[13]),
1543  BUFFER_INDEX(vbo.attr[14])});
1545  DRW_ibo_request(cache->batch.surface, &mbuflist->ibo.tris);
1546  /* Order matters. First ones override latest VBO's attributes. */
1547  DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.lnor);
1548  DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.pos_nor);
1549  if (cache->cd_used.uv != 0) {
1550  DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.uv);
1551  }
1552  drw_add_attributes_vbo(cache->batch.surface, mbuflist, &cache->attr_used);
1553  }
1554  assert_deps_valid(MBC_ALL_VERTS, {BUFFER_INDEX(vbo.pos_nor)});
1556  DRW_vbo_request(cache->batch.all_verts, &mbuflist->vbo.pos_nor);
1557  }
1558  assert_deps_valid(
1560  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.sculpt_data)});
1562  DRW_ibo_request(cache->batch.sculpt_overlays, &mbuflist->ibo.tris);
1563  DRW_vbo_request(cache->batch.sculpt_overlays, &mbuflist->vbo.pos_nor);
1564  DRW_vbo_request(cache->batch.sculpt_overlays, &mbuflist->vbo.sculpt_data);
1565  }
1566  assert_deps_valid(MBC_ALL_EDGES, {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor)});
1568  DRW_ibo_request(cache->batch.all_edges, &mbuflist->ibo.lines);
1569  DRW_vbo_request(cache->batch.all_edges, &mbuflist->vbo.pos_nor);
1570  }
1571  assert_deps_valid(MBC_LOOSE_EDGES, {BUFFER_INDEX(ibo.lines_loose), BUFFER_INDEX(vbo.pos_nor)});
1573  DRW_ibo_request(nullptr, &mbuflist->ibo.lines);
1574  DRW_ibo_request(cache->batch.loose_edges, &mbuflist->ibo.lines_loose);
1575  DRW_vbo_request(cache->batch.loose_edges, &mbuflist->vbo.pos_nor);
1576  }
1577  assert_deps_valid(MBC_EDGE_DETECTION,
1578  {BUFFER_INDEX(ibo.lines_adjacency), BUFFER_INDEX(vbo.pos_nor)});
1581  DRW_vbo_request(cache->batch.edge_detection, &mbuflist->vbo.pos_nor);
1582  }
1583  assert_deps_valid(
1585  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.weights)});
1587  DRW_ibo_request(cache->batch.surface_weights, &mbuflist->ibo.tris);
1588  DRW_vbo_request(cache->batch.surface_weights, &mbuflist->vbo.pos_nor);
1589  DRW_vbo_request(cache->batch.surface_weights, &mbuflist->vbo.weights);
1590  }
1591  assert_deps_valid(
1593  {BUFFER_INDEX(ibo.lines_paint_mask), BUFFER_INDEX(vbo.lnor), BUFFER_INDEX(vbo.pos_nor)});
1595  DRW_ibo_request(cache->batch.wire_loops, &mbuflist->ibo.lines_paint_mask);
1596  /* Order matters. First ones override latest VBO's attributes. */
1597  DRW_vbo_request(cache->batch.wire_loops, &mbuflist->vbo.lnor);
1598  DRW_vbo_request(cache->batch.wire_loops, &mbuflist->vbo.pos_nor);
1599  }
1600  assert_deps_valid(
1602  {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edge_fac)});
1604  DRW_ibo_request(cache->batch.wire_edges, &mbuflist->ibo.lines);
1605  DRW_vbo_request(cache->batch.wire_edges, &mbuflist->vbo.pos_nor);
1606  DRW_vbo_request(cache->batch.wire_edges, &mbuflist->vbo.edge_fac);
1607  }
1608  assert_deps_valid(MBC_WIRE_LOOPS_UVS, {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv)});
1610  DRW_ibo_request(cache->batch.wire_loops_uvs, &mbuflist->ibo.edituv_lines);
1611  /* For paint overlay. Active layer should have been queried. */
1612  if (cache->cd_used.uv != 0) {
1613  DRW_vbo_request(cache->batch.wire_loops_uvs, &mbuflist->vbo.uv);
1614  }
1615  }
1616  assert_deps_valid(
1618  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.mesh_analysis)});
1620  DRW_ibo_request(cache->batch.edit_mesh_analysis, &mbuflist->ibo.tris);
1621  DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbuflist->vbo.pos_nor);
1623  }
1624 
1625  /* Per Material */
1626  assert_deps_valid(
1628  {BUFFER_INDEX(vbo.lnor), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.uv),
1629  BUFFER_INDEX(vbo.tan), BUFFER_INDEX(vbo.orco), BUFFER_INDEX(vbo.attr[0]),
1630  BUFFER_INDEX(vbo.attr[1]), BUFFER_INDEX(vbo.attr[2]), BUFFER_INDEX(vbo.attr[3]),
1631  BUFFER_INDEX(vbo.attr[4]), BUFFER_INDEX(vbo.attr[5]), BUFFER_INDEX(vbo.attr[6]),
1632  BUFFER_INDEX(vbo.attr[7]), BUFFER_INDEX(vbo.attr[8]), BUFFER_INDEX(vbo.attr[9]),
1633  BUFFER_INDEX(vbo.attr[10]), BUFFER_INDEX(vbo.attr[11]), BUFFER_INDEX(vbo.attr[12]),
1634  BUFFER_INDEX(vbo.attr[13]), BUFFER_INDEX(vbo.attr[14])});
1635  assert_deps_valid(MBC_SURFACE_PER_MAT, {TRIS_PER_MAT_INDEX});
1636  for (int i = 0; i < cache->mat_len; i++) {
1638  DRW_ibo_request(cache->surface_per_mat[i], &cache->tris_per_mat[i]);
1639  /* Order matters. First ones override latest VBO's attributes. */
1640  DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.lnor);
1641  DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.pos_nor);
1642  if (cache->cd_used.uv != 0) {
1643  DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.uv);
1644  }
1645  if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
1646  DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.tan);
1647  }
1648  if (cache->cd_used.orco != 0) {
1649  DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.orco);
1650  }
1651  drw_add_attributes_vbo(cache->surface_per_mat[i], mbuflist, &cache->attr_used);
1652  }
1653  }
1654 
1655  mbuflist = (do_cage) ? &cache->cage.buff : &cache->final.buff;
1656 
1657  /* Edit Mesh */
1658  assert_deps_valid(
1660  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
1662  DRW_ibo_request(cache->batch.edit_triangles, &mbuflist->ibo.tris);
1663  DRW_vbo_request(cache->batch.edit_triangles, &mbuflist->vbo.pos_nor);
1664  DRW_vbo_request(cache->batch.edit_triangles, &mbuflist->vbo.edit_data);
1665  }
1666  assert_deps_valid(
1668  {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
1670  DRW_ibo_request(cache->batch.edit_vertices, &mbuflist->ibo.points);
1671  DRW_vbo_request(cache->batch.edit_vertices, &mbuflist->vbo.pos_nor);
1672  DRW_vbo_request(cache->batch.edit_vertices, &mbuflist->vbo.edit_data);
1673  }
1674  assert_deps_valid(
1676  {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edit_data)});
1678  DRW_ibo_request(cache->batch.edit_edges, &mbuflist->ibo.lines);
1679  DRW_vbo_request(cache->batch.edit_edges, &mbuflist->vbo.pos_nor);
1680  DRW_vbo_request(cache->batch.edit_edges, &mbuflist->vbo.edit_data);
1681  }
1682  assert_deps_valid(MBC_EDIT_VNOR, {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor)});
1684  DRW_ibo_request(cache->batch.edit_vnor, &mbuflist->ibo.points);
1685  DRW_vbo_request(cache->batch.edit_vnor, &mbuflist->vbo.pos_nor);
1686  }
1687  assert_deps_valid(MBC_EDIT_LNOR,
1688  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.lnor)});
1690  DRW_ibo_request(cache->batch.edit_lnor, &mbuflist->ibo.tris);
1691  DRW_vbo_request(cache->batch.edit_lnor, &mbuflist->vbo.pos_nor);
1692  DRW_vbo_request(cache->batch.edit_lnor, &mbuflist->vbo.lnor);
1693  }
1694  assert_deps_valid(
1696  {BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdots_nor)});
1698  DRW_ibo_request(cache->batch.edit_fdots, &mbuflist->ibo.fdots);
1699  DRW_vbo_request(cache->batch.edit_fdots, &mbuflist->vbo.fdots_pos);
1700  DRW_vbo_request(cache->batch.edit_fdots, &mbuflist->vbo.fdots_nor);
1701  }
1702  assert_deps_valid(MBC_SKIN_ROOTS, {BUFFER_INDEX(vbo.skin_roots)});
1704  DRW_vbo_request(cache->batch.edit_skin_roots, &mbuflist->vbo.skin_roots);
1705  }
1706 
1707  /* Selection */
1708  assert_deps_valid(
1710  {BUFFER_INDEX(ibo.points), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.vert_idx)});
1712  DRW_ibo_request(cache->batch.edit_selection_verts, &mbuflist->ibo.points);
1715  }
1716  assert_deps_valid(
1718  {BUFFER_INDEX(ibo.lines), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.edge_idx)});
1720  DRW_ibo_request(cache->batch.edit_selection_edges, &mbuflist->ibo.lines);
1723  }
1724  assert_deps_valid(
1726  {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.poly_idx)});
1728  DRW_ibo_request(cache->batch.edit_selection_faces, &mbuflist->ibo.tris);
1731  }
1732  assert_deps_valid(
1734  {BUFFER_INDEX(ibo.fdots), BUFFER_INDEX(vbo.fdots_pos), BUFFER_INDEX(vbo.fdot_idx)});
1736  DRW_ibo_request(cache->batch.edit_selection_fdots, &mbuflist->ibo.fdots);
1739  }
1740 
1746  mbuflist = (do_uvcage) ? &cache->uv_cage.buff : &cache->final.buff;
1747 
1748  /* Edit UV */
1749  assert_deps_valid(
1751  {BUFFER_INDEX(ibo.edituv_tris), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1753  DRW_ibo_request(cache->batch.edituv_faces, &mbuflist->ibo.edituv_tris);
1754  DRW_vbo_request(cache->batch.edituv_faces, &mbuflist->vbo.uv);
1755  DRW_vbo_request(cache->batch.edituv_faces, &mbuflist->vbo.edituv_data);
1756  }
1757  assert_deps_valid(MBC_EDITUV_FACES_STRETCH_AREA,
1758  {BUFFER_INDEX(ibo.edituv_tris),
1759  BUFFER_INDEX(vbo.uv),
1760  BUFFER_INDEX(vbo.edituv_data),
1761  BUFFER_INDEX(vbo.edituv_stretch_area)});
1767  }
1768  assert_deps_valid(MBC_EDITUV_FACES_STRETCH_ANGLE,
1769  {BUFFER_INDEX(ibo.edituv_tris),
1770  BUFFER_INDEX(vbo.uv),
1771  BUFFER_INDEX(vbo.edituv_data),
1772  BUFFER_INDEX(vbo.edituv_stretch_angle)});
1778  }
1779  assert_deps_valid(
1781  {BUFFER_INDEX(ibo.edituv_lines), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1783  DRW_ibo_request(cache->batch.edituv_edges, &mbuflist->ibo.edituv_lines);
1784  DRW_vbo_request(cache->batch.edituv_edges, &mbuflist->vbo.uv);
1785  DRW_vbo_request(cache->batch.edituv_edges, &mbuflist->vbo.edituv_data);
1786  }
1787  assert_deps_valid(
1789  {BUFFER_INDEX(ibo.edituv_points), BUFFER_INDEX(vbo.uv), BUFFER_INDEX(vbo.edituv_data)});
1791  DRW_ibo_request(cache->batch.edituv_verts, &mbuflist->ibo.edituv_points);
1792  DRW_vbo_request(cache->batch.edituv_verts, &mbuflist->vbo.uv);
1793  DRW_vbo_request(cache->batch.edituv_verts, &mbuflist->vbo.edituv_data);
1794  }
1795  assert_deps_valid(MBC_EDITUV_FACEDOTS,
1796  {BUFFER_INDEX(ibo.edituv_fdots),
1797  BUFFER_INDEX(vbo.fdots_uv),
1798  BUFFER_INDEX(vbo.fdots_edituv_data)});
1800  DRW_ibo_request(cache->batch.edituv_fdots, &mbuflist->ibo.edituv_fdots);
1801  DRW_vbo_request(cache->batch.edituv_fdots, &mbuflist->vbo.fdots_uv);
1803  }
1804 
1805 #ifdef DEBUG
1806  auto assert_final_deps_valid = [&](const int buffer_index) {
1807  BLI_assert(batches_that_use_buffer(buffer_index) ==
1808  batches_that_use_buffer_local.lookup(buffer_index));
1809  };
1810  assert_final_deps_valid(BUFFER_INDEX(vbo.lnor));
1811  assert_final_deps_valid(BUFFER_INDEX(vbo.pos_nor));
1812  assert_final_deps_valid(BUFFER_INDEX(vbo.uv));
1813  assert_final_deps_valid(BUFFER_INDEX(vbo.sculpt_data));
1814  assert_final_deps_valid(BUFFER_INDEX(vbo.weights));
1815  assert_final_deps_valid(BUFFER_INDEX(vbo.edge_fac));
1816  assert_final_deps_valid(BUFFER_INDEX(vbo.mesh_analysis));
1817  assert_final_deps_valid(BUFFER_INDEX(vbo.tan));
1818  assert_final_deps_valid(BUFFER_INDEX(vbo.orco));
1819  assert_final_deps_valid(BUFFER_INDEX(vbo.edit_data));
1820  assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_pos));
1821  assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_nor));
1822  assert_final_deps_valid(BUFFER_INDEX(vbo.skin_roots));
1823  assert_final_deps_valid(BUFFER_INDEX(vbo.vert_idx));
1824  assert_final_deps_valid(BUFFER_INDEX(vbo.edge_idx));
1825  assert_final_deps_valid(BUFFER_INDEX(vbo.poly_idx));
1826  assert_final_deps_valid(BUFFER_INDEX(vbo.fdot_idx));
1827  assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_data));
1828  assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_area));
1829  assert_final_deps_valid(BUFFER_INDEX(vbo.edituv_stretch_angle));
1830  assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_uv));
1831  assert_final_deps_valid(BUFFER_INDEX(vbo.fdots_edituv_data));
1832  for (const int i : IndexRange(GPU_MAX_ATTR)) {
1833  assert_final_deps_valid(BUFFER_INDEX(vbo.attr[i]));
1834  }
1835 
1836  assert_final_deps_valid(BUFFER_INDEX(ibo.tris));
1837  assert_final_deps_valid(BUFFER_INDEX(ibo.lines));
1838  assert_final_deps_valid(BUFFER_INDEX(ibo.lines_loose));
1839  assert_final_deps_valid(BUFFER_INDEX(ibo.lines_adjacency));
1840  assert_final_deps_valid(BUFFER_INDEX(ibo.lines_paint_mask));
1841  assert_final_deps_valid(BUFFER_INDEX(ibo.points));
1842  assert_final_deps_valid(BUFFER_INDEX(ibo.fdots));
1843  assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_tris));
1844  assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_lines));
1845  assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_points));
1846  assert_final_deps_valid(BUFFER_INDEX(ibo.edituv_fdots));
1847 
1848  assert_final_deps_valid(TRIS_PER_MAT_INDEX);
1849 #endif
1850 
1851  if (do_uvcage) {
1853  cache,
1854  &cache->uv_cage,
1855  ob,
1856  me,
1857  is_editmode,
1858  is_paint_mode,
1859  is_mode_active,
1860  ob->obmat,
1861  false,
1862  true,
1863  scene,
1864  ts,
1865  true);
1866  }
1867 
1868  if (do_cage) {
1870  cache,
1871  &cache->cage,
1872  ob,
1873  me,
1874  is_editmode,
1875  is_paint_mode,
1876  is_mode_active,
1877  ob->obmat,
1878  false,
1879  false,
1880  scene,
1881  ts,
1882  true);
1883  }
1884 
1885  if (do_subdivision) {
1887  me,
1888  cache,
1889  &cache->final,
1890  is_editmode,
1891  is_paint_mode,
1892  is_mode_active,
1893  ob->obmat,
1894  true,
1895  false,
1896  do_cage,
1897  ts,
1898  use_hide);
1899  }
1900  else {
1901  /* The subsurf modifier may have been recently removed, or another modifier was added after it,
1902  * so free any potential subdivision cache as it is not needed anymore. */
1904  }
1905 
1907  cache,
1908  &cache->final,
1909  ob,
1910  me,
1911  is_editmode,
1912  is_paint_mode,
1913  is_mode_active,
1914  ob->obmat,
1915  true,
1916  false,
1917  scene,
1918  ts,
1919  use_hide);
1920 
1921  /* Ensure that all requested batches have finished.
1922  * Ideally we want to remove this sync, but there are cases where this doesn't work.
1923  * See T79038 for example.
1924  *
1925  * An idea to improve this is to separate the Object mode from the edit mode draw caches. And
1926  * based on the mode the correct one will be updated. Other option is to look into using
1927  * drw_batch_cache_generate_requested_delayed. */
1928  BLI_task_graph_work_and_wait(task_graph);
1929 #ifdef DEBUG
1930  drw_mesh_batch_cache_check_available(task_graph, me);
1931 #endif
1932 }