Blender  V3.3
gpu_buffers.c
Go to the documentation of this file.
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2005 Blender Foundation. All rights reserved. */
3 
10 #include <limits.h>
11 #include <stddef.h>
12 #include <stdlib.h>
13 #include <string.h>
14 
15 #include "MEM_guardedalloc.h"
16 
17 #include "BLI_bitmap.h"
18 #include "BLI_ghash.h"
19 #include "BLI_math_color.h"
20 #include "BLI_utildefines.h"
21 
22 #include "DNA_mesh_types.h"
23 #include "DNA_meshdata_types.h"
24 
25 #include "BKE_DerivedMesh.h"
26 #include "BKE_attribute.h"
27 #include "BKE_ccg.h"
28 #include "BKE_customdata.h"
29 #include "BKE_mesh.h"
30 #include "BKE_paint.h"
31 #include "BKE_pbvh.h"
32 #include "BKE_subdiv_ccg.h"
33 
34 #include "GPU_batch.h"
35 #include "GPU_buffers.h"
36 
37 #include "DRW_engine.h"
38 
39 #include "gpu_private.h"
40 
41 #include "bmesh.h"
42 
47 
52 
53  /* mesh pointers in case buffer allocation fails */
54  const MPoly *mpoly;
55  const MLoop *mloop;
56  const MLoopTri *looptri;
57  const MVert *mvert;
58 
59  const int *face_indices;
61 
62  /* grid pointers */
67  const int *grid_indices;
68  int totgrid;
69 
70  bool use_bmesh;
72 
74 
76 
77  /* The PBVH ensures that either all faces in the node are
78  * smooth-shaded or all faces are flat-shaded */
79  bool smooth;
80 
82 };
83 
84 typedef struct GPUAttrRef {
87  int layer_idx;
89 
90 #define MAX_GPU_ATTR 256
91 
92 typedef struct PBVHGPUFormat {
97  int totcol, totuv;
98 
99  /* Upload only the active color and UV attributes,
100  * used for workbench mode. */
103 
{
  /* Allocate a zero-initialized vertex-format descriptor for PBVH drawing.
   * NOTE(review): the extraction dropped the signature line and an interior
   * line here (doc line 108, presumably format initialization) — confirm
   * against the original file. */
  PBVHGPUFormat *vbo_id = MEM_callocN(sizeof(PBVHGPUFormat), "PBVHGPUFormat");

  return vbo_id;
}
112 
{
  /* Free the format descriptor; MEM_SAFE_FREE also NULLs the (local) pointer.
   * NOTE(review): the signature line was dropped by the extraction. */
  MEM_SAFE_FREE(vbo_id);
}
117 
/* Collect into `r_cd_attrs` the customdata layers (filtered by domain/type
 * masks) that should be uploaded as GPU attributes; returns the number of
 * entries written. When `active_only` is set, only the active layer of
 * `active_type`/`active_domain` is emitted (see definition below). */
static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask,
                                   eCustomDataMask type_mask,
                                   const CustomData *vdata,
                                   const CustomData *edata,
                                   const CustomData *ldata,
                                   const CustomData *pdata,
                                   GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
                                   bool active_only,
                                   int active_type,
                                   int active_domain,
                                   const CustomDataLayer *active_layer,
                                   const CustomDataLayer *render_layer);
130 
133 /* -------------------------------------------------------------------- */
138 {
139 }
140 
142 {
143  /* Nothing to do. */
144 }
145 
{
  /* Return the active layer of the given customdata type, or NULL if none.
   * NOTE(review): the signature line was dropped by the extraction. */
  int idx = CustomData_get_active_layer_index(cdata, type);
  return idx != -1 ? cdata->layers + idx : NULL;
}
151 
{
  /* Return the render layer of the given customdata type, or NULL if none.
   * NOTE(review): the signature line was dropped by the extraction. */
  int idx = CustomData_get_render_layer_index(cdata, type);
  return idx != -1 ? cdata->layers + idx : NULL;
}
157 
/* Allocates a non-initialized buffer to be sent to GPU.
 * A return of false indicates that the memory map failed. */
 GPU_PBVH_Buffers *buffers,
                                       uint vert_len)
{
  /* NOTE(review): the signature head carrying the function name and the
   * vbo_id parameter was dropped by the extraction (doc line 160). */

  /* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
   * Note that format initialization must match in both blocks.
   * Do this to keep braces balanced - otherwise indentation breaks. */

  if (buffers->vert_buf == NULL) {
    /* Initialize vertex buffer (match 'VertexBufferFormat').
     * NOTE(review): the buffer-creation call was dropped by the extraction
     * (doc line 170) — this branch is not empty in the original file. */
  }
  if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
      GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
    /* Allocate buffer if not allocated yet or size changed. */
    GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
  }

  /* Success means the CPU-side data pointer is mapped and usable. */
  return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
}
180 
{
  /* Lazily (re)create the draw batches for this node from its vertex and
   * index buffers. Existing batches are kept; only missing ones are built.
   * NOTE(review): the signature line was dropped by the extraction; `prim`
   * is the triangle primitive type, per its use below. */
  if (buffers->triangles == NULL) {
    buffers->triangles = GPU_batch_create(prim,
                                          buffers->vert_buf,
                                          /* can be NULL if buffer is empty */
                                          buffers->index_buf);
  }

  /* Coarse batches for fast-navigate drawing, only when their index buffers exist. */
  if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
    buffers->triangles_fast = GPU_batch_create(prim, buffers->vert_buf, buffers->index_buf_fast);
  }

  if (buffers->lines == NULL) {
    /* NOTE(review): the `GPU_batch_create(GPU_PRIM_LINES,` call head was
     * dropped by the extraction (doc line 195); these are its arguments. */
                                      buffers->vert_buf,
                                      /* can be NULL if buffer is empty */
                                      buffers->index_lines_buf);
  }

  if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
    buffers->lines_fast = GPU_batch_create(
        GPU_PRIM_LINES, buffers->vert_buf, buffers->index_lines_buf_fast);
  }
}
206 
209 /* -------------------------------------------------------------------- */
213 static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt,
214  const MVert *mvert,
215  const MLoop *mloop,
216  const int *sculpt_face_sets)
217 {
218  return (!paint_is_face_hidden(lt, mvert, mloop) && sculpt_face_sets &&
219  sculpt_face_sets[lt->poly] > SCULPT_FACE_SET_NONE);
220 }
221 
 GPU_PBVH_Buffers *buffers,
 const MVert *mvert,
 const CustomData *vdata,
 const CustomData *ldata,
 const float *vmask,
 const int *sculpt_face_sets,
 int face_sets_color_seed,
 int face_sets_color_default,
 int update_flags,
 const float (*vert_normals)[3])
{
  /* Fill this node's vertex buffer with positions, normals, paint masks,
   * face-set colors, vertex colors and UVs for a regular mesh.
   * NOTE(review): the extraction dropped the signature head with the
   * function name (doc line 222) and several interior lines (doc lines 240,
   * 242, 249-250, 266-267, 275-276, 377-380, 448); orphaned argument lists
   * below belong to those dropped calls — confirm against the original. */
  GPUAttrRef vcol_refs[MAX_GPU_ATTR];
  GPUAttrRef cd_uvs[MAX_GPU_ATTR];

  /* Wrap the customdata in a temporary Mesh ID so the generic attribute API
   * can be used for color-layer queries. */
  Mesh me_query;
  BKE_id_attribute_copy_domains_temp(ID_ME, vdata, NULL, ldata, NULL, NULL, &me_query.id);

  /* NOTE(review): the declaration of `actcol` (active color layer) was
   * dropped here (doc line 240), as was the fallback domain value. */
  eAttrDomain actcol_domain = actcol ? BKE_id_attribute_domain(&me_query.id, actcol) :

  CustomDataLayer *rendercol = BKE_id_attributes_render_color_get(&me_query.id);

  int totcol;

  if (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) {
    /* NOTE(review): the `totcol = gpu_pbvh_make_attr_offs(...` call head was
     * dropped (doc lines 249-250); these are its remaining arguments. */
    vdata,
    NULL,
    ldata,
    NULL,
    vcol_refs,
    vbo_id->active_attrs_only,
    actcol ? actcol->type : 0,
    actcol_domain,
    actcol,
    rendercol);
  }
  else {
    totcol = 0;
  }

  /* NOTE(review): the `int totuv = gpu_pbvh_make_attr_offs(...` call head
   * was dropped (doc lines 266-267, 275-277); remaining arguments below. */
  NULL,
  NULL,
  ldata,
  NULL,
  cd_uvs,
  vbo_id->active_attrs_only,
  CD_MLOOPUV,
  get_render_layer(ldata, CD_MLOOPUV));

  const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_face_sets = sculpt_face_sets &&
                              (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
  bool empty_mask = true;
  bool default_face_set = true;

  {
    /* Flat storage: three vertices per visible triangle. */
    const int totelem = buffers->tot_tri * 3;

    /* Build VBO */
    if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totelem)) {
      GPUVertBufRaw pos_step = {0};
      GPUVertBufRaw nor_step = {0};
      GPUVertBufRaw msk_step = {0};
      GPUVertBufRaw fset_step = {0};
      GPUVertBufRaw col_step = {0};
      GPUVertBufRaw uv_step = {0};

      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->pos, &pos_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->nor, &nor_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->msk, &msk_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->fset, &fset_step);

      /* calculate normal for each polygon only once */
      uint mpoly_prev = UINT_MAX;
      short no[3] = {0, 0, 0};

      /* Copy UV coordinates, one full pass over the faces per UV layer. */
      if (totuv > 0) {
        for (int uv_i = 0; uv_i < totuv; uv_i++) {
          GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->uv[uv_i], &uv_step);

          GPUAttrRef *ref = cd_uvs + uv_i;
          CustomDataLayer *layer = ldata->layers + ref->layer_idx;
          MLoopUV *muv = layer->data;

          for (uint i = 0; i < buffers->face_indices_len; i++) {
            const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];

            if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
              continue;
            }

            for (uint j = 0; j < 3; j++) {
              MLoopUV *muv2 = muv + lt->tri[j];

              memcpy(GPU_vertbuf_raw_step(&uv_step), muv2->uv, sizeof(muv2->uv));
            }
          }
        }
      }

      /* Copy vertex colors, one full pass over the faces per color layer. */
      for (int col_i = 0; col_i < totcol; col_i++) {
        GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->col[col_i], &col_step);

        MPropCol *pcol = NULL;
        MLoopCol *mcol = NULL;

        GPUAttrRef *ref = vcol_refs + col_i;
        /* Point-domain colors live in vdata, corner-domain colors in ldata. */
        const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? vdata : ldata;
        CustomDataLayer *layer = cdata->layers + ref->layer_idx;

        bool color_loops = ref->domain == ATTR_DOMAIN_CORNER;

        if (layer->type == CD_PROP_COLOR) {
          pcol = (MPropCol *)layer->data;
        }
        else {
          mcol = (MLoopCol *)layer->data;
        }

        for (uint i = 0; i < buffers->face_indices_len; i++) {
          const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
          const uint vtri[3] = {
              buffers->mloop[lt->tri[0]].v,
              buffers->mloop[lt->tri[1]].v,
              buffers->mloop[lt->tri[2]].v,
          };

          if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
            continue;
          }

          for (uint j = 0; j < 3; j++) {
            /* Vertex Colors. */
            const uint loop_index = lt->tri[j];

            ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};

            if (pcol) {
              /* Float colors, clamped and converted to 16-bit unsigned. */
              MPropCol *pcol2 = pcol + (color_loops ? loop_index : vtri[j]);

              scol[0] = unit_float_to_ushort_clamp(pcol2->color[0]);
              scol[1] = unit_float_to_ushort_clamp(pcol2->color[1]);
              scol[2] = unit_float_to_ushort_clamp(pcol2->color[2]);
              scol[3] = unit_float_to_ushort_clamp(pcol2->color[3]);
            }
            else {
              const MLoopCol *mcol2 = mcol + (color_loops ? loop_index : vtri[j]);

              /* NOTE(review): the RGB channel conversions were dropped by the
               * extraction (doc lines 377-380); only alpha is visible. */
              scol[3] = unit_float_to_ushort_clamp(mcol2->a * (1.0f / 255.0f));
            }

            memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
          }
        }
      }

      /* Main pass: positions, normals, mask and face-set colors. */
      for (uint i = 0; i < buffers->face_indices_len; i++) {
        const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
        const uint vtri[3] = {
            buffers->mloop[lt->tri[0]].v,
            buffers->mloop[lt->tri[1]].v,
            buffers->mloop[lt->tri[2]].v,
        };

        if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
          continue;
        }

        /* Face normal and mask */
        if (lt->poly != mpoly_prev && !buffers->smooth) {
          const MPoly *mp = &buffers->mpoly[lt->poly];
          float fno[3];
          BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
          normal_float_to_short_v3(no, fno);
          mpoly_prev = lt->poly;
        }

        uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
        if (show_face_sets) {
          const int fset = abs(sculpt_face_sets[lt->poly]);
          /* Skip for the default color Face Set to render it white. */
          if (fset != face_sets_color_default) {
            BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
            default_face_set = false;
          }
        }

        float fmask = 0.0f;
        uchar cmask = 0;
        if (show_mask && !buffers->smooth) {
          /* Flat shading: average the three corner masks once per triangle. */
          fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
          cmask = (uchar)(fmask * 255);
        }

        for (uint j = 0; j < 3; j++) {
          const MVert *v = &mvert[vtri[j]];
          copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);

          if (buffers->smooth) {
            normal_float_to_short_v3(no, vert_normals[vtri[j]]);
          }
          copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), no);

          if (show_mask && buffers->smooth) {
            cmask = (uchar)(vmask[vtri[j]] * 255);
          }

          *(uchar *)GPU_vertbuf_raw_step(&msk_step) = cmask;
          /* Stays true only while every written mask byte is zero. */
          empty_mask = empty_mask && (cmask == 0);
          /* Face Sets. */
          memcpy(GPU_vertbuf_raw_step(&fset_step), face_set_color, sizeof(uchar[3]));
        }
      }
    }

    /* NOTE(review): a line was dropped here by the extraction (doc line 448). */
  }

  /* Get material index from the first face of this buffer. */
  const MLoopTri *lt = &buffers->looptri[buffers->face_indices[0]];
  const MPoly *mp = &buffers->mpoly[lt->poly];
  buffers->material_index = mp->mat_nr;

  /* Overlay drawing is needed if any mask or non-default face set was seen. */
  buffers->show_overlay = !empty_mask || !default_face_set;
  buffers->mvert = mvert;
}
459 
 const MLoop *mloop,
 const MLoopTri *looptri,
 const MVert *mvert,
 const int *face_indices,
 const int *sculpt_face_sets,
 const int face_indices_len,
 const struct Mesh *mesh)
{
  /* Build the initial buffers struct (wireframe line index buffer only) for
   * a regular-mesh PBVH node; vertex data is filled later by the update pass.
   * NOTE(review): the signature head carrying the function name and the
   * `mpoly` parameter was dropped by the extraction (doc line 460). */
  GPU_PBVH_Buffers *buffers;
  int i, tottri;
  int tot_real_edges = 0;

  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");

  /* smooth or flat for all: the PBVH guarantees all faces in a node share
   * the flag, so testing the first face suffices. */
  buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;

  buffers->show_overlay = false;

  /* Count the number of visible triangles */
  for (i = 0, tottri = 0; i < face_indices_len; i++) {
    const MLoopTri *lt = &looptri[face_indices[i]];
    if (gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
      int r_edges[3];
      /* NOTE(review): the call filling r_edges was dropped by the
       * extraction (doc line 485) — likely an edge-lookup on `mesh`. */
      for (int j = 0; j < 3; j++) {
        if (r_edges[j] != -1) {
          tot_real_edges++;
        }
      }
      tottri++;
    }
  }

  if (tottri == 0) {
    /* Fully hidden node: keep the source pointers but build no index data. */
    buffers->tot_tri = 0;

    buffers->mpoly = mpoly;
    buffers->mloop = mloop;
    buffers->looptri = looptri;
    buffers->face_indices = face_indices;
    buffers->face_indices_len = 0;

    return buffers;
  }

  /* Fill only the line buffer. */
  GPUIndexBufBuilder elb_lines;
  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
  int vert_idx = 0;

  for (i = 0; i < face_indices_len; i++) {
    const MLoopTri *lt = &looptri[face_indices[i]];

    /* Skip hidden faces */
    if (!gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
      continue;
    }

    int r_edges[3];
    /* NOTE(review): the call filling r_edges was dropped by the extraction
     * (doc line 521). Only real (non-triangulation) edges get a line. */
    if (r_edges[0] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
    }
    if (r_edges[1] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
    }
    if (r_edges[2] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
    }

    vert_idx++;
  }
  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);

  buffers->tot_tri = tottri;

  buffers->mpoly = mpoly;
  buffers->mloop = mloop;
  buffers->looptri = looptri;

  buffers->face_indices = face_indices;
  buffers->face_indices_len = face_indices_len;

  return buffers;
}
547 
550 /* -------------------------------------------------------------------- */
 SubdivCCG *UNUSED(subdiv_ccg),
 const int *UNUSED(face_sets),
 const int *grid_indices,
 uint visible_quad_len,
 int totgrid,
 int gridsize)
{
  /* Build the triangle and wireframe index buffers for a grids (multires)
   * node, plus coarse "fast" buffers (one quad per grid) used for
   * fast-navigate drawing.
   * NOTE(review): the signature head carrying the function name and the
   * buffers parameter was dropped by the extraction (doc lines 551-554). */
  GPUIndexBufBuilder elb, elb_lines;
  GPUIndexBufBuilder elb_fast, elb_lines_fast;

  /* Two triangles per visible quad; lines sized for the full grid wireframe. */
  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
  GPU_indexbuf_init(&elb_fast, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
  GPU_indexbuf_init(&elb_lines_fast, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);

  if (buffers->smooth) {
    /* Smooth shading: vertices are shared, one vertex per grid point. */
    uint offset = 0;
    const uint grid_vert_len = gridsize * gridsize;
    for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
      uint v0, v1, v2, v3;
      bool grid_visible = false;

      BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

      for (int j = 0; j < gridsize - 1; j++) {
        for (int k = 0; k < gridsize - 1; k++) {
          /* Skip hidden grid face */
          if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
            continue;
          }
          /* Indices in a Clockwise QUAD disposition. */
          v0 = offset + j * gridsize + k;
          v1 = v0 + 1;
          v2 = v1 + gridsize;
          v3 = v2 - 1;

          GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
          GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

          GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
          GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

          /* Close the last row of the wireframe. */
          if (j + 2 == gridsize) {
            GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
          }
          grid_visible = true;
        }

        /* Close the right edge of the row using the last visible quad. */
        if (grid_visible) {
          GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
        }
      }

      if (grid_visible) {
        /* Grid corners */
        v0 = offset;
        v1 = offset + gridsize - 1;
        v2 = offset + grid_vert_len - 1;
        v3 = offset + grid_vert_len - gridsize;

        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

        GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
      }
    }
  }
  else {
    /* Flat shading: 4 unshared vertices per quad. */
    uint offset = 0;
    const uint grid_vert_len = square_uint(gridsize - 1) * 4;
    for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
      bool grid_visible = false;
      BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

      uint v0, v1, v2, v3;
      for (int j = 0; j < gridsize - 1; j++) {
        for (int k = 0; k < gridsize - 1; k++) {
          /* Skip hidden grid face */
          if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
            continue;
          }
          /* VBO data are in a Clockwise QUAD disposition. */
          v0 = offset + (j * (gridsize - 1) + k) * 4;
          v1 = v0 + 1;
          v2 = v0 + 2;
          v3 = v0 + 3;

          GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
          GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

          GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
          GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

          if (j + 2 == gridsize) {
            GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
          }
          grid_visible = true;
        }

        if (grid_visible) {
          GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
        }
      }

      if (grid_visible) {
        /* Grid corners */
        v0 = offset;
        v1 = offset + (gridsize - 1) * 4 - 3;
        v2 = offset + grid_vert_len - 2;
        v3 = offset + grid_vert_len - (gridsize - 1) * 4 + 3;

        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

        GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
      }
    }
  }

  buffers->index_buf = GPU_indexbuf_build(&elb);
  buffers->index_buf_fast = GPU_indexbuf_build(&elb_fast);
  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
  buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines_fast);
}
685 
 const struct DMFlagMat *grid_flag_mats,
 const int *grid_indices)
{
  /* If the node's smooth/flat shading flag changed, drop cached GPU data so
   * it is rebuilt with the new vertex layout.
   * NOTE(review): the signature head and several GPU_*_DISCARD_SAFE lines
   * were dropped by the extraction (doc lines 686, 694-695, 697-702); only
   * the `lines` batch discard is visible. */
  const bool smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

  if (buffers->smooth != smooth) {
    buffers->smooth = smooth;
    GPU_BATCH_DISCARD_SAFE(buffers->lines);

  }
}
705 
 GPU_PBVH_Buffers *buffers,
 SubdivCCG *subdiv_ccg,
 CCGElem **grids,
 const struct DMFlagMat *grid_flag_mats,
 int *grid_indices,
 int totgrid,
 const int *sculpt_face_sets,
 const int face_sets_color_seed,
 const int face_sets_color_default,
 const struct CCGKey *key,
 const int update_flags)
{
  /* Fill the vertex buffer (and, when missing, the index buffers) for a
   * grids (multires) PBVH node.
   * NOTE(review): the signature head carrying the function name and the
   * vbo_id parameter was dropped by the extraction (doc line 706), along
   * with a few interior call lines noted below. */
  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  const bool show_face_sets = sculpt_face_sets &&
                              (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
  bool empty_mask = true;
  bool default_face_set = true;

  int i, j, k, x, y;

  /* Build VBO */
  const int has_mask = key->has_mask;

  /* Smooth shares grid points; flat uses 4 unshared vertices per quad. */
  uint vert_per_grid = (buffers->smooth) ? key->grid_area : (square_i(key->grid_size - 1) * 4);
  uint vert_count = totgrid * vert_per_grid;

  if (buffers->index_buf == NULL) {
    uint visible_quad_len = BKE_pbvh_count_grid_quads(
        (BLI_bitmap **)buffers->grid_hidden, grid_indices, totgrid, key->grid_size);

    /* totally hidden node, return here to avoid BufferData with zero below. */
    if (visible_quad_len == 0) {
      return;
    }

    /* NOTE(review): the index-buffer-fill call head was dropped by the
     * extraction (doc line 743); these are its remaining arguments. */
    subdiv_ccg,
    sculpt_face_sets,
    grid_indices,
    visible_quad_len,
    totgrid,
    key->grid_size);
  }

  uint vbo_index_offset = 0;
  /* Build VBO */
  if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, vert_count)) {
    GPUIndexBufBuilder elb_lines;

    if (buffers->index_lines_buf == NULL) {
      GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);
    }

    for (i = 0; i < totgrid; i++) {
      const int grid_index = grid_indices[i];
      CCGElem *grid = grids[grid_index];
      int vbo_index = vbo_index_offset;

      uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};

      if (show_face_sets && subdiv_ccg && sculpt_face_sets) {
        /* Face sets are stored per base face, not per grid. */
        const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, grid_index);

        const int fset = abs(sculpt_face_sets[face_index]);
        /* Skip for the default color Face Set to render it white. */
        if (fset != face_sets_color_default) {
          BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
          default_face_set = false;
        }
      }

      if (buffers->smooth) {
        for (y = 0; y < key->grid_size; y++) {
          for (x = 0; x < key->grid_size; x++) {
            CCGElem *elem = CCG_grid_elem(key, grid, x, y);
            /* NOTE(review): the `GPU_vertbuf_attr_set(` call head was dropped
             * by the extraction (doc line 783). */
                buffers->vert_buf, vbo_id->pos, vbo_index, CCG_elem_co(key, elem));

            short no_short[3];
            normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index, no_short);

            if (has_mask && show_mask) {
              float fmask = *CCG_elem_mask(key, elem);
              uchar cmask = (uchar)(fmask * 255);
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index, &cmask);
              empty_mask = empty_mask && (cmask == 0);
            }

            if (show_vcol) {
              /* Upload plain white as the color value. */
              const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index, &vcol);
            }

            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index, &face_set_color);

            vbo_index += 1;
          }
        }
        vbo_index_offset += key->grid_area;
      }
      else {
        for (j = 0; j < key->grid_size - 1; j++) {
          for (k = 0; k < key->grid_size - 1; k++) {
            CCGElem *elems[4] = {
                CCG_grid_elem(key, grid, k, j),
                CCG_grid_elem(key, grid, k + 1, j),
                CCG_grid_elem(key, grid, k + 1, j + 1),
                CCG_grid_elem(key, grid, k, j + 1),
            };
            float *co[4] = {
                CCG_elem_co(key, elems[0]),
                CCG_elem_co(key, elems[1]),
                CCG_elem_co(key, elems[2]),
                CCG_elem_co(key, elems[3]),
            };

            float fno[3];
            short no_short[3];
            /* NOTE: Clockwise indices ordering, that's why we invert order here. */
            normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
            normal_float_to_short_v3(no_short, fno);

            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 0, co[0]);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 0, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 1, co[1]);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 1, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 2, co[2]);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 2, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 3, co[3]);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 3, no_short);

            if (has_mask && show_mask) {
              /* Flat shading: average of the quad's four corner masks. */
              float fmask = (*CCG_elem_mask(key, elems[0]) + *CCG_elem_mask(key, elems[1]) +
                             *CCG_elem_mask(key, elems[2]) + *CCG_elem_mask(key, elems[3])) *
                            0.25f;
              uchar cmask = (uchar)(fmask * 255);
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 0, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 1, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 2, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 3, &cmask);
              empty_mask = empty_mask && (cmask == 0);
            }

            const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 0, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 1, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 2, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 3, &vcol);

            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 0, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 1, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 2, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 3, &face_set_color);

            vbo_index += 4;
          }
        }
        vbo_index_offset += square_i(key->grid_size - 1) * 4;
      }
    }

    /* NOTE(review): a line was dropped here by the extraction (doc line 870). */
  }

  /* Get material index from the first face of this buffer. */
  buffers->material_index = grid_flag_mats[grid_indices[0]].mat_nr;

  buffers->grids = grids;
  buffers->grid_indices = grid_indices;
  buffers->totgrid = totgrid;
  buffers->grid_flag_mats = grid_flag_mats;
  buffers->gridkey = *key;
  buffers->show_overlay = !empty_mask || !default_face_set;
}
883 
{
  /* Build a new, empty buffers struct for a grids (multires) PBVH node;
   * actual GPU data is filled later by the update pass.
   * NOTE(review): the signature line was dropped by the extraction (doc
   * line 884); parameters grid_hidden, totgrid and smooth per the body. */
  GPU_PBVH_Buffers *buffers;

  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
  buffers->grid_hidden = grid_hidden;
  buffers->totgrid = totgrid;
  buffers->smooth = smooth;

  buffers->show_overlay = false;

  return buffers;
}
897 
898 #undef FILL_QUAD_BUFFER
899 
902 /* -------------------------------------------------------------------- */
/* Output a BMVert into a VertexBufferFormat array at v_index. */
 BMVert *v,
 GPUVertBuf *vert_buf,
 int v_index,
 const float fno[3],
 const float *fmask,
 const int cd_vert_mask_offset,
 const bool show_mask,
 const bool show_vcol,
 bool *empty_mask)
{
  /* NOTE(review): the signature head carrying the function name and the
   * vbo_id parameter was dropped by the extraction (doc line 907).
   * `fno`, when non-NULL, overrides the vertex normal (used for flat
   * shading); `fmask`, when non-NULL, overrides the per-vertex mask
   * otherwise read from the `cd_vert_mask_offset` customdata slot. */

  /* Vertex should always be visible if it's used by a visible face.
   * NOTE(review): an assertion line was dropped here (doc line 919). */

  /* Set coord, normal, and mask */
  GPU_vertbuf_attr_set(vert_buf, vbo_id->pos, v_index, v->co);

  short no_short[3];
  normal_float_to_short_v3(no_short, fno ? fno : v->no);
  GPU_vertbuf_attr_set(vert_buf, vbo_id->nor, v_index, no_short);

  if (show_mask) {
    float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
    uchar cmask = (uchar)(effective_mask * 255);
    GPU_vertbuf_attr_set(vert_buf, vbo_id->msk, v_index, &cmask);
    /* Stays true only while every written mask byte is zero. */
    *empty_mask = *empty_mask && (cmask == 0);
  }

  if (show_vcol) {
    /* Upload plain white as the color value. */
    const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
    GPU_vertbuf_attr_set(vert_buf, vbo_id->col[0], v_index, &vcol);
  }

  /* Add default face sets color to avoid artifacts. */
  const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
  GPU_vertbuf_attr_set(vert_buf, vbo_id->fset, v_index, &face_set);
}
944 
945 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
{
  GSetIterator gs_iter;
  int totvert = 0;

  /* Vertices owned by this node. */
  GSET_ITER (gs_iter, bm_unique_verts) {
    BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
    /* NOTE(review): the hidden-flag test line was dropped by the extraction
     * (doc line 953); the brace below closes that `if`. */
      totvert++;
    }
  }
  /* Vertices referenced by this node but owned elsewhere. */
  GSET_ITER (gs_iter, bm_other_verts) {
    BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
    /* NOTE(review): same dropped hidden-flag test (doc line 959). */
      totvert++;
    }
  }

  return totvert;
}
966 
967 /* Return the total number of visible faces */
/* Return the total number of visible faces */
static int gpu_bmesh_face_visible_count(GSet *bm_faces)
{
  GSetIterator gh_iter;
  int totface = 0;

  GSET_ITER (gh_iter, bm_faces) {
    BMFace *f = BLI_gsetIterator_getKey(&gh_iter);

    /* NOTE(review): the hidden-flag test line was dropped by the extraction
     * (doc line 976); the brace below closes that `if`. */
      totface++;
    }
  }

  return totface;
}
983 
{
  /* Discard batches that must be rebuilt after a BMesh node update.
   * NOTE(review): the signature line and some GPU_*_DISCARD_SAFE lines were
   * dropped by the extraction (doc lines 984, 988, 990-991, 995). */
  if (buffers->smooth) {
    /* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
    GPU_BATCH_DISCARD_SAFE(buffers->lines);
  }
  else {
    GPU_BATCH_DISCARD_SAFE(buffers->lines);
  }
}
998 
 GPU_PBVH_Buffers *buffers,
 BMesh *bm,
 GSet *bm_faces,
 GSet *bm_unique_verts,
 GSet *bm_other_verts,
 const int update_flags)
{
  /* Fill vertex and index buffers for a dynamic-topology (BMesh) node.
   * NOTE(review): the signature head carrying the function name and the
   * vbo_id parameter was dropped by the extraction (doc line 999), along
   * with a few interior call lines noted below. */
  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  int tottri, totvert;
  bool empty_mask = true;
  BMFace *f = NULL;

  /* Count visible triangles */
  tottri = gpu_bmesh_face_visible_count(bm_faces);

  if (buffers->smooth) {
    /* Count visible vertices */
    totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
  }
  else {
    /* Flat shading duplicates vertices: three per triangle. */
    totvert = tottri * 3;
  }

  if (!tottri) {
    if (BLI_gset_len(bm_faces) != 0) {
      /* Node is just hidden. */
    }
    else {
      buffers->clear_bmesh_on_flush = true;
    }
    buffers->tot_tri = 0;
    return;
  }

  /* TODO: make mask layer optional for bmesh buffer. */
  const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

  /* Fill vertex buffer */
  if (!gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totvert)) {
    /* Memory map failed */
    return;
  }

  int v_index = 0;

  if (buffers->smooth) {
    /* Fill the vertex and triangle buffer in one pass over faces. */
    GPUIndexBufBuilder elb, elb_lines;
    GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);

    /* Deduplicate shared vertices via a vert -> VBO-index map. */
    GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);

    GSetIterator gs_iter;
    GSET_ITER (gs_iter, bm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
        BMVert *v[3];
        /* NOTE(review): the call filling v[3] was dropped by the extraction
         * (doc line 1060). */

        uint idx[3];
        for (int i = 0; i < 3; i++) {
          void **idx_p;
          if (!BLI_ghash_ensure_p(bm_vert_to_index, v[i], &idx_p)) {
            /* Add vertex to the vertex buffer each time a new one is encountered */
            *idx_p = POINTER_FROM_UINT(v_index);

            /* NOTE(review): the vertex-copy call head was dropped by the
             * extraction (doc line 1069); these are its arguments. */
            v[i],
            buffers->vert_buf,
            v_index,
            NULL,
            NULL,
            cd_vert_mask_offset,
            show_mask,
            show_vcol,
            &empty_mask);

            idx[i] = v_index;
            v_index++;
          }
          else {
            /* Vertex already in the vertex buffer, just get the index. */
            idx[i] = POINTER_AS_UINT(*idx_p);
          }
        }

        GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);

        GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
      }
    }

    BLI_ghash_free(bm_vert_to_index, NULL, NULL);

    buffers->tot_tri = tottri;
    if (buffers->index_buf == NULL) {
      buffers->index_buf = GPU_indexbuf_build(&elb);
    }
    else {
      GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
    }
    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
  }
  else {
    /* Flat shading: no triangle index buffer needed, vertices are in order. */
    GSetIterator gs_iter;

    GPUIndexBufBuilder elb_lines;
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);

    GSET_ITER (gs_iter, bm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      BLI_assert(f->len == 3);

      if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
        BMVert *v[3];
        float fmask = 0.0f;
        int i;

        /* NOTE(review): the call filling v[3] was dropped by the extraction
         * (doc line 1124). */

        /* Average mask value */
        for (i = 0; i < 3; i++) {
          fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
        }
        fmask /= 3.0f;

        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);

        for (i = 0; i < 3; i++) {
          /* NOTE(review): the vertex-copy call head was dropped by the
           * extraction (doc line 1137); these are its arguments. */
          v[i],
          buffers->vert_buf,
          v_index++,
          f->no,
          &fmask,
          cd_vert_mask_offset,
          show_mask,
          show_vcol,
          &empty_mask);
        }
      }
    }

    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
    buffers->tot_tri = tottri;
  }

  /* Get material index from the last face we iterated on. */
  buffers->material_index = (f) ? f->mat_nr : 0;

  buffers->show_overlay = !empty_mask;

  /* NOTE(review): a final line was dropped here by the extraction
   * (doc line 1160). */
}
1162 
1165 /* -------------------------------------------------------------------- */
1170 {
1171  GPU_PBVH_Buffers *buffers;
1172 
1173  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
1174  buffers->use_bmesh = true;
1175  buffers->smooth = smooth_shading;
1176  buffers->show_overlay = true;
1177 
1178  return buffers;
1179 }
1180 
1190  eCustomDataMask type_mask,
1191  const CustomData *vdata,
1192  const CustomData *edata,
1193  const CustomData *ldata,
1194  const CustomData *pdata,
1195  GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
1196  bool active_only,
1197  int active_type,
1198  int active_domain,
1199  const CustomDataLayer *active_layer,
1200  const CustomDataLayer *render_layer)
1201 {
1202  const CustomData *cdata_active = active_domain == ATTR_DOMAIN_POINT ? vdata : ldata;
1203 
1204  if (!cdata_active) {
1205  return 0;
1206  }
1207 
1208  if (active_only) {
1209  int idx = active_layer ? active_layer - cdata_active->layers : -1;
1210 
1211  if (idx >= 0 && idx < cdata_active->totlayer) {
1212  r_cd_attrs[0].cd_offset = cdata_active->layers[idx].offset;
1213  r_cd_attrs[0].domain = active_domain;
1214  r_cd_attrs[0].type = active_type;
1215  r_cd_attrs[0].layer_idx = idx;
1216 
1217  return 1;
1218  }
1219 
1220  return 0;
1221  }
1222 
1223  const CustomData *datas[4] = {vdata, edata, pdata, ldata};
1224 
1225  int count = 0;
1226  for (eAttrDomain domain = 0; domain < 4; domain++) {
1227  const CustomData *cdata = datas[domain];
1228 
1229  if (!cdata || !((1 << domain) & domain_mask)) {
1230  continue;
1231  }
1232 
1233  CustomDataLayer *cl = cdata->layers;
1234 
1235  for (int i = 0; count < MAX_GPU_ATTR && i < cdata->totlayer; i++, cl++) {
1236  if ((CD_TYPE_AS_MASK(cl->type) & type_mask) && !(cl->flag & CD_FLAG_TEMPORARY)) {
1237  GPUAttrRef *ref = r_cd_attrs + count;
1238 
1239  ref->cd_offset = cl->offset;
1240  ref->type = cl->type;
1241  ref->layer_idx = i;
1242  ref->domain = domain;
1243 
1244  count++;
1245  }
1246  }
1247  }
1248 
1249  /* ensure render layer is last
1250  draw cache code seems to need this
1251  */
1252 
1253  for (int i = 0; i < count; i++) {
1254  GPUAttrRef *ref = r_cd_attrs + i;
1255  const CustomData *cdata = datas[ref->domain];
1256 
1257  if (cdata->layers + ref->layer_idx == render_layer) {
1258  SWAP(GPUAttrRef, r_cd_attrs[i], r_cd_attrs[count - 1]);
1259  break;
1260  }
1261  }
1262 
1263  return count;
1264 }
1265 
1267 {
1268  bool bad = false;
1269 
1270  bad |= a->active_attrs_only != b->active_attrs_only;
1271 
1272  bad |= a->pos != b->pos;
1273  bad |= a->fset != b->fset;
1274  bad |= a->msk != b->msk;
1275  bad |= a->nor != b->nor;
1276 
1277  for (int i = 0; i < MIN2(a->totuv, b->totuv); i++) {
1278  bad |= a->uv[i] != b->uv[i];
1279  }
1280 
1281  for (int i = 0; i < MIN2(a->totcol, b->totcol); i++) {
1282  bad |= a->col[i] != b->col[i];
1283  }
1284 
1285  bad |= a->totuv != b->totuv;
1286  bad |= a->totcol != b->totcol;
1287 
1288  return !bad;
1289 }
1290 
1292  PBVHGPUFormat *vbo_id,
1293  const CustomData *vdata,
1294  const CustomData *ldata,
1295  bool active_attrs_only)
1296 {
1297  const bool active_only = active_attrs_only;
1298  PBVHGPUFormat old_format = *vbo_id;
1299 
1300  GPU_vertformat_clear(&vbo_id->format);
1301 
1302  vbo_id->active_attrs_only = active_attrs_only;
1303 
1304  if (vbo_id->format.attr_len == 0) {
1305  vbo_id->pos = GPU_vertformat_attr_add(
1306  &vbo_id->format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
1307  vbo_id->nor = GPU_vertformat_attr_add(
1308  &vbo_id->format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
1309 
1310  /* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
1311  vbo_id->msk = GPU_vertformat_attr_add(
1312  &vbo_id->format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
1313 
1314  vbo_id->totcol = 0;
1315  if (pbvh_type == PBVH_FACES) {
1316  int ci = 0;
1317 
1318  Mesh me_query;
1319 
1320  BKE_id_attribute_copy_domains_temp(ID_ME, vdata, NULL, ldata, NULL, NULL, &me_query.id);
1321 
1322  CustomDataLayer *active_color_layer = BKE_id_attributes_active_color_get(&me_query.id);
1323  CustomDataLayer *render_color_layer = BKE_id_attributes_render_color_get(&me_query.id);
1324  eAttrDomain active_color_domain = active_color_layer ?
1325  BKE_id_attribute_domain(&me_query.id,
1326  active_color_layer) :
1328 
1329  GPUAttrRef vcol_layers[MAX_GPU_ATTR];
1332  vdata,
1333  NULL,
1334  ldata,
1335  NULL,
1336  vcol_layers,
1337  active_only,
1338  active_color_layer ? active_color_layer->type : -1,
1339  active_color_domain,
1340  active_color_layer,
1341  render_color_layer);
1342 
1343  for (int i = 0; i < totlayer; i++) {
1344  GPUAttrRef *ref = vcol_layers + i;
1345  const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? vdata : ldata;
1346 
1347  const CustomDataLayer *layer = cdata->layers + ref->layer_idx;
1348 
1349  if (vbo_id->totcol < MAX_GPU_ATTR) {
1350  vbo_id->col[ci++] = GPU_vertformat_attr_add(
1351  &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
1352  vbo_id->totcol++;
1353 
1354  bool is_render = render_color_layer == layer;
1355  bool is_active = active_color_layer == layer;
1356 
1357  DRW_cdlayer_attr_aliases_add(&vbo_id->format, "c", cdata, layer, is_render, is_active);
1358  }
1359  }
1360  }
1361 
1362  /* ensure at least one vertex color layer */
1363  if (vbo_id->totcol == 0) {
1364  vbo_id->col[0] = GPU_vertformat_attr_add(
1365  &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
1366  vbo_id->totcol = 1;
1367 
1368  GPU_vertformat_alias_add(&vbo_id->format, "ac");
1369  }
1370 
1371  vbo_id->fset = GPU_vertformat_attr_add(
1372  &vbo_id->format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
1373 
1374  vbo_id->totuv = 0;
1375  if (pbvh_type == PBVH_FACES && ldata && CustomData_has_layer(ldata, CD_MLOOPUV)) {
1376  GPUAttrRef uv_layers[MAX_GPU_ATTR];
1377  CustomDataLayer *active = NULL, *render = NULL;
1378 
1380  render = get_render_layer(ldata, CD_MLOOPUV);
1381 
1384  NULL,
1385  NULL,
1386  ldata,
1387  NULL,
1388  uv_layers,
1389  active_only,
1390  CD_MLOOPUV,
1392  active,
1393  render);
1394 
1395  vbo_id->totuv = totlayer;
1396 
1397  for (int i = 0; i < totlayer; i++) {
1398  GPUAttrRef *ref = uv_layers + i;
1399 
1400  vbo_id->uv[i] = GPU_vertformat_attr_add(
1401  &vbo_id->format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
1402 
1403  CustomDataLayer *cl = ldata->layers + ref->layer_idx;
1404  bool is_active = ref->layer_idx == CustomData_get_active_layer_index(ldata, CD_MLOOPUV);
1405 
1406  DRW_cdlayer_attr_aliases_add(&vbo_id->format, "u", ldata, cl, cl == render, is_active);
1407 
1408  /* Apparently the render attribute is 'a' while active is 'au',
1409  * at least going by the draw cache extractor code.
1410  */
1411  if (cl == render) {
1412  GPU_vertformat_alias_add(&vbo_id->format, "a");
1413  }
1414  }
1415  }
1416  }
1417 
1418  if (!gpu_pbvh_format_equals(&old_format, vbo_id)) {
1419  return true;
1420  }
1421 
1422  return false;
1423 }
1424 
1425 GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
1426 {
1427  if (wires) {
1428  return (fast && buffers->lines_fast) ? buffers->lines_fast : buffers->lines;
1429  }
1430 
1431  return (fast && buffers->triangles_fast) ? buffers->triangles_fast : buffers->triangles;
1432 }
1433 
1435 {
1436  return buffers->show_overlay;
1437 }
1438 
1440 {
1441  return buffers->material_index;
1442 }
1443 
1445 {
1446  GPU_BATCH_DISCARD_SAFE(buffers->lines);
1455 }
1456 
1458 {
1459  /* Free empty bmesh node buffers. */
1460  if (buffers->clear_bmesh_on_flush) {
1461  gpu_pbvh_buffers_clear(buffers);
1462  buffers->clear_bmesh_on_flush = false;
1463  }
1464 
1465  /* Force flushing to the GPU. */
1466  if (buffers->vert_buf && GPU_vertbuf_get_data(buffers->vert_buf)) {
1467  GPU_vertbuf_use(buffers->vert_buf);
1468  }
1469 }
1470 
1472 {
1473  if (buffers) {
1474  gpu_pbvh_buffers_clear(buffers);
1475  MEM_freeN(buffers);
1476  }
1477 }
1478 
Generic geometry attributes built on CustomData.
eAttrDomain
Definition: BKE_attribute.h:25
@ ATTR_DOMAIN_POINT
Definition: BKE_attribute.h:27
@ ATTR_DOMAIN_CORNER
Definition: BKE_attribute.h:30
@ ATTR_DOMAIN_AUTO
Definition: BKE_attribute.h:26
struct CustomDataLayer * BKE_id_attributes_active_color_get(const struct ID *id)
eAttrDomainMask
Definition: BKE_attribute.h:36
@ ATTR_DOMAIN_MASK_CORNER
Definition: BKE_attribute.h:40
struct CustomDataLayer * BKE_id_attributes_render_color_get(const struct ID *id)
eAttrDomain BKE_id_attribute_domain(const struct ID *id, const struct CustomDataLayer *layer)
#define ATTR_DOMAIN_MASK_COLOR
Definition: BKE_attribute.h:48
void BKE_id_attribute_copy_domains_temp(short id_type, const struct CustomData *vdata, const struct CustomData *edata, const struct CustomData *ldata, const struct CustomData *pdata, const struct CustomData *cdata, struct ID *r_id)
#define ATTR_DOMAIN_NUM
Definition: BKE_attribute.h:34
BLI_INLINE CCGElem * CCG_grid_elem(const CCGKey *key, CCGElem *elem, int x, int y)
Definition: BKE_ccg.h:108
BLI_INLINE float * CCG_elem_mask(const CCGKey *key, CCGElem *elem)
Definition: BKE_ccg.h:97
BLI_INLINE float * CCG_elem_no(const CCGKey *key, CCGElem *elem)
Definition: BKE_ccg.h:91
struct CCGElem CCGElem
Definition: BKE_ccg.h:30
BLI_INLINE float * CCG_elem_co(const CCGKey *key, CCGElem *elem)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_active_layer_index(const struct CustomData *data, int type)
int CustomData_get_render_layer_index(const struct CustomData *data, int type)
bool CustomData_has_layer(const struct CustomData *data, int type)
uint64_t eCustomDataMask
#define CD_TYPE_AS_MASK(_type)
int CustomData_get_offset(const struct CustomData *data, int type)
void BKE_mesh_looptri_get_real_edges(const struct Mesh *mesh, const struct MLoopTri *looptri, int r_edges[3])
void BKE_mesh_calc_poly_normal(const struct MPoly *mpoly, const struct MLoop *loopstart, const struct MVert *mvarray, float r_no[3])
bool paint_is_face_hidden(const struct MLoopTri *lt, const struct MVert *mvert, const struct MLoop *mloop)
void BKE_paint_face_set_overlay_color_get(int face_set, int seed, uchar r_color[4])
Definition: paint.c:2336
#define SCULPT_FACE_SET_NONE
Definition: BKE_paint.h:267
bool paint_is_grid_face_hidden(const unsigned int *grid_hidden, int gridsize, int x, int y)
Definition: paint.c:1253
A BVH for high poly meshes.
int BKE_pbvh_count_grid_quads(BLI_bitmap **grid_hidden, const int *grid_indices, int totgrid, int gridsize)
Definition: pbvh.c:339
PBVHType
Definition: BKE_pbvh.h:233
@ PBVH_FACES
Definition: BKE_pbvh.h:234
int BKE_subdiv_ccg_grid_to_face_index(const SubdivCCG *subdiv_ccg, int grid_index)
Definition: subdiv_ccg.c:1940
#define BLI_assert(a)
Definition: BLI_assert.h:46
unsigned int BLI_bitmap
Definition: BLI_bitmap.h:16
struct GSet GSet
Definition: BLI_ghash.h:340
GHash * BLI_ghash_int_new_ex(const char *info, unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
unsigned int BLI_gset_len(const GSet *gs) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:957
#define GSET_ITER(gs_iter_, gset_)
Definition: BLI_ghash.h:471
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition: BLI_ghash.c:863
BLI_INLINE void * BLI_gsetIterator_getKey(GSetIterator *gsi)
Definition: BLI_ghash.h:458
bool BLI_ghash_ensure_p(GHash *gh, void *key, void ***r_val) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:755
MINLINE int square_i(int a)
MINLINE unsigned int square_uint(unsigned int a)
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition: math_geom.c:50
MINLINE void normal_float_to_short_v3(short r[3], const float n[3])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void copy_v3_v3_short(short r[3], const short a[3])
unsigned char uchar
Definition: BLI_sys_types.h:70
unsigned int uint
Definition: BLI_sys_types.h:67
unsigned short ushort
Definition: BLI_sys_types.h:68
#define SWAP(type, a, b)
#define POINTER_AS_UINT(i)
#define UNUSED(x)
#define MIN2(a, b)
#define POINTER_FROM_UINT(i)
@ ID_ME
Definition: DNA_ID_enums.h:48
@ CD_FLAG_TEMPORARY
#define CD_MASK_COLOR_ALL
@ CD_PAINT_MASK
@ CD_PROP_COLOR
@ CD_MLOOPUV
#define CD_MASK_MLOOPUV
@ ME_SMOOTH
void DRW_cdlayer_attr_aliases_add(struct GPUVertFormat *format, const char *base_name, const struct CustomData *data, const struct CustomDataLayer *cl, bool is_active_render, bool is_active_layer)
GPUBatch
Definition: GPU_batch.h:78
#define GPU_batch_create(prim, verts, elem)
Definition: GPU_batch.h:95
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition: GPU_batch.h:216
@ GPU_PBVH_BUFFERS_SHOW_MASK
Definition: GPU_buffers.h:82
@ GPU_PBVH_BUFFERS_SHOW_VCOL
Definition: GPU_buffers.h:83
@ GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS
Definition: GPU_buffers.h:84
struct GPUIndexBuf GPUIndexBuf
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
GPUIndexBuf * GPU_indexbuf_build(GPUIndexBufBuilder *)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, GPUIndexBuf *)
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble v1
GPUPrimType
Definition: GPU_primitive.h:18
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:20
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:21
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
struct GPUVertBuf GPUVertBuf
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_use(GPUVertBuf *)
GPUVertBuf * GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageType)
GPU_INLINE void * GPU_vertbuf_raw_step(GPUVertBufRaw *a)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access)
void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data)
@ GPU_USAGE_STATIC
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
void GPU_vertformat_clear(GPUVertFormat *)
void GPU_vertformat_alias_add(GPUVertFormat *, const char *alias)
@ GPU_COMP_U16
@ GPU_COMP_F32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
#define BM_ELEM_CD_GET_FLOAT(ele, offset)
Definition: bmesh_class.h:553
@ BM_ELEM_HIDDEN
Definition: bmesh_class.h:472
#define BM_elem_flag_test(ele, hflag)
Definition: bmesh_inline.h:12
ATTR_WARN_UNUSED_RESULT BMesh * bm
void BM_face_as_array_vert_tri(BMFace *f, BMVert *r_verts[3])
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMVert * v
short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1439
static CustomDataLayer * get_render_layer(const CustomData *cdata, int type)
Definition: gpu_buffers.c:152
static bool gpu_pbvh_vert_buf_data_set(PBVHGPUFormat *vbo_id, GPU_PBVH_Buffers *buffers, uint vert_len)
Definition: gpu_buffers.c:160
static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
Definition: gpu_buffers.c:181
static void gpu_bmesh_vert_to_buffer_copy(PBVHGPUFormat *vbo_id, BMVert *v, GPUVertBuf *vert_buf, int v_index, const float fno[3], const float *fmask, const int cd_vert_mask_offset, const bool show_mask, const bool show_vcol, bool *empty_mask)
Definition: gpu_buffers.c:907
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
Definition: gpu_buffers.c:946
struct GPUAttrRef GPUAttrRef
void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1471
GPU_PBVH_Buffers * GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hidden, bool smooth)
Definition: gpu_buffers.c:884
struct PBVHGPUFormat PBVHGPUFormat
static bool gpu_pbvh_format_equals(PBVHGPUFormat *a, PBVHGPUFormat *b)
Definition: gpu_buffers.c:1266
static void gpu_pbvh_grid_fill_index_buffers(GPU_PBVH_Buffers *buffers, SubdivCCG *UNUSED(subdiv_ccg), const int *UNUSED(face_sets), const int *grid_indices, uint visible_quad_len, int totgrid, int gridsize)
Definition: gpu_buffers.c:554
void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:984
static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt, const MVert *mvert, const MLoop *mloop, const int *sculpt_face_sets)
Definition: gpu_buffers.c:213
void GPU_pbvh_free_format(PBVHGPUFormat *vbo_id)
Definition: gpu_buffers.c:113
bool GPU_pbvh_attribute_names_update(PBVHType pbvh_type, PBVHGPUFormat *vbo_id, const CustomData *vdata, const CustomData *ldata, bool active_attrs_only)
Definition: gpu_buffers.c:1291
bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1434
void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1457
static int gpu_bmesh_face_visible_count(GSet *bm_faces)
Definition: gpu_buffers.c:968
void GPU_pbvh_bmesh_buffers_update(PBVHGPUFormat *vbo_id, GPU_PBVH_Buffers *buffers, BMesh *bm, GSet *bm_faces, GSet *bm_unique_verts, GSet *bm_other_verts, const int update_flags)
Definition: gpu_buffers.c:999
static void gpu_pbvh_buffers_clear(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1444
GPUBatch * GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
Definition: gpu_buffers.c:1425
static CustomDataLayer * get_active_layer(const CustomData *cdata, int type)
Definition: gpu_buffers.c:146
GPU_PBVH_Buffers * GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
Definition: gpu_buffers.c:1169
static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask, eCustomDataMask type_mask, const CustomData *vdata, const CustomData *edata, const CustomData *ldata, const CustomData *pdata, GPUAttrRef r_cd_attrs[MAX_GPU_ATTR], bool active_only, int active_type, int active_domain, const CustomDataLayer *active_layer, const CustomDataLayer *render_layer)
Definition: gpu_buffers.c:1189
#define MAX_GPU_ATTR
Definition: gpu_buffers.c:90
void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers, const struct DMFlagMat *grid_flag_mats, const int *grid_indices)
Definition: gpu_buffers.c:686
void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, GPU_PBVH_Buffers *buffers, const MVert *mvert, const CustomData *vdata, const CustomData *ldata, const float *vmask, const int *sculpt_face_sets, int face_sets_color_seed, int face_sets_color_default, int update_flags, const float(*vert_normals)[3])
Definition: gpu_buffers.c:222
PBVHGPUFormat * GPU_pbvh_make_format(void)
Definition: gpu_buffers.c:104
GPU_PBVH_Buffers * GPU_pbvh_mesh_buffers_build(const MPoly *mpoly, const MLoop *mloop, const MLoopTri *looptri, const MVert *mvert, const int *face_indices, const int *sculpt_face_sets, const int face_indices_len, const struct Mesh *mesh)
Definition: gpu_buffers.c:460
void gpu_pbvh_exit()
Definition: gpu_buffers.c:141
void gpu_pbvh_init()
Definition: gpu_buffers.c:137
void GPU_pbvh_grid_buffers_update(PBVHGPUFormat *vbo_id, GPU_PBVH_Buffers *buffers, SubdivCCG *subdiv_ccg, CCGElem **grids, const struct DMFlagMat *grid_flag_mats, int *grid_indices, int totgrid, const int *sculpt_face_sets, const int face_sets_color_seed, const int face_sets_color_default, const struct CCGKey *key, const int update_flags)
Definition: gpu_buffers.c:706
#define UINT_MAX
Definition: hash_md5.c:43
int count
ccl_gpu_kernel_postfix ccl_global float int int int int float bool int offset
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:27
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:31
MINLINE unsigned short unit_float_to_ushort_clamp(float val)
float BLI_color_from_srgb_table[256]
Definition: math_color.c:517
static unsigned a[3]
Definition: RandGen.cpp:78
bool active
all scheduled work for the GPU.
T abs(const T &a)
static const pxr::TfToken b("b", pxr::TfToken::Immortal)
smooth(Type::FLOAT, "mask_weight")
short mat_nr
Definition: bmesh_class.h:281
int len
Definition: bmesh_class.h:267
float no[3]
Definition: bmesh_class.h:271
float co[3]
Definition: bmesh_class.h:87
float no[3]
Definition: bmesh_class.h:88
CustomData vdata
Definition: bmesh_class.h:337
Definition: BKE_ccg.h:32
int has_mask
Definition: BKE_ccg.h:55
int grid_size
Definition: BKE_ccg.h:40
int grid_area
Definition: BKE_ccg.h:42
CustomDataLayer * layers
uchar domain
Definition: gpu_buffers.c:85
ushort cd_offset
Definition: gpu_buffers.c:86
uchar type
Definition: gpu_buffers.c:85
int layer_idx
Definition: gpu_buffers.c:87
const int * grid_indices
Definition: gpu_buffers.c:67
const int * face_indices
Definition: gpu_buffers.c:59
const MVert * mvert
Definition: gpu_buffers.c:57
const MPoly * mpoly
Definition: gpu_buffers.c:54
GPUIndexBuf * index_lines_buf_fast
Definition: gpu_buffers.c:45
GPUBatch * lines_fast
Definition: gpu_buffers.c:49
CCGElem ** grids
Definition: gpu_buffers.c:64
GPUBatch * triangles_fast
Definition: gpu_buffers.c:51
GPUIndexBuf * index_lines_buf
Definition: gpu_buffers.c:45
GPUVertBuf * vert_buf
Definition: gpu_buffers.c:46
const MLoopTri * looptri
Definition: gpu_buffers.c:56
const MLoop * mloop
Definition: gpu_buffers.c:55
bool clear_bmesh_on_flush
Definition: gpu_buffers.c:71
GPUBatch * triangles
Definition: gpu_buffers.c:50
GPUBatch * lines
Definition: gpu_buffers.c:48
GPUIndexBuf * index_buf_fast
Definition: gpu_buffers.c:44
const DMFlagMat * grid_flag_mats
Definition: gpu_buffers.c:65
short material_index
Definition: gpu_buffers.c:75
GPUIndexBuf * index_buf
Definition: gpu_buffers.c:44
BLI_bitmap *const * grid_hidden
Definition: gpu_buffers.c:66
unsigned char a
unsigned char b
unsigned char r
unsigned char g
unsigned int poly
unsigned int tri[3]
unsigned int v
short mat_nr
float color[4]
bool active_attrs_only
Definition: gpu_buffers.c:101
GPUVertFormat format
Definition: gpu_buffers.c:93
uint col[MAX_GPU_ATTR]
Definition: gpu_buffers.c:95
uint uv[MAX_GPU_ATTR]
Definition: gpu_buffers.c:96