Blender  V3.3
extract_mesh_vbo_pos_nor.cc
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2021 Blender Foundation. All rights reserved. */

/** \file
 * \ingroup draw
 */

#include "MEM_guardedalloc.h"

#include "extract_mesh.hh"

#include "draw_subdivision.h"

namespace blender::draw {

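/* Extraction of vertex positions and normals into the `pos_nor` vertex buffer
 * (#MeshBufferList.vbo.pos_nor). The buffer holds one entry per face corner (loop), followed
 * by two entries per loose edge and one entry per loose vertex. */
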
/* ---------------------------------------------------------------------- */

struct PosNorLoop {
  float pos[3];
  GPUPackedNormal nor;
};

struct MeshExtract_PosNor_Data {
  PosNorLoop *vbo_data;
  GPUNormal *normals;
};

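/* The last component of the `nor` attribute doubles as a per-corner flag for the paint-mode
 * overlay: -1 = hidden (or unmapped), 1 = selected, 0 = default. */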
static void extract_pos_nor_init(const MeshRenderData *mr,
                                 MeshBatchCache *UNUSED(cache),
                                 void *buf,
                                 void *tls_data)
{
  GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    /* WARNING: Adjust #PosNorLoop struct accordingly. */
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
    GPU_vertformat_alias_add(&format, "vnor");
  }
  GPU_vertbuf_init_with_format(vbo, &format);
  GPU_vertbuf_data_alloc(vbo, mr->loop_len + mr->loop_loose_len);

  /* Pack normals per vert, reduce amount of computation. */
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(tls_data);
  data->vbo_data = static_cast<PosNorLoop *>(GPU_vertbuf_get_data(vbo));
  data->normals = (GPUNormal *)MEM_mallocN(sizeof(GPUNormal) * mr->vert_len, __func__);

  /* Quicker than doing it for each loop. */
  if (mr->extract_type == MR_EXTRACT_BMESH) {
    BMIter iter;
    BMVert *eve;
    int v;
    BM_ITER_MESH_INDEX (eve, &iter, mr->bm, BM_VERTS_OF_MESH, v) {
      data->normals[v].low = GPU_normal_convert_i10_v3(bm_vert_no_get(mr, eve));
    }
  }
  else {
    for (int v = 0; v < mr->vert_len; v++) {
      data->normals[v].low = GPU_normal_convert_i10_v3(mr->vert_normals[v]);
    }
  }
}

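/* Face corners: each loop writes its own VBO slot, indexed by the loop index, so faces of any
 * size map directly onto the buffer. */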
static void extract_pos_nor_iter_poly_bm(const MeshRenderData *mr,
                                         const BMFace *f,
                                         const int UNUSED(f_index),
                                         void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
  BMLoop *l_iter, *l_first;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
  do {
    const int l_index = BM_elem_index_get(l_iter);
    PosNorLoop *vert = &data->vbo_data[l_index];
    copy_v3_v3(vert->pos, bm_vert_co_get(mr, l_iter->v));
    vert->nor = data->normals[BM_elem_index_get(l_iter->v)].low;
    vert->nor.w = BM_elem_flag_test(f, BM_ELEM_HIDDEN) ? -1 : 0;
  } while ((l_iter = l_iter->next) != l_first);
}

static void extract_pos_nor_iter_poly_mesh(const MeshRenderData *mr,
                                           const MPoly *mp,
                                           const int UNUSED(mp_index),
                                           void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);

  const MLoop *mloop = mr->mloop;
  const int ml_index_end = mp->loopstart + mp->totloop;
  for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {
    const MLoop *ml = &mloop[ml_index];

    PosNorLoop *vert = &data->vbo_data[ml_index];
    const MVert *mv = &mr->mvert[ml->v];
    copy_v3_v3(vert->pos, mv->co);
    vert->nor = data->normals[ml->v].low;
    /* Flag for paint mode overlay. */
    if (mp->flag & ME_HIDE || mv->flag & ME_HIDE ||
        ((mr->extract_type == MR_EXTRACT_MAPPED) && (mr->v_origindex) &&
         (mr->v_origindex[ml->v] == ORIGINDEX_NONE))) {
      vert->nor.w = -1;
    }
    else if (mv->flag & SELECT) {
      vert->nor.w = 1;
    }
    else {
      vert->nor.w = 0;
    }
  }
}

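/* Loose edges are appended after all face corners: each loose edge occupies two consecutive
 * slots starting at `mr->loop_len + ledge_index * 2`. */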
static void extract_pos_nor_iter_ledge_bm(const MeshRenderData *mr,
                                          const BMEdge *eed,
                                          const int ledge_index,
                                          void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);

  int l_index = mr->loop_len + ledge_index * 2;
  PosNorLoop *vert = &data->vbo_data[l_index];
  copy_v3_v3(vert[0].pos, bm_vert_co_get(mr, eed->v1));
  copy_v3_v3(vert[1].pos, bm_vert_co_get(mr, eed->v2));
  vert[0].nor = data->normals[BM_elem_index_get(eed->v1)].low;
  vert[1].nor = data->normals[BM_elem_index_get(eed->v2)].low;
}

static void extract_pos_nor_iter_ledge_mesh(const MeshRenderData *mr,
                                            const MEdge *med,
                                            const int ledge_index,
                                            void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
  const int ml_index = mr->loop_len + ledge_index * 2;
  PosNorLoop *vert = &data->vbo_data[ml_index];
  copy_v3_v3(vert[0].pos, mr->mvert[med->v1].co);
  copy_v3_v3(vert[1].pos, mr->mvert[med->v2].co);
  vert[0].nor = data->normals[med->v1].low;
  vert[1].nor = data->normals[med->v2].low;
}

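/* Loose vertices come last, one slot each, after the loose-edge pairs:
 * `mr->loop_len + (mr->edge_loose_len * 2) + lvert_index`. */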
static void extract_pos_nor_iter_lvert_bm(const MeshRenderData *mr,
                                          const BMVert *eve,
                                          const int lvert_index,
                                          void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
  const int offset = mr->loop_len + (mr->edge_loose_len * 2);

  const int l_index = offset + lvert_index;
  PosNorLoop *vert = &data->vbo_data[l_index];
  copy_v3_v3(vert->pos, bm_vert_co_get(mr, eve));
  vert->nor = data->normals[BM_elem_index_get(eve)].low;
}

static void extract_pos_nor_iter_lvert_mesh(const MeshRenderData *mr,
                                            const MVert *mv,
                                            const int lvert_index,
                                            void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
  const int offset = mr->loop_len + (mr->edge_loose_len * 2);

  const int ml_index = offset + lvert_index;
  const int v_index = mr->lverts[lvert_index];
  PosNorLoop *vert = &data->vbo_data[ml_index];
  copy_v3_v3(vert->pos, mv->co);
  vert->nor = data->normals[v_index].low;
}

static void extract_pos_nor_finish(const MeshRenderData *UNUSED(mr),
                                   MeshBatchCache *UNUSED(cache),
                                   void *UNUSED(buf),
                                   void *_data)
{
  MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
  MEM_freeN(data->normals);
}

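/* Vertex formats used by the GPU-subdivision path below: one for the accumulated vertex
 * normals and one for custom split normals interpolated from the coarse mesh. */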
static GPUVertFormat *get_normals_format()
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
    GPU_vertformat_alias_add(&format, "lnor");
  }
  return &format;
}

static GPUVertFormat *get_custom_normals_format()
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "lnor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_alias_add(&format, "nor");
  }
  return &format;
}

static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
                                        const MeshRenderData *UNUSED(mr),
                                        MeshBatchCache *cache,
                                        void *buffer,
                                        void *UNUSED(data))
{
  GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
  const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;

  /* Initialize the vertex buffer, it was already allocated. */
  GPU_vertbuf_init_build_on_device(
      vbo, draw_subdiv_get_pos_nor_format(), subdiv_cache->num_subdiv_loops + loose_geom.loop_len);

  if (subdiv_cache->num_subdiv_loops == 0) {
    return;
  }

  GPUVertBuf *orco_vbo = cache->final.buff.vbo.orco;

  if (orco_vbo) {
    static GPUVertFormat format = {0};
    if (format.attr_len == 0) {
      /* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
       * attributes. This is a substantial waste of video-ram and should be done another way.
       * Unfortunately, at the time of writing, I did not find any other "non disruptive"
       * alternative. */
      GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
    }
    GPU_vertbuf_init_build_on_device(orco_vbo, &format, subdiv_cache->num_subdiv_loops);
  }

  draw_subdiv_extract_pos_nor(subdiv_cache, vbo, orco_vbo);

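  /* Custom split normals are interpolated from the coarse mesh loops; otherwise vertex normals
   * are accumulated from the subdivided geometry on the GPU and finalized into the VBO. */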
  if (subdiv_cache->use_custom_loop_normals) {
    Mesh *coarse_mesh = subdiv_cache->mesh;
    const float(*lnors)[3] = static_cast<const float(*)[3]>(
        CustomData_get_layer(&coarse_mesh->ldata, CD_NORMAL));
    BLI_assert(lnors != nullptr);

    GPUVertBuf *src_custom_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_with_format(src_custom_normals, get_custom_normals_format());
    GPU_vertbuf_data_alloc(src_custom_normals, coarse_mesh->totloop);

    memcpy(
        GPU_vertbuf_get_data(src_custom_normals), lnors, sizeof(float[3]) * coarse_mesh->totloop);

    GPUVertBuf *dst_custom_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_build_on_device(
        dst_custom_normals, get_custom_normals_format(), subdiv_cache->num_subdiv_loops);

    draw_subdiv_interp_custom_data(
        subdiv_cache, src_custom_normals, dst_custom_normals, 3, 0, false);

    draw_subdiv_finalize_custom_normals(subdiv_cache, dst_custom_normals, vbo);

    GPU_vertbuf_discard(src_custom_normals);
    GPU_vertbuf_discard(dst_custom_normals);
  }
  else {
    /* We cannot evaluate vertex normals using the limit surface, so compute them manually. */
    GPUVertBuf *subdiv_loop_subdiv_vert_index = draw_subdiv_build_origindex_buffer(
        subdiv_cache->subdiv_loop_subdiv_vert_index, subdiv_cache->num_subdiv_loops);

    GPUVertBuf *vertex_normals = GPU_vertbuf_calloc();
    GPU_vertbuf_init_build_on_device(
        vertex_normals, get_normals_format(), subdiv_cache->num_subdiv_verts);

    draw_subdiv_accumulate_normals(subdiv_cache,
                                   vbo,
                                   subdiv_cache->subdiv_vertex_face_adjacency_offsets,
                                   subdiv_cache->subdiv_vertex_face_adjacency,
                                   subdiv_loop_subdiv_vert_index,
                                   vertex_normals);

    draw_subdiv_finalize_normals(subdiv_cache, vertex_normals, subdiv_loop_subdiv_vert_index, vbo);

    GPU_vertbuf_discard(vertex_normals);
    GPU_vertbuf_discard(subdiv_loop_subdiv_vert_index);
  }
}

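/* Loose edges and vertices are appended after the subdivided corners. Their positions are
 * uploaded directly with #GPU_vertbuf_update_sub; normals and flags are left zeroed. */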
static void extract_pos_nor_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
                                              const MeshRenderData *UNUSED(mr),
                                              void *buffer,
                                              void *UNUSED(data))
{
  const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
  if (loose_geom.loop_len == 0) {
    return;
  }

  GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
  uint offset = subdiv_cache->num_subdiv_loops;

  /* TODO(@kevindietrich): replace this when compressed normals are supported. */
  struct SubdivPosNorLoop {
    float pos[3];
    float nor[3];
    float flag;
  };

  /* Make sure buffer is active for sending loose data. */
  GPU_vertbuf_use(vbo);

  blender::Span<DRWSubdivLooseEdge> loose_edges = draw_subdiv_cache_get_loose_edges(subdiv_cache);

  SubdivPosNorLoop edge_data[2];
  memset(edge_data, 0, sizeof(SubdivPosNorLoop) * 2);
  for (const DRWSubdivLooseEdge &loose_edge : loose_edges) {
    const DRWSubdivLooseVertex &v1 = loose_geom.verts[loose_edge.loose_subdiv_v1_index];
    const DRWSubdivLooseVertex &v2 = loose_geom.verts[loose_edge.loose_subdiv_v2_index];

    copy_v3_v3(edge_data[0].pos, v1.co);
    copy_v3_v3(edge_data[1].pos, v2.co);

    GPU_vertbuf_update_sub(
        vbo, offset * sizeof(SubdivPosNorLoop), sizeof(SubdivPosNorLoop) * 2, &edge_data);

    offset += 2;
  }

  SubdivPosNorLoop vert_data;
  memset(&vert_data, 0, sizeof(SubdivPosNorLoop));
  blender::Span<DRWSubdivLooseVertex> loose_verts = draw_subdiv_cache_get_loose_verts(
      subdiv_cache);

  for (const DRWSubdivLooseVertex &loose_vert : loose_verts) {
    copy_v3_v3(vert_data.pos, loose_vert.co);

    GPU_vertbuf_update_sub(
        vbo, offset * sizeof(SubdivPosNorLoop), sizeof(SubdivPosNorLoop), &vert_data);

    offset += 1;
  }
}

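/* Wire all callbacks into a #MeshExtract descriptor. `data_size` is the per-thread (TLS)
 * struct used above, and `use_threading` lets the iteration run on multiple threads. */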
constexpr MeshExtract create_extractor_pos_nor()
{
  MeshExtract extractor = {nullptr};
  extractor.init = extract_pos_nor_init;
  extractor.iter_poly_bm = extract_pos_nor_iter_poly_bm;
  extractor.iter_poly_mesh = extract_pos_nor_iter_poly_mesh;
  extractor.iter_ledge_bm = extract_pos_nor_iter_ledge_bm;
  extractor.iter_ledge_mesh = extract_pos_nor_iter_ledge_mesh;
  extractor.iter_lvert_bm = extract_pos_nor_iter_lvert_bm;
  extractor.iter_lvert_mesh = extract_pos_nor_iter_lvert_mesh;
  extractor.finish = extract_pos_nor_finish;
  extractor.init_subdiv = extract_pos_nor_init_subdiv;
  extractor.iter_loose_geom_subdiv = extract_pos_nor_loose_geom_subdiv;
  extractor.data_type = MR_DATA_NONE;
  extractor.data_size = sizeof(MeshExtract_PosNor_Data);
  extractor.use_threading = true;
  extractor.mesh_buffer_offset = offsetof(MeshBufferList, vbo.pos_nor);
  return extractor;
}

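/* High-quality variant: same logic as above, but normals are stored as four 16-bit shorts
 * instead of a packed 10_10_10_2 integer, and the overlay flag lives in `nor[3]`. */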
/* ---------------------------------------------------------------------- */

struct PosNorHQLoop {
  float pos[3];
  short nor[4];
};

struct MeshExtract_PosNorHQ_Data {
  PosNorHQLoop *vbo_data;
  GPUNormal *normals;
};

static void extract_pos_nor_hq_init(const MeshRenderData *mr,
                                    MeshBatchCache *UNUSED(cache),
                                    void *buf,
                                    void *tls_data)
{
  GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    /* WARNING: Adjust #PosNorHQLoop struct accordingly. */
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
    GPU_vertformat_alias_add(&format, "vnor");
  }
  GPU_vertbuf_init_with_format(vbo, &format);
  GPU_vertbuf_data_alloc(vbo, mr->loop_len + mr->loop_loose_len);

  /* Pack normals per vert, reduce amount of computation. */
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(tls_data);
  data->vbo_data = static_cast<PosNorHQLoop *>(GPU_vertbuf_get_data(vbo));
  data->normals = (GPUNormal *)MEM_mallocN(sizeof(GPUNormal) * mr->vert_len, __func__);

  /* Quicker than doing it for each loop. */
  if (mr->extract_type == MR_EXTRACT_BMESH) {
    BMIter iter;
    BMVert *eve;
    int v;
    BM_ITER_MESH_INDEX (eve, &iter, mr->bm, BM_VERTS_OF_MESH, v) {
      normal_float_to_short_v3(data->normals[v].high, bm_vert_no_get(mr, eve));
    }
  }
  else {
    for (int v = 0; v < mr->vert_len; v++) {
      normal_float_to_short_v3(data->normals[v].high, mr->vert_normals[v]);
    }
  }
}

static void extract_pos_nor_hq_iter_poly_bm(const MeshRenderData *mr,
                                            const BMFace *f,
                                            const int UNUSED(f_index),
                                            void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  BMLoop *l_iter, *l_first;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
  do {
    const int l_index = BM_elem_index_get(l_iter);
    PosNorHQLoop *vert = &data->vbo_data[l_index];
    copy_v3_v3(vert->pos, bm_vert_co_get(mr, l_iter->v));
    copy_v3_v3_short(vert->nor, data->normals[BM_elem_index_get(l_iter->v)].high);

    vert->nor[3] = BM_elem_flag_test(f, BM_ELEM_HIDDEN) ? -1 : 0;
  } while ((l_iter = l_iter->next) != l_first);
}

static void extract_pos_nor_hq_iter_poly_mesh(const MeshRenderData *mr,
                                              const MPoly *mp,
                                              const int UNUSED(mp_index),
                                              void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  const MLoop *mloop = mr->mloop;
  const int ml_index_end = mp->loopstart + mp->totloop;
  for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {
    const MLoop *ml = &mloop[ml_index];

    PosNorHQLoop *vert = &data->vbo_data[ml_index];
    const MVert *mv = &mr->mvert[ml->v];
    copy_v3_v3(vert->pos, mv->co);
    copy_v3_v3_short(vert->nor, data->normals[ml->v].high);

    /* Flag for paint mode overlay. */
    if (mp->flag & ME_HIDE || mv->flag & ME_HIDE ||
        ((mr->extract_type == MR_EXTRACT_MAPPED) && (mr->v_origindex) &&
         (mr->v_origindex[ml->v] == ORIGINDEX_NONE))) {
      vert->nor[3] = -1;
    }
    else if (mv->flag & SELECT) {
      vert->nor[3] = 1;
    }
    else {
      vert->nor[3] = 0;
    }
  }
}

static void extract_pos_nor_hq_iter_ledge_bm(const MeshRenderData *mr,
                                             const BMEdge *eed,
                                             const int ledge_index,
                                             void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  int l_index = mr->loop_len + ledge_index * 2;
  PosNorHQLoop *vert = &data->vbo_data[l_index];
  copy_v3_v3(vert[0].pos, bm_vert_co_get(mr, eed->v1));
  copy_v3_v3(vert[1].pos, bm_vert_co_get(mr, eed->v2));
  copy_v3_v3_short(vert[0].nor, data->normals[BM_elem_index_get(eed->v1)].high);
  vert[0].nor[3] = 0;
  copy_v3_v3_short(vert[1].nor, data->normals[BM_elem_index_get(eed->v2)].high);
  vert[1].nor[3] = 0;
}

static void extract_pos_nor_hq_iter_ledge_mesh(const MeshRenderData *mr,
                                               const MEdge *med,
                                               const int ledge_index,
                                               void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  const int ml_index = mr->loop_len + ledge_index * 2;
  PosNorHQLoop *vert = &data->vbo_data[ml_index];
  copy_v3_v3(vert[0].pos, mr->mvert[med->v1].co);
  copy_v3_v3(vert[1].pos, mr->mvert[med->v2].co);
  copy_v3_v3_short(vert[0].nor, data->normals[med->v1].high);
  vert[0].nor[3] = 0;
  copy_v3_v3_short(vert[1].nor, data->normals[med->v2].high);
  vert[1].nor[3] = 0;
}

static void extract_pos_nor_hq_iter_lvert_bm(const MeshRenderData *mr,
                                             const BMVert *eve,
                                             const int lvert_index,
                                             void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  const int offset = mr->loop_len + (mr->edge_loose_len * 2);

  const int l_index = offset + lvert_index;
  PosNorHQLoop *vert = &data->vbo_data[l_index];
  copy_v3_v3(vert->pos, bm_vert_co_get(mr, eve));
  copy_v3_v3_short(vert->nor, data->normals[BM_elem_index_get(eve)].high);
  vert->nor[3] = 0;
}

static void extract_pos_nor_hq_iter_lvert_mesh(const MeshRenderData *mr,
                                               const MVert *mv,
                                               const int lvert_index,
                                               void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  const int offset = mr->loop_len + (mr->edge_loose_len * 2);

  const int ml_index = offset + lvert_index;
  const int v_index = mr->lverts[lvert_index];
  PosNorHQLoop *vert = &data->vbo_data[ml_index];
  copy_v3_v3(vert->pos, mv->co);
  copy_v3_v3_short(vert->nor, data->normals[v_index].high);
  vert->nor[3] = 0;
}

static void extract_pos_nor_hq_finish(const MeshRenderData *UNUSED(mr),
                                      MeshBatchCache *UNUSED(cache),
                                      void *UNUSED(buf),
                                      void *_data)
{
  MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
  MEM_freeN(data->normals);
}

constexpr MeshExtract create_extractor_pos_nor_hq()
{
  MeshExtract extractor = {nullptr};
  extractor.init = extract_pos_nor_hq_init;
  extractor.init_subdiv = extract_pos_nor_init_subdiv;
  extractor.iter_poly_bm = extract_pos_nor_hq_iter_poly_bm;
  extractor.iter_poly_mesh = extract_pos_nor_hq_iter_poly_mesh;
  extractor.iter_ledge_bm = extract_pos_nor_hq_iter_ledge_bm;
  extractor.iter_ledge_mesh = extract_pos_nor_hq_iter_ledge_mesh;
  extractor.iter_lvert_bm = extract_pos_nor_hq_iter_lvert_bm;
  extractor.iter_lvert_mesh = extract_pos_nor_hq_iter_lvert_mesh;
  extractor.finish = extract_pos_nor_hq_finish;
  extractor.data_type = MR_DATA_NONE;
  extractor.data_size = sizeof(MeshExtract_PosNorHQ_Data);
  extractor.use_threading = true;
  extractor.mesh_buffer_offset = offsetof(MeshBufferList, vbo.pos_nor);
  return extractor;
}

}  // namespace blender::draw

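/* The `extract_pos_nor` and `extract_pos_nor_hq` symbols consumed by the C-based extractor
 * tables are built from the constructors above. A sketch of that wiring (the exact export
 * block in the source may differ): */

extern "C" {
const MeshExtract extract_pos_nor = blender::draw::create_extractor_pos_nor();
const MeshExtract extract_pos_nor_hq = blender::draw::create_extractor_pos_nor_hq();
}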