Blender v3.3
bmesh_mesh_normals.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
3 /** \file
4  * \ingroup bmesh
5  *
6  * BM mesh normal calculation functions.
7  *
8  * \see mesh_normals.cc for the equivalent #Mesh functionality.
9  */
10 
11 #include "MEM_guardedalloc.h"
12 
13 #include "DNA_scene_types.h"
14 
15 #include "BLI_bitmap.h"
16 #include "BLI_linklist_stack.h"
17 #include "BLI_math.h"
18 #include "BLI_stack.h"
19 #include "BLI_task.h"
20 #include "BLI_utildefines.h"
21 
22 #include "BKE_customdata.h"
23 #include "BKE_editmesh.h"
24 #include "BKE_global.h"
25 #include "BKE_mesh.h"
26 
27 #include "intern/bmesh_private.h"
28 
29 /* Smooth angle to use when tagging edges is disabled entirely. */
30 #define EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS -FLT_MAX
31 
32 static void bm_edge_tag_from_smooth_and_set_sharp(const float (*fnos)[3],
33  BMEdge *e,
34  const float split_angle_cos);
35 static void bm_edge_tag_from_smooth(const float (*fnos)[3],
36  BMEdge *e,
37  const float split_angle_cos);
38 
39 /* -------------------------------------------------------------------- */
47 /* We use that existing internal API flag,
48  * assuming no other tool using it would run concurrently with clnors editing. */
49 #define BM_LNORSPACE_UPDATE _FLAG_MF
50 
51 typedef struct BMVertsCalcNormalsWithCoordsData {
52  /* Read-only data. */
53  const float (*fnos)[3];
54  const float (*vcos)[3];
55 
56  /* Write data. */
57  float (*vnos)[3];
58 } BMVertsCalcNormalsWithCoordsData;
59 
60 static void bm_vert_calc_normals_accum_loop(const BMLoop *l_iter,
61  const float e1diff[3],
62  const float e2diff[3],
63  const float f_no[3],
64  float v_no[3])
65 {
66  /* Calculate the dot product of the two edges that meet at the loop's vertex. */
67  /* Edge vectors are calculated from `e->v1` to `e->v2`, so adjust the dot product if one but not
68  * both loops actually runs from `e->v2` to `e->v1`. */
69  float dotprod = dot_v3v3(e1diff, e2diff);
70  if ((l_iter->prev->e->v1 == l_iter->prev->v) ^ (l_iter->e->v1 == l_iter->v)) {
71  dotprod = -dotprod;
72  }
73  const float fac = saacos(-dotprod);
74  /* Shouldn't happen as normalizing edge-vectors causes degenerate values to be zeroed out. */
75  BLI_assert(!isnan(fac));
76  madd_v3_v3fl(v_no, f_no, fac);
77 }
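An editorial aside, not part of the original source: under the conventions described in the comments above, the accumulation implements the classic angle-weighted vertex normal.

/* v->no = normalize(sum over the face corners at v of (corner_angle * face_no)),
 * where corner_angle is the interior angle of the face at the vertex, recovered from
 * the two unit edge-vectors via saacos(), with the dot product's sign adjusted for the
 * stored direction of each edge. */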
78 
79 static void bm_vert_calc_normals_impl(BMVert *v)
80 {
81  /* Note on redundant unit-length edge-vector calculation:
82  *
83  * This function calculates a unit-length edge-vector for every loop edge;
84  * in practice this means 2x `sqrt` calls per face-corner connected to each vertex.
85  *
86  * Previously (2.9x and older), the edge vectors were calculated and stored for reuse.
87  * However the overhead of storing them did not pay off (~16% slower, single & multi-threaded)
88  * when compared with calculating the values as they are needed.
89  *
90  * For simple grid topologies this function calculates each edge-vector about 4 times.
91  * There is some room for improved performance by storing the edge-vectors for reuse locally
92  * in this function, reducing the number of redundant `sqrtf` calls by half (2x instead of 4x),
93  * so face loops that share an edge would not calculate it multiple times.
94  * From my tests the performance improvements are so small they're difficult to measure,
95  * the time saved removing `sqrtf` calls is lost on storing and looking up the information,
96  * even in the case of `BLI_smallhash.h` & small inline lookup tables.
97  *
98  * Further, local data structures would need to support cases where
99  * stack memory isn't sufficient - adding additional complexity for corner-cases
100  * (a vertex that has thousands of connected edges for example).
101  * Unless there are important use-cases that benefit from edge-vector caching,
102  * keep this simple and calculate ~4x as many edge-vectors.
103  *
104  * In conclusion, the cost of caching & looking up edge-vectors both globally or per-vertex
105  * doesn't save enough time to make it worthwhile.
106  * - Campbell. */
107 
108  float *v_no = v->no;
109  zero_v3(v_no);
110 
111  BMEdge *e_first = v->e;
112  if (e_first != NULL) {
113  float e1diff[3], e2diff[3];
114  BMEdge *e_iter = e_first;
115  do {
116  BMLoop *l_first = e_iter->l;
117  if (l_first != NULL) {
118  sub_v3_v3v3(e2diff, e_iter->v1->co, e_iter->v2->co);
119  normalize_v3(e2diff);
120 
121  BMLoop *l_iter = l_first;
122  do {
123  if (l_iter->v == v) {
124  BMEdge *e_prev = l_iter->prev->e;
125  sub_v3_v3v3(e1diff, e_prev->v1->co, e_prev->v2->co);
126  normalize_v3(e1diff);
127 
128  bm_vert_calc_normals_accum_loop(l_iter, e1diff, e2diff, l_iter->f->no, v_no);
129  }
130  } while ((l_iter = l_iter->radial_next) != l_first);
131  }
132  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
133 
134  if (LIKELY(normalize_v3(v_no) != 0.0f)) {
135  return;
136  }
137  }
138  /* Fallback normal. */
139  normalize_v3_v3(v_no, v->co);
140 }
141 
142 static void bm_vert_calc_normals_cb(void *UNUSED(userdata),
143  MempoolIterData *mp_v,
144  const TaskParallelTLS *__restrict UNUSED(tls))
145 {
146  BMVert *v = (BMVert *)mp_v;
147  bm_vert_calc_normals_impl(v);
148 }
149 
150 static void bm_vert_calc_normals_with_coords(BMVert *v, BMVertsCalcNormalsWithCoordsData *data)
151 {
152  /* See #bm_vert_calc_normals_impl note on performance. */
153  float *v_no = data->vnos[BM_elem_index_get(v)];
154  zero_v3(v_no);
155 
156  /* Loop over edges. */
157  BMEdge *e_first = v->e;
158  if (e_first != NULL) {
159  float e1diff[3], e2diff[3];
160  BMEdge *e_iter = e_first;
161  do {
162  BMLoop *l_first = e_iter->l;
163  if (l_first != NULL) {
164  sub_v3_v3v3(e2diff,
165  data->vcos[BM_elem_index_get(e_iter->v1)],
166  data->vcos[BM_elem_index_get(e_iter->v2)]);
167  normalize_v3(e2diff);
168 
169  BMLoop *l_iter = l_first;
170  do {
171  if (l_iter->v == v) {
172  BMEdge *e_prev = l_iter->prev->e;
173  sub_v3_v3v3(e1diff,
174  data->vcos[BM_elem_index_get(e_prev->v1)],
175  data->vcos[BM_elem_index_get(e_prev->v2)]);
176  normalize_v3(e1diff);
177 
178  bm_vert_calc_normals_accum_loop(
179  l_iter, e1diff, e2diff, data->fnos[BM_elem_index_get(l_iter->f)], v_no);
180  }
181  } while ((l_iter = l_iter->radial_next) != l_first);
182  }
183  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
184 
185  if (LIKELY(normalize_v3(v_no) != 0.0f)) {
186  return;
187  }
188  }
189  /* Fallback normal. */
190  normalize_v3_v3(v_no, data->vcos[BM_elem_index_get(v)]);
191 }
192 
193 static void bm_vert_calc_normals_with_coords_cb(void *userdata,
194  MempoolIterData *mp_v,
195  const TaskParallelTLS *__restrict UNUSED(tls))
196 {
197  BMVertsCalcNormalsWithCoordsData *data = userdata;
198  BMVert *v = (BMVert *)mp_v;
199  bm_vert_calc_normals_with_coords(v, data);
200 }
201 
202 static void bm_mesh_verts_calc_normals(BMesh *bm,
203  const float (*fnos)[3],
204  const float (*vcos)[3],
205  float (*vnos)[3])
206 {
207  BM_mesh_elem_index_ensure(bm, BM_FACE | ((vnos || vcos) ? BM_VERT : 0));
208 
209  TaskParallelSettings settings;
210  BLI_parallel_mempool_settings_defaults(&settings);
211  settings.use_threading = bm->totvert >= BM_OMP_LIMIT;
212 
213  if (vcos == NULL) {
214  BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, NULL, &settings);
215  }
216  else {
217  BLI_assert(!ELEM(NULL, fnos, vnos));
218  BMVertsCalcNormalsWithCoordsData data = {
219  .fnos = fnos,
220  .vcos = vcos,
221  .vnos = vnos,
222  };
223  BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_with_coords_cb, &data, &settings);
224  }
225 }
226 
227 static void bm_face_calc_normals_cb(void *UNUSED(userdata),
228  MempoolIterData *mp_f,
229  const TaskParallelTLS *__restrict UNUSED(tls))
230 {
231  BMFace *f = (BMFace *)mp_f;
232 
233  BM_face_calc_normal(f, f->no);
234 }
235 
236 void BM_mesh_normals_update_ex(BMesh *bm, const struct BMeshNormalsUpdate_Params *params)
237 {
238  if (params->face_normals) {
239  /* Calculate all face normals. */
240  TaskParallelSettings settings;
241  BLI_parallel_mempool_settings_defaults(&settings);
242  settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
243 
244  BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, NULL, &settings);
245  }
246 
247  /* Add weighted face normals to vertices, and normalize vert normals. */
248  bm_mesh_verts_calc_normals(bm, NULL, NULL, NULL);
249 }
250 
251 void BM_mesh_normals_update(BMesh *bm)
252 {
253  BM_mesh_normals_update_ex(bm,
254  &(const struct BMeshNormalsUpdate_Params){
255  .face_normals = true,
256  });
257 }
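A minimal usage sketch, not from this file, assuming an existing BMesh `bm` that has just been edited:

/* Recompute face normals, then vertex normals, before reading `f->no` / `v->no`. */
BM_mesh_normals_update(bm);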
258 
261 /* -------------------------------------------------------------------- */
265 static void bm_partial_faces_parallel_range_calc_normals_cb(
266  void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
267 {
268  BMFace *f = ((BMFace **)userdata)[iter];
269  BM_face_calc_normal(f, f->no);
270 }
271 
272 static void bm_partial_verts_parallel_range_calc_normal_cb(
273  void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
274 {
275  BMVert *v = ((BMVert **)userdata)[iter];
276  bm_vert_calc_normals_impl(v);
277 }
278 
279 void BM_mesh_normals_update_with_partial_ex(BMesh *bm,
280  const BMPartialUpdate *bmpinfo,
281  const struct BMeshNormalsUpdate_Params *params)
282 {
283  BLI_assert(bmpinfo->params.do_normals);
284  /* While harmless, exit early if there is nothing to do. */
285  if (UNLIKELY((bmpinfo->verts_len == 0) && (bmpinfo->faces_len == 0))) {
286  return;
287  }
288 
289  BMVert **verts = bmpinfo->verts;
290  BMFace **faces = bmpinfo->faces;
291  const int verts_len = bmpinfo->verts_len;
292  const int faces_len = bmpinfo->faces_len;
293 
294  TaskParallelSettings settings;
295  BLI_parallel_range_settings_defaults(&settings);
296 
297  /* Faces. */
298  if (params->face_normals) {
299  BLI_task_parallel_range(
300  0, faces_len, faces, bm_partial_faces_parallel_range_calc_normals_cb, &settings);
301  }
302 
303  /* Verts. */
304  BLI_task_parallel_range(
305  0, verts_len, verts, bm_partial_verts_parallel_range_calc_normal_cb, &settings);
306 }
307 
308 void BM_mesh_normals_update_with_partial(BMesh *bm, const BMPartialUpdate *bmpinfo)
309 {
310  BM_mesh_normals_update_with_partial_ex(bm,
311  bmpinfo,
312  &(const struct BMeshNormalsUpdate_Params){
313  .face_normals = true,
314  });
315 }
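A hedged sketch of the partial path, not from this file: `bmpinfo` is assumed to be a `BMPartialUpdate` built elsewhere, with its normals parameter enabled, listing only the affected vertices and faces.

/* Update normals only for the verts/faces recorded in `bmpinfo`. */
BM_mesh_normals_update_with_partial(bm, bmpinfo);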
316 
319 /* -------------------------------------------------------------------- */
323 void BM_verts_calc_normal_vcos(BMesh *bm,
324  const float (*fnos)[3],
325  const float (*vcos)[3],
326  float (*vnos)[3])
327 {
328  /* Add weighted face normals to vertices, and normalize vert normals. */
329  bm_mesh_verts_calc_normals(bm, fnos, vcos, vnos);
330 }
331 
334 /* -------------------------------------------------------------------- */
338 void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
339 {
340  BMFace *f;
341  BMEdge *e;
342  BMIter fiter, eiter;
343  BMLoop *l_curr, *l_first;
344 
345  if (do_edges) {
346  int index_edge;
347  BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, index_edge) {
348  BMLoop *l_a, *l_b;
349 
350  BM_elem_index_set(e, index_edge); /* set_inline */
352  if (BM_edge_loop_pair(e, &l_a, &l_b)) {
353  if (BM_elem_flag_test(e, BM_ELEM_SMOOTH) && l_a->v != l_b->v) {
354  BM_elem_flag_enable(e, BM_ELEM_TAG);
355  }
356  }
357  }
358  bm->elem_index_dirty &= ~BM_EDGE;
359  }
360 
361  int index_face, index_loop = 0;
362  BM_ITER_MESH_INDEX (f, &fiter, bm, BM_FACES_OF_MESH, index_face) {
363  BM_elem_index_set(f, index_face); /* set_inline */
364  l_curr = l_first = BM_FACE_FIRST_LOOP(f);
365  do {
366  BM_elem_index_set(l_curr, index_loop++); /* set_inline */
367  BM_elem_flag_disable(l_curr, BM_ELEM_TAG);
368  } while ((l_curr = l_curr->next) != l_first);
369  }
370  bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
371 }
372 
376 static void bm_mesh_edges_sharp_tag(BMesh *bm,
377  const float (*fnos)[3],
378  float split_angle_cos,
379  const bool do_sharp_edges_tag)
380 {
381  BMIter eiter;
382  BMEdge *e;
383  int i;
384 
385  if (fnos) {
386  BM_mesh_elem_index_ensure(bm, BM_FACE);
387  }
388 
389  if (do_sharp_edges_tag) {
390  BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
391  BM_elem_index_set(e, i); /* set_inline */
392  if (e->l != NULL) {
393  bm_edge_tag_from_smooth_and_set_sharp(fnos, e, split_angle_cos);
394  }
395  }
396  }
397  else {
398  BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
399  BM_elem_index_set(e, i); /* set_inline */
400  if (e->l != NULL) {
401  bm_edge_tag_from_smooth(fnos, e, split_angle_cos);
402  }
403  }
404  }
405 
406  bm->elem_index_dirty &= ~BM_EDGE;
407 }
408 
409 void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
410 {
411  if (split_angle >= (float)M_PI) {
412  /* Nothing to do! */
413  return;
414  }
415 
416  bm_mesh_edges_sharp_tag(bm, NULL, cosf(split_angle), true);
417 }
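Usage sketch, not from this file: the split angle is given in radians, so a 30 degree threshold would look like the following, assuming the usual `DEG2RADF` helper from BLI_math.

/* Mark edges sharp where the angle between their two face normals exceeds 30 degrees. */
BM_edges_sharp_from_angle_set(bm, DEG2RADF(30.0f));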
418 
421 /* -------------------------------------------------------------------- */
425 static bool bm_mesh_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
426 {
427  BMLoop *lfan_pivot_next = l_curr;
428  BMEdge *e_next = l_curr->e;
429 
430  BLI_assert(!BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG));
431  BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
432 
433  while (true) {
434  /* Much simpler than in sibling code with basic Mesh data! */
435  lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot_next, &e_next);
436 
437  if (!lfan_pivot_next || !BM_elem_flag_test(e_next, BM_ELEM_TAG)) {
438  /* Sharp loop/edge, so not a cyclic smooth fan... */
439  return false;
440  }
441  /* Smooth loop/edge... */
442  if (BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG)) {
443  if (lfan_pivot_next == l_curr) {
444  /* We walked around a whole cyclic smooth fan
445  * without finding any already-processed loop,
446  * means we can use initial l_curr/l_prev edge as start for this smooth fan. */
447  return true;
448  }
449  /* ... already checked in some previous looping, we can abort. */
450  return false;
451  }
452  /* ... we can skip it in future, and keep checking the smooth fan. */
453  BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
454  }
455 }
456 
469 static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
470  const float (*vcos)[3],
471  const float (*fnos)[3],
472  const short (*clnors_data)[2],
473  const int cd_loop_clnors_offset,
474  const bool has_clnors,
475  /* Cache. */
476  BLI_Stack *edge_vectors,
477  /* Iterate. */
478  BMLoop *l_curr,
479  /* Result. */
480  float (*r_lnos)[3],
481  MLoopNorSpaceArray *r_lnors_spacearr)
482 {
484  BLI_assert((fnos == NULL) || ((bm->elem_index_dirty & BM_FACE) == 0));
485  BLI_assert((vcos == NULL) || ((bm->elem_index_dirty & BM_VERT) == 0));
487 
488  int handled = 0;
489 
490  /* Temp normal stack. */
491  BLI_SMALLSTACK_DECLARE(normal, float *);
492  /* Temp clnors stack. */
493  BLI_SMALLSTACK_DECLARE(clnors, short *);
494  /* Temp edge vectors stack, only used when computing lnor spacearr. */
495 
496  /* A smooth edge, we have to check for cyclic smooth fan case.
497  * If we find a new, never-processed cyclic smooth fan, we can do it now using that loop/edge
498  * as 'entry point', otherwise we can skip it. */
499 
500  /* NOTE: In theory, we could make bm_mesh_loop_check_cyclic_smooth_fan() store
501  * mlfan_pivot's in a stack, to avoid having to fan again around
502  * the vert during actual computation of clnor & clnorspace. However, this would complicate
503  * the code, add more memory usage, and
504  * BM_vert_step_fan_loop() is quite cheap in terms of CPU cycles,
505  * so we really think it's not worth it. */
506  if (BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
507  (BM_elem_flag_test(l_curr, BM_ELEM_TAG) || !bm_mesh_loop_check_cyclic_smooth_fan(l_curr))) {
508  }
509  else if (!BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
510  !BM_elem_flag_test(l_curr->prev->e, BM_ELEM_TAG)) {
511  /* Simple case (both edges around that vertex are sharp in related polygon),
512  * this vertex just takes its poly normal.
513  */
514  const int l_curr_index = BM_elem_index_get(l_curr);
515  const float *no = fnos ? fnos[BM_elem_index_get(l_curr->f)] : l_curr->f->no;
516  copy_v3_v3(r_lnos[l_curr_index], no);
517 
518  /* If needed, generate this (simple!) lnor space. */
519  if (r_lnors_spacearr) {
520  float vec_curr[3], vec_prev[3];
521  MLoopNorSpace *lnor_space = BKE_lnor_space_create(r_lnors_spacearr);
522 
523  {
524  const BMVert *v_pivot = l_curr->v;
525  const float *co_pivot = vcos ? vcos[BM_elem_index_get(v_pivot)] : v_pivot->co;
526  const BMVert *v_1 = l_curr->next->v;
527  const float *co_1 = vcos ? vcos[BM_elem_index_get(v_1)] : v_1->co;
528  const BMVert *v_2 = l_curr->prev->v;
529  const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
530 
531  BLI_assert(v_1 == BM_edge_other_vert(l_curr->e, v_pivot));
532  BLI_assert(v_2 == BM_edge_other_vert(l_curr->prev->e, v_pivot));
533 
534  sub_v3_v3v3(vec_curr, co_1, co_pivot);
535  normalize_v3(vec_curr);
536  sub_v3_v3v3(vec_prev, co_2, co_pivot);
537  normalize_v3(vec_prev);
538  }
539 
540  BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, NULL);
541  /* We know there is only one loop in this space,
542  * no need to create a linklist in this case... */
543  BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
544 
545  if (has_clnors) {
546  const short(*clnor)[2] = clnors_data ? &clnors_data[l_curr_index] :
547  (const void *)BM_ELEM_CD_GET_VOID_P(
548  l_curr, cd_loop_clnors_offset);
549  BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor, r_lnos[l_curr_index]);
550  }
551  }
552  handled = 1;
553  }
554  /* We *do not need* to check/tag loops as already computed!
555  * Due to the fact a loop only links to one of its two edges,
556  * a same fan *will never be walked more than once!*
557  * Since we consider edges having neighbor faces with inverted (flipped) normals as sharp,
558  * we are sure that no fan will be skipped, even only considering the case
559  * (sharp curr_edge, smooth prev_edge), and not the alternative
560  * (smooth curr_edge, sharp prev_edge).
561  * All this due/thanks to link between normals and loop ordering.
562  */
563  else {
564  /* We have to fan around current vertex, until we find the other non-smooth edge,
565  * and accumulate face normals into the vertex!
566  * Note in case this vertex has only one sharp edge,
567  * this is a waste because the normal is the same as the vertex normal,
568  * but I do not see any easy way to detect that (would need to count number of sharp edges
569  * per vertex, I doubt the additional memory usage would be worth it, especially as it
570  * should not be a common case in real-life meshes anyway).
571  */
572  BMVert *v_pivot = l_curr->v;
573  BMEdge *e_next;
574  const BMEdge *e_org = l_curr->e;
575  BMLoop *lfan_pivot, *lfan_pivot_next;
576  int lfan_pivot_index;
577  float lnor[3] = {0.0f, 0.0f, 0.0f};
578  float vec_curr[3], vec_next[3], vec_org[3];
579 
580  /* We validate clnors data on the fly - cheapest way to do it! */
581  int clnors_avg[2] = {0, 0};
582  const short(*clnor_ref)[2] = NULL;
583  int clnors_count = 0;
584  bool clnors_invalid = false;
585 
586  const float *co_pivot = vcos ? vcos[BM_elem_index_get(v_pivot)] : v_pivot->co;
587 
588  MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) : NULL;
589 
590  BLI_assert((edge_vectors == NULL) || BLI_stack_is_empty(edge_vectors));
591 
592  lfan_pivot = l_curr;
593  lfan_pivot_index = BM_elem_index_get(lfan_pivot);
594  e_next = lfan_pivot->e; /* Current edge here, actually! */
595 
596  /* Only need to compute previous edge's vector once,
597  * then we can just reuse old current one! */
598  {
599  const BMVert *v_2 = lfan_pivot->next->v;
600  const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
601 
602  BLI_assert(v_2 == BM_edge_other_vert(e_next, v_pivot));
603 
604  sub_v3_v3v3(vec_org, co_2, co_pivot);
605  normalize_v3(vec_org);
606  copy_v3_v3(vec_curr, vec_org);
607 
608  if (r_lnors_spacearr) {
609  BLI_stack_push(edge_vectors, vec_org);
610  }
611  }
612 
613  while (true) {
614  /* Much simpler than in sibling code with basic Mesh data! */
615  lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot, &e_next);
616  if (lfan_pivot_next) {
617  BLI_assert(lfan_pivot_next->v == v_pivot);
618  }
619  else {
620  /* next edge is non-manifold, we have to find it ourselves! */
621  e_next = (lfan_pivot->e == e_next) ? lfan_pivot->prev->e : lfan_pivot->e;
622  }
623 
624  /* Compute edge vector.
625  * NOTE: We could pre-compute those into an array, in the first iteration,
626  * instead of computing them twice (or more) here.
627  * However, time gained is not worth memory and time lost,
628  * given the fact that this code should not be called that much in real-life meshes.
629  */
630  {
631  const BMVert *v_2 = BM_edge_other_vert(e_next, v_pivot);
632  const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
633 
634  sub_v3_v3v3(vec_next, co_2, co_pivot);
635  normalize_v3(vec_next);
636  }
637 
638  {
639  /* Code similar to accumulate_vertex_normals_poly_v3. */
640  /* Calculate angle between the two poly edges incident on this vertex. */
641  const BMFace *f = lfan_pivot->f;
642  const float fac = saacos(dot_v3v3(vec_next, vec_curr));
643  const float *no = fnos ? fnos[BM_elem_index_get(f)] : f->no;
644  /* Accumulate */
645  madd_v3_v3fl(lnor, no, fac);
646 
647  if (has_clnors) {
648  /* Accumulate all clnors, if they are not all equal we have to fix that! */
649  const short(*clnor)[2] = clnors_data ? &clnors_data[lfan_pivot_index] :
650  (const void *)BM_ELEM_CD_GET_VOID_P(
651  lfan_pivot, cd_loop_clnors_offset);
652  if (clnors_count) {
653  clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
654  }
655  else {
656  clnor_ref = clnor;
657  }
658  clnors_avg[0] += (*clnor)[0];
659  clnors_avg[1] += (*clnor)[1];
660  clnors_count++;
661  /* We store here a pointer to all custom lnors processed. */
662  BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
663  }
664  }
665 
666  /* We store here a pointer to all loop-normals processed. */
667  BLI_SMALLSTACK_PUSH(normal, (float *)r_lnos[lfan_pivot_index]);
668 
669  if (r_lnors_spacearr) {
670  /* Assign current lnor space to current 'vertex' loop. */
671  BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
672  if (e_next != e_org) {
673  /* We store here all edges-normalized vectors processed. */
674  BLI_stack_push(edge_vectors, vec_next);
675  }
676  }
677 
678  handled += 1;
679 
680  if (!BM_elem_flag_test(e_next, BM_ELEM_TAG) || (e_next == e_org)) {
681  /* Next edge is sharp, we have finished with this fan of faces around this vert! */
682  break;
683  }
684 
685  /* Copy next edge vector to current one. */
686  copy_v3_v3(vec_curr, vec_next);
687  /* Next pivot loop to current one. */
688  lfan_pivot = lfan_pivot_next;
689  lfan_pivot_index = BM_elem_index_get(lfan_pivot);
690  }
691 
692  {
693  float lnor_len = normalize_v3(lnor);
694 
695  /* If we are generating lnor spacearr, we can now define the one for this fan. */
696  if (r_lnors_spacearr) {
697  if (UNLIKELY(lnor_len == 0.0f)) {
698  /* Use vertex normal as fallback! */
699  copy_v3_v3(lnor, r_lnos[lfan_pivot_index]);
700  lnor_len = 1.0f;
701  }
702 
703  BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, edge_vectors);
704 
705  if (has_clnors) {
706  if (clnors_invalid) {
707  short *clnor;
708 
709  clnors_avg[0] /= clnors_count;
710  clnors_avg[1] /= clnors_count;
711  /* Fix/update all clnors of this fan with computed average value. */
712 
713  /* Prints continuously when merging custom normals, so commented out. */
714  // printf("Invalid clnors in this fan!\n");
715 
716  while ((clnor = BLI_SMALLSTACK_POP(clnors))) {
717  // print_v2("org clnor", clnor);
718  clnor[0] = (short)clnors_avg[0];
719  clnor[1] = (short)clnors_avg[1];
720  }
721  // print_v2("new clnors", clnors_avg);
722  }
723  else {
724  /* We still have to consume the stack! */
725  while (BLI_SMALLSTACK_POP(clnors)) {
726  /* pass */
727  }
728  }
729  BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
730  }
731  }
732 
733  /* In case we get a zero normal here, just use vertex normal already set! */
734  if (LIKELY(lnor_len != 0.0f)) {
735  /* Copy back the final computed normal into all related loop-normals. */
736  float *nor;
737 
738  while ((nor = BLI_SMALLSTACK_POP(normal))) {
739  copy_v3_v3(nor, lnor);
740  }
741  }
742  else {
743  /* We still have to consume the stack! */
744  while (BLI_SMALLSTACK_POP(normal)) {
745  /* pass */
746  }
747  }
748  }
749 
750  /* Tag related vertex as sharp, to avoid fanning around it again
751  * (in case it was a smooth one). */
752  if (r_lnors_spacearr) {
753  BM_elem_flag_enable(l_curr->v, BM_ELEM_TAG);
754  }
755  }
756  return handled;
757 }
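An editorial note, not in the surviving comments: the value returned here is the number of loops whose normals this call wrote (1 for the simple sharp-corner case, or one per loop walked around the fan), and the per-vertex driver further down compares the running total against the vertex's loop count for its early-exit check.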
758 
759 static int bm_loop_index_cmp(const void *a, const void *b)
760 {
761  BLI_assert(BM_elem_index_get((BMLoop *)a) != BM_elem_index_get((BMLoop *)b));
762  if (BM_elem_index_get((BMLoop *)a) < BM_elem_index_get((BMLoop *)b)) {
763  return -1;
764  }
765  return 1;
766 }
767 
776 static bool bm_edge_is_smooth_no_angle(const BMEdge *e,
777  const BMLoop *l_a,
778  const BMLoop *l_b)
779 {
780  BLI_assert(l_a->radial_next == l_b);
781  return (
782  /* The face is manifold. */
783  (l_b->radial_next == l_a) &&
784  /* Faces have winding that faces the same way. */
785  (l_a->v != l_b->v) &&
786  /* The edge is smooth. */
787  BM_elem_flag_test(e, BM_ELEM_SMOOTH) &&
788  /* Both faces are smooth. */
789  BM_elem_flag_test(l_a->f, BM_ELEM_SMOOTH) && BM_elem_flag_test(l_b->f, BM_ELEM_SMOOTH));
790 }
791 
792 static void bm_edge_tag_from_smooth(const float (*fnos)[3], BMEdge *e, const float split_angle_cos)
793 {
794  BLI_assert(e->l != NULL);
795  BMLoop *l_a = e->l, *l_b = l_a->radial_next;
796  bool is_smooth = false;
797  if (bm_edge_is_smooth_no_angle(e, l_a, l_b)) {
798  if (split_angle_cos != -1.0f) {
799  const float dot = (fnos == NULL) ? dot_v3v3(l_a->f->no, l_b->f->no) :
800  dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
801  fnos[BM_elem_index_get(l_b->f)]);
802  if (dot >= split_angle_cos) {
803  is_smooth = true;
804  }
805  }
806  else {
807  is_smooth = true;
808  }
809  }
810 
811  /* Perform `BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth)`
812  * NOTE: This will be set by multiple threads however it will be set to the same value. */
813 
814  /* No need for atomics here as this is a single byte. */
815  char *hflag_p = &e->head.hflag;
816  if (is_smooth) {
817  *hflag_p = *hflag_p | BM_ELEM_TAG;
818  }
819  else {
820  *hflag_p = *hflag_p & ~BM_ELEM_TAG;
821  }
822 }
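A short clarification of the threshold test above, editorial and derived from the code itself: for unit face normals the dot product equals the cosine of the angle between them, and cos() decreases on [0, pi], so `dot >= split_angle_cos` means the angle is at most the split angle; passing -1.0f as `split_angle_cos` skips the angle test entirely, so every manifold smooth edge is tagged smooth.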
823 
831 static void bm_edge_tag_from_smooth_and_set_sharp(const float (*fnos)[3],
832  BMEdge *e,
833  const float split_angle_cos)
834 {
835  BLI_assert(e->l != NULL);
836  BMLoop *l_a = e->l, *l_b = l_a->radial_next;
837  bool is_smooth = false;
838  if (bm_edge_is_smooth_no_angle(e, l_a, l_b)) {
839  if (split_angle_cos != -1.0f) {
840  const float dot = (fnos == NULL) ? dot_v3v3(l_a->f->no, l_b->f->no) :
841  dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
842  fnos[BM_elem_index_get(l_b->f)]);
843  if (dot >= split_angle_cos) {
844  is_smooth = true;
845  }
846  else {
847  /* Note that we do not care about the other sharp-edge cases
848  * (sharp poly, non-manifold edge, etc.),
849  * only tag edge as sharp when it is due to angle threshold. */
850  BM_elem_flag_disable(e, BM_ELEM_SMOOTH);
851  }
852  }
853  else {
854  is_smooth = true;
855  }
856  }
857 
858  BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth);
859 }
860 
866 static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
867  const float (*vcos)[3],
868  const float (*fnos)[3],
869  float (*r_lnos)[3],
870  const short (*clnors_data)[2],
871  const int cd_loop_clnors_offset,
872  const bool do_rebuild,
873  const float split_angle_cos,
874  /* TLS */
875  MLoopNorSpaceArray *r_lnors_spacearr,
876  BLI_Stack *edge_vectors,
877  /* Iterate over. */
878  BMVert *v)
879 {
880  /* Respecting face order is necessary so the initial starting loop is consistent
881  * with looping over loops of all faces.
882  *
883  * Logically we could sort the loops by their index & loop over them
884  * however it's faster to use the lowest index of an un-ordered list
885  * since it's common that smooth vertices only ever need to pick one loop
886  * which then handles all the others.
887  *
888  * Sorting is only performed when multiple fans are found. */
889  const bool has_clnors = true;
890  LinkNode *loops_of_vert = NULL;
891  int loops_of_vert_count = 0;
892  /* When false the caller must have already tagged the edges. */
893  const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
894 
895  /* The loop with the lowest index. */
896  {
897  LinkNode *link_best;
898  uint index_best = UINT_MAX;
899  BMEdge *e_curr_iter = v->e;
900  do { /* Edges of vertex. */
901  BMLoop *l_curr = e_curr_iter->l;
902  if (l_curr == NULL) {
903  continue;
904  }
905 
906  if (do_edge_tag) {
907  bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
908  }
909 
910  do { /* Radial loops. */
911  if (l_curr->v != v) {
912  continue;
913  }
914  if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
915  !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)) {
916  continue;
917  }
919  BLI_linklist_prepend_alloca(&loops_of_vert, l_curr);
920  loops_of_vert_count += 1;
921 
922  const uint index_test = (uint)BM_elem_index_get(l_curr);
923  if (index_best > index_test) {
924  index_best = index_test;
925  link_best = loops_of_vert;
926  }
927  } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
928  } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
929 
930  if (UNLIKELY(loops_of_vert == NULL)) {
931  return;
932  }
933 
934  /* Immediately pop the best element.
935  * The order doesn't matter, so swap the links as it's simpler than tracking
936  * reference to `link_best`. */
937  if (link_best != loops_of_vert) {
938  SWAP(void *, link_best->link, loops_of_vert->link);
939  }
940  }
941 
942  bool loops_of_vert_is_sorted = false;
943 
944  /* Keep track of the number of loops that have been assigned. */
945  int loops_of_vert_handled = 0;
946 
947  while (loops_of_vert != NULL) {
948  BMLoop *l_best = loops_of_vert->link;
949  loops_of_vert = loops_of_vert->next;
950 
951  BLI_assert(l_best->v == v);
952  loops_of_vert_handled += bm_mesh_loops_calc_normals_for_loop(bm,
953  vcos,
954  fnos,
955  clnors_data,
956  cd_loop_clnors_offset,
957  has_clnors,
958  edge_vectors,
959  l_best,
960  r_lnos,
961  r_lnors_spacearr);
962 
963  /* Check if an early exit is possible without an exhaustive inspection of every loop
964  * where 1 loop's fan extends out to all remaining loops.
965  * This is a common case for smooth vertices. */
966  BLI_assert(loops_of_vert_handled <= loops_of_vert_count);
967  if (loops_of_vert_handled == loops_of_vert_count) {
968  break;
969  }
970 
971  /* Note on sorting, in some cases it will be faster to scan for the lowest index each time.
972  * However in the worst case this is `O(N^2)`, so use a single sort call instead. */
973  if (!loops_of_vert_is_sorted) {
974  if (loops_of_vert && loops_of_vert->next) {
975  loops_of_vert = BLI_linklist_sort(loops_of_vert, bm_loop_index_cmp);
976  loops_of_vert_is_sorted = true;
977  }
978  }
979  }
980 }
981 
986 static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
987  BMesh *bm,
988  const float (*vcos)[3],
989  const float (*fnos)[3],
990  float (*r_lnos)[3],
991  const bool do_rebuild,
992  const float split_angle_cos,
993  /* TLS */
994  MLoopNorSpaceArray *r_lnors_spacearr,
995  BLI_Stack *edge_vectors,
996  /* Iterate over. */
997  BMVert *v)
998 {
999  const bool has_clnors = false;
1000  const short(*clnors_data)[2] = NULL;
1001  /* When false the caller must have already tagged the edges. */
1002  const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1003  const int cd_loop_clnors_offset = -1;
1004 
1005  BMEdge *e_curr_iter;
1006 
1007  /* Unfortunately a loop is needed just to clear loop-tags. */
1008  e_curr_iter = v->e;
1009  do { /* Edges of vertex. */
1010  BMLoop *l_curr = e_curr_iter->l;
1011  if (l_curr == NULL) {
1012  continue;
1013  }
1014 
1015  if (do_edge_tag) {
1016  bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
1017  }
1018 
1019  do { /* Radial loops. */
1020  if (l_curr->v != v) {
1021  continue;
1022  }
1023  BM_elem_flag_disable(l_curr, BM_ELEM_TAG);
1024  } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1025  } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1026 
1027  e_curr_iter = v->e;
1028  do { /* Edges of vertex. */
1029  BMLoop *l_curr = e_curr_iter->l;
1030  if (l_curr == NULL) {
1031  continue;
1032  }
1033  do { /* Radial loops. */
1034  if (l_curr->v != v) {
1035  continue;
1036  }
1037  if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1038  !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)) {
1039  continue;
1040  }
1041  bm_mesh_loops_calc_normals_for_loop(bm,
1042  vcos,
1043  fnos,
1044  clnors_data,
1045  cd_loop_clnors_offset,
1046  has_clnors,
1047  edge_vectors,
1048  l_curr,
1049  r_lnos,
1050  r_lnors_spacearr);
1051  } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1052  } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1053 }
1054 
1063 static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
1064  const float (*vcos)[3],
1065  const float (*fnos)[3],
1066  float (*r_lnos)[3],
1067  MLoopNorSpaceArray *r_lnors_spacearr,
1068  const short (*clnors_data)[2],
1069  const int cd_loop_clnors_offset,
1070  const bool do_rebuild,
1071  const float split_angle_cos)
1072 {
1073  BMIter fiter;
1074  BMFace *f_curr;
1075  const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1076  /* When false the caller must have already tagged the edges. */
1077  const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1078 
1079  MLoopNorSpaceArray _lnors_spacearr = {NULL};
1080 
1081  BLI_Stack *edge_vectors = NULL;
1082 
1083  {
1084  char htype = 0;
1085  if (vcos) {
1086  htype |= BM_VERT;
1087  }
1088  /* Face/Loop indices are set inline below. */
1089  BM_mesh_elem_index_ensure(bm, htype);
1090  }
1091 
1092  if (!r_lnors_spacearr && has_clnors) {
1093  /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1094  r_lnors_spacearr = &_lnors_spacearr;
1095  }
1096  if (r_lnors_spacearr) {
1097  BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
1098  edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
1099  }
1100 
1101  /* Clear all loops' tags (means none are to be skipped for now). */
1102  int index_face, index_loop = 0;
1103  BM_ITER_MESH_INDEX (f_curr, &fiter, bm, BM_FACES_OF_MESH, index_face) {
1104  BMLoop *l_curr, *l_first;
1105 
1106  BM_elem_index_set(f_curr, index_face); /* set_inline */
1107 
1108  l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1109  do {
1110  BM_elem_index_set(l_curr, index_loop++); /* set_inline */
1111  BM_elem_flag_disable(l_curr, BM_ELEM_TAG);
1112  } while ((l_curr = l_curr->next) != l_first);
1113  }
1114  bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
1115 
1116  /* Always tag edges based on winding & sharp edge flag
1117  * (even when the auto-smooth angle doesn't need to be calculated). */
1118  if (do_edge_tag) {
1119  bm_mesh_edges_sharp_tag(bm, fnos, has_clnors ? -1.0f : split_angle_cos, false);
1120  }
1121 
1122  /* We now know edges that can be smoothed (they are tagged),
1123  * and edges that will be hard (they aren't).
1124  * Now, time to generate the normals.
1125  */
1126  BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1127  BMLoop *l_curr, *l_first;
1128 
1129  l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1130  do {
1131  if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1132  !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)) {
1133  continue;
1134  }
1135  bm_mesh_loops_calc_normals_for_loop(bm,
1136  vcos,
1137  fnos,
1138  clnors_data,
1139  cd_loop_clnors_offset,
1140  has_clnors,
1141  edge_vectors,
1142  l_curr,
1143  r_lnos,
1144  r_lnors_spacearr);
1145  } while ((l_curr = l_curr->next) != l_first);
1146  }
1147 
1148  if (r_lnors_spacearr) {
1149  BLI_stack_free(edge_vectors);
1150  if (r_lnors_spacearr == &_lnors_spacearr) {
1151  BKE_lnor_spacearr_free(r_lnors_spacearr);
1152  }
1153  }
1154 }
1155 
1156 typedef struct BMLoopsCalcNormalsWithCoordsData {
1157  /* Read-only data. */
1158  const float (*fnos)[3];
1159  const float (*vcos)[3];
1160  BMesh *bm;
1161  const short (*clnors_data)[2];
1162  const int cd_loop_clnors_offset;
1163  const bool do_rebuild;
1164  const float split_angle_cos;
1165 
1166  /* Output. */
1167  float (*r_lnos)[3];
1168  MLoopNorSpaceArray *r_lnors_spacearr;
1169 } BMLoopsCalcNormalsWithCoordsData;
1170 
1171 typedef struct BMLoopsCalcNormalsWithCoords_TLS {
1172  BLI_Stack *edge_vectors;
1173 
1174  /* Per-thread lnor space array, joined into the shared array in the reduce callback. */
1175  MLoopNorSpaceArray *lnors_spacearr;
1176  MLoopNorSpaceArray lnors_spacearr_buf;
1177 } BMLoopsCalcNormalsWithCoords_TLS;
1178 
1179 static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata,
1180  void *__restrict chunk)
1181 {
1182  const BMLoopsCalcNormalsWithCoordsData *data = userdata;
1183  BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
1184  if (data->r_lnors_spacearr) {
1185  tls_data->edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
1186  BKE_lnor_spacearr_tls_init(data->r_lnors_spacearr, &tls_data->lnors_spacearr_buf);
1187  tls_data->lnors_spacearr = &tls_data->lnors_spacearr_buf;
1188  }
1189  else {
1190  tls_data->lnors_spacearr = NULL;
1191  }
1192 }
1193 
1194 static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict userdata,
1195  void *__restrict UNUSED(chunk_join),
1196  void *__restrict chunk)
1197 {
1198  const BMLoopsCalcNormalsWithCoordsData *data = userdata;
1199  BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
1200 
1201  if (data->r_lnors_spacearr) {
1202  BKE_lnor_spacearr_tls_join(data->r_lnors_spacearr, tls_data->lnors_spacearr);
1203  }
1204 }
1205 
1206 static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata,
1207  void *__restrict chunk)
1208 {
1209  const BMLoopsCalcNormalsWithCoordsData *data = userdata;
1210  BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
1211 
1212  if (data->r_lnors_spacearr) {
1213  BLI_stack_free(tls_data->edge_vectors);
1214  }
1215 }
1216 
1217 static void bm_mesh_loops_calc_normals_for_vert_with_clnors_fn(
1218  void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
1219 {
1220  BMVert *v = (BMVert *)mp_v;
1221  if (v->e == NULL) {
1222  return;
1223  }
1224  BMLoopsCalcNormalsWithCoordsData *data = userdata;
1225  BMLoopsCalcNormalsWithCoords_TLS *tls_data = tls->userdata_chunk;
1226  bm_mesh_loops_calc_normals_for_vert_with_clnors(data->bm,
1227  data->vcos,
1228  data->fnos,
1229  data->r_lnos,
1230 
1231  data->clnors_data,
1232  data->cd_loop_clnors_offset,
1233  data->do_rebuild,
1234  data->split_angle_cos,
1235  /* Thread local. */
1236  tls_data->lnors_spacearr,
1237  tls_data->edge_vectors,
1238  /* Iterate over. */
1239  v);
1240 }
1241 
1242 static void bm_mesh_loops_calc_normals_for_vert_without_clnors_fn(
1243  void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
1244 {
1245  BMVert *v = (BMVert *)mp_v;
1246  if (v->e == NULL) {
1247  return;
1248  }
1249  BMLoopsCalcNormalsWithCoordsData *data = userdata;
1250  BMLoopsCalcNormalsWithCoords_TLS *tls_data = tls->userdata_chunk;
1251  bm_mesh_loops_calc_normals_for_vert_without_clnors(data->bm,
1252  data->vcos,
1253  data->fnos,
1254  data->r_lnos,
1255 
1256  data->do_rebuild,
1257  data->split_angle_cos,
1258  /* Thread local. */
1259  tls_data->lnors_spacearr,
1260  tls_data->edge_vectors,
1261  /* Iterate over. */
1262  v);
1263 }
1264 
1265 static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm,
1266  const float (*vcos)[3],
1267  const float (*fnos)[3],
1268  float (*r_lnos)[3],
1269  MLoopNorSpaceArray *r_lnors_spacearr,
1270  const short (*clnors_data)[2],
1271  const int cd_loop_clnors_offset,
1272  const bool do_rebuild,
1273  const float split_angle_cos)
1274 {
1275  const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1276  MLoopNorSpaceArray _lnors_spacearr = {NULL};
1277 
1278  {
1279  char htype = BM_LOOP;
1280  if (vcos) {
1281  htype |= BM_VERT;
1282  }
1283  if (fnos) {
1284  htype |= BM_FACE;
1285  }
1286  /* Face/Loop indices are set inline below. */
1287  BM_mesh_elem_index_ensure(bm, htype);
1288  }
1289 
1290  if (!r_lnors_spacearr && has_clnors) {
1291  /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1292  r_lnors_spacearr = &_lnors_spacearr;
1293  }
1294  if (r_lnors_spacearr) {
1295  BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
1296  }
1297 
1298  /* We now know edges that can be smoothed (they are tagged),
1299  * and edges that will be hard (they aren't).
1300  * Now, time to generate the normals.
1301  */
1302 
1303  TaskParallelSettings settings;
1304  BLI_parallel_mempool_settings_defaults(&settings);
1305 
1306  BMLoopsCalcNormalsWithCoords_TLS tls = {NULL};
1307 
1308  settings.userdata_chunk = &tls;
1309  settings.userdata_chunk_size = sizeof(tls);
1310 
1311  settings.func_init = bm_mesh_loops_calc_normals_for_vert_init_fn;
1312  settings.func_reduce = bm_mesh_loops_calc_normals_for_vert_reduce_fn;
1313  settings.func_free = bm_mesh_loops_calc_normals_for_vert_free_fn;
1314 
1315  BMLoopsCalcNormalsWithCoordsData data = {
1316  .bm = bm,
1317  .vcos = vcos,
1318  .fnos = fnos,
1319  .r_lnos = r_lnos,
1320  .r_lnors_spacearr = r_lnors_spacearr,
1321  .clnors_data = clnors_data,
1322  .cd_loop_clnors_offset = cd_loop_clnors_offset,
1323  .do_rebuild = do_rebuild,
1324  .split_angle_cos = split_angle_cos,
1325  };
1326 
1327  BM_iter_parallel(bm,
1328  BM_VERTS_OF_MESH,
1329  has_clnors ? bm_mesh_loops_calc_normals_for_vert_with_clnors_fn :
1330  bm_mesh_loops_calc_normals_for_vert_without_clnors_fn,
1331  &data,
1332  &settings);
1333 
1334  if (r_lnors_spacearr) {
1335  if (r_lnors_spacearr == &_lnors_spacearr) {
1336  BKE_lnor_spacearr_free(r_lnors_spacearr);
1337  }
1338  }
1339 }
1340 
1341 static void bm_mesh_loops_calc_normals(BMesh *bm,
1342  const float (*vcos)[3],
1343  const float (*fnos)[3],
1344  float (*r_lnos)[3],
1345  MLoopNorSpaceArray *r_lnors_spacearr,
1346  const short (*clnors_data)[2],
1347  const int cd_loop_clnors_offset,
1348  const bool do_rebuild,
1349  const float split_angle_cos)
1350 {
1351  if (bm->totloop < BM_OMP_LIMIT) {
1352  bm_mesh_loops_calc_normals__single_threaded(bm,
1353  vcos,
1354  fnos,
1355  r_lnos,
1356  r_lnors_spacearr,
1357  clnors_data,
1358  cd_loop_clnors_offset,
1359  do_rebuild,
1360  split_angle_cos);
1361  }
1362  else {
1363  bm_mesh_loops_calc_normals__multi_threaded(bm,
1364  vcos,
1365  fnos,
1366  r_lnos,
1367  r_lnors_spacearr,
1368  clnors_data,
1369  cd_loop_clnors_offset,
1370  do_rebuild,
1371  split_angle_cos);
1372  }
1373 }
1374 
1375 /* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
1376 #define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
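A quick editorial note on the magnitude of this threshold: for unit normals the dot product is cos(angle), and acos(1.0f - 1e-4f) is roughly 0.8 degrees, so two loop normals are only treated as matching (same smooth fan, same custom-normal reference) when they differ by less than about 0.8 degrees.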
1377 
1382 static bool bm_mesh_loops_split_lnor_fans(BMesh *bm,
1383  MLoopNorSpaceArray *lnors_spacearr,
1384  const float (*new_lnors)[3])
1385 {
1386  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)bm->totloop, __func__);
1387  bool changed = false;
1388 
1389  BLI_assert(lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
1390 
1391  for (int i = 0; i < bm->totloop; i++) {
1392  if (!lnors_spacearr->lspacearr[i]) {
1393  /* This should not happen in theory, but in some rare case (probably ugly geometry)
1394  * we can get some NULL loopspacearr at this point. :/
1395  * Maybe we should set those loops' edges as sharp?
1396  */
1397  BLI_BITMAP_ENABLE(done_loops, i);
1398  if (G.debug & G_DEBUG) {
1399  printf("WARNING! Getting invalid NULL loop space for loop %d!\n", i);
1400  }
1401  continue;
1402  }
1403 
1404  if (!BLI_BITMAP_TEST(done_loops, i)) {
1405  /* Notes:
1406  * * In case of mono-loop smooth fan, we have nothing to do.
1407  * * Loops in this linklist are ordered (in reversed order compared to how they were
1408  * discovered by BKE_mesh_normals_loop_split(), but this is not a problem).
1409  * Which means if we find a mismatching clnor,
1410  * we know all remaining loops will have to be in a new, different smooth fan/lnor space.
1411  * * In smooth fan case, we compare each clnor against a ref one,
1412  * to avoid small differences adding up into a real big one in the end!
1413  */
1414  if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1415  BLI_BITMAP_ENABLE(done_loops, i);
1416  continue;
1417  }
1418 
1419  LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
1420  BMLoop *prev_ml = NULL;
1421  const float *org_nor = NULL;
1422 
1423  while (loops) {
1424  BMLoop *ml = loops->link;
1425  const int lidx = BM_elem_index_get(ml);
1426  const float *nor = new_lnors[lidx];
1427 
1428  if (!org_nor) {
1429  org_nor = nor;
1430  }
1431  else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1432  /* Current normal differs too much from org one, we have to tag the edge between
1433  * previous loop's face and current's one as sharp.
1434  * We know those two loops do not point to the same edge,
1435  * since we do not allow reversed winding in a same smooth fan.
1436  */
1437  BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
1438 
1438 
1439  BM_elem_flag_disable(e, BM_ELEM_TAG | BM_ELEM_SMOOTH);
1440  changed = true;
1441 
1442  org_nor = nor;
1443  }
1444 
1445  prev_ml = ml;
1446  loops = loops->next;
1447  BLI_BITMAP_ENABLE(done_loops, lidx);
1448  }
1449 
1450  /* We also have to check between last and first loops,
1451  * otherwise we may miss some sharp edges here!
1452  * This is just a simplified version of above while loop.
1453  * See T45984. */
1454  loops = lnors_spacearr->lspacearr[i]->loops;
1455  if (loops && org_nor) {
1456  BMLoop *ml = loops->link;
1457  const int lidx = BM_elem_index_get(ml);
1458  const float *nor = new_lnors[lidx];
1459 
1460  if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1461  BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
1462 
1462 
1463  BM_elem_flag_disable(e, BM_ELEM_TAG | BM_ELEM_SMOOTH);
1464  changed = true;
1465  }
1466  }
1467  }
1468  }
1469 
1470  MEM_freeN(done_loops);
1471  return changed;
1472 }
1473 
1479  MLoopNorSpaceArray *lnors_spacearr,
1480  short (*r_clnors_data)[2],
1481  const int cd_loop_clnors_offset,
1482  const float (*new_lnors)[3])
1483 {
1484  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)bm->totloop, __func__);
1485 
1486  BLI_SMALLSTACK_DECLARE(clnors_data, short *);
1487 
1488  BLI_assert(lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
1489 
1490  for (int i = 0; i < bm->totloop; i++) {
1491  if (!lnors_spacearr->lspacearr[i]) {
1492  BLI_BITMAP_ENABLE(done_loops, i);
1493  if (G.debug & G_DEBUG) {
1494  printf("WARNING! Still getting invalid NULL loop space in second loop for loop %d!\n", i);
1495  }
1496  continue;
1497  }
1498 
1499  if (!BLI_BITMAP_TEST(done_loops, i)) {
1500  /* Note we accumulate and average all custom normals in current smooth fan,
1501  * to avoid getting different clnors data (tiny differences in plain custom normals can
1502  * give rather huge differences in computed 2D factors).
1503  */
1504  LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
1505 
1506  if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1507  BMLoop *ml = (BMLoop *)loops;
1508  const int lidx = BM_elem_index_get(ml);
1509 
1510  BLI_assert(lidx == i);
1511 
1512  const float *nor = new_lnors[lidx];
1513  short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
1514  BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);
1515 
1516  BKE_lnor_space_custom_normal_to_data(lnors_spacearr->lspacearr[i], nor, clnor);
1517  BLI_BITMAP_ENABLE(done_loops, i);
1518  }
1519  else {
1520  int avg_nor_count = 0;
1521  float avg_nor[3];
1522  short clnor_data_tmp[2], *clnor_data;
1523 
1524  zero_v3(avg_nor);
1525 
1526  while (loops) {
1527  BMLoop *ml = loops->link;
1528  const int lidx = BM_elem_index_get(ml);
1529  const float *nor = new_lnors[lidx];
1530  short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
1531  BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);
1532 
1533  avg_nor_count++;
1534  add_v3_v3(avg_nor, nor);
1535  BLI_SMALLSTACK_PUSH(clnors_data, clnor);
1536 
1537  loops = loops->next;
1538  BLI_BITMAP_ENABLE(done_loops, lidx);
1539  }
1540 
1541  mul_v3_fl(avg_nor, 1.0f / (float)avg_nor_count);
1542  BKE_lnor_space_custom_normal_to_data(
1543  lnors_spacearr->lspacearr[i], avg_nor, clnor_data_tmp);
1544 
1545  while ((clnor_data = BLI_SMALLSTACK_POP(clnors_data))) {
1546  clnor_data[0] = clnor_data_tmp[0];
1547  clnor_data[1] = clnor_data_tmp[1];
1548  }
1549  }
1550  }
1551  }
1552 
1553  MEM_freeN(done_loops);
1554 }
1555 
1563 static void bm_mesh_loops_custom_normals_set(BMesh *bm,
1564  const float (*vcos)[3],
1565  const float (*fnos)[3],
1566  MLoopNorSpaceArray *r_lnors_spacearr,
1567  short (*r_clnors_data)[2],
1568  const int cd_loop_clnors_offset,
1569  float (*new_lnors)[3],
1570  const int cd_new_lnors_offset,
1571  bool do_split_fans)
1572 {
1573  BMFace *f;
1574  BMLoop *l;
1575  BMIter liter, fiter;
1576  float(*cur_lnors)[3] = MEM_mallocN(sizeof(*cur_lnors) * bm->totloop, __func__);
1577 
1578  BKE_lnor_spacearr_clear(r_lnors_spacearr);
1579 
1580  /* Tag smooth edges and set lnos from vnos when they might be completely smooth...
1581  * When using custom loop normals, disable the angle feature! */
1582  bm_mesh_edges_sharp_tag(bm, fnos, -1.0f, false);
1583 
1584  /* Finish computing lnos by accumulating face normals
1585  * in each fan of faces defined by sharp edges. */
1586  bm_mesh_loops_calc_normals(bm,
1587  vcos,
1588  fnos,
1589  cur_lnors,
1590  r_lnors_spacearr,
1591  r_clnors_data,
1592  cd_loop_clnors_offset,
1593  false,
1594  EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1595 
1596  /* Extract new normals from the data layer if necessary. */
1597  float(*custom_lnors)[3] = new_lnors;
1598 
1599  if (new_lnors == NULL) {
1600  custom_lnors = MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__);
1601 
1602  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1603  BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1604  const float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset);
1605  copy_v3_v3(custom_lnors[BM_elem_index_get(l)], normal);
1606  }
1607  }
1608  }
1609 
1610  /* Validate the new normals. */
1611  for (int i = 0; i < bm->totloop; i++) {
1612  if (is_zero_v3(custom_lnors[i])) {
1613  copy_v3_v3(custom_lnors[i], cur_lnors[i]);
1614  }
1615  else {
1616  normalize_v3(custom_lnors[i]);
1617  }
1618  }
1619 
1620  /* Now, check each current smooth fan (one lnor space per smooth fan!),
1621  * and if all its matching custom lnors are not equal, add sharp edges as needed. */
1622  if (do_split_fans && bm_mesh_loops_split_lnor_fans(bm, r_lnors_spacearr, custom_lnors)) {
1623  /* If any sharp edges were added, run bm_mesh_loops_calc_normals() again to get lnor
1624  * spacearr/smooth fans matching the given custom lnors. */
1625  BKE_lnor_spacearr_clear(r_lnors_spacearr);
1626 
1627  bm_mesh_loops_calc_normals(bm,
1628  vcos,
1629  fnos,
1630  cur_lnors,
1631  r_lnors_spacearr,
1632  r_clnors_data,
1633  cd_loop_clnors_offset,
1634  false,
1635  EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1636  }
1637 
1638  /* And we just have to convert plain object-space custom normals to our
1639  * lnor space-encoded ones. */
1640  bm_mesh_loops_assign_normal_data(
1641  bm, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, custom_lnors);
1642 
1643  MEM_freeN(cur_lnors);
1644 
1645  if (custom_lnors != new_lnors) {
1646  MEM_freeN(custom_lnors);
1647  }
1648 }
1649 
1650 static void bm_mesh_loops_calc_normals_no_autosmooth(BMesh *bm,
1651  const float (*vnos)[3],
1652  const float (*fnos)[3],
1653  float (*r_lnos)[3])
1654 {
1655  BMIter fiter;
1656  BMFace *f_curr;
1657 
1658  {
1659  char htype = BM_LOOP;
1660  if (vnos) {
1661  htype |= BM_VERT;
1662  }
1663  if (fnos) {
1664  htype |= BM_FACE;
1665  }
1666  BM_mesh_elem_index_ensure(bm, htype);
1667  }
1668 
1669  BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1670  BMLoop *l_curr, *l_first;
1671  const bool is_face_flat = !BM_elem_flag_test(f_curr, BM_ELEM_SMOOTH);
1672 
1673  l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1674  do {
1675  const float *no = is_face_flat ? (fnos ? fnos[BM_elem_index_get(f_curr)] : f_curr->no) :
1676  (vnos ? vnos[BM_elem_index_get(l_curr->v)] : l_curr->v->no);
1677  copy_v3_v3(r_lnos[BM_elem_index_get(l_curr)], no);
1678 
1679  } while ((l_curr = l_curr->next) != l_first);
1680  }
1681 }
1682 
1683 void BM_loops_calc_normal_vcos(BMesh *bm,
1684  const float (*vcos)[3],
1685  const float (*vnos)[3],
1686  const float (*fnos)[3],
1687  const bool use_split_normals,
1688  const float split_angle,
1689  float (*r_lnos)[3],
1690  MLoopNorSpaceArray *r_lnors_spacearr,
1691  short (*clnors_data)[2],
1692  const int cd_loop_clnors_offset,
1693  const bool do_rebuild)
1694 {
1695  const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1696 
1697  if (use_split_normals) {
1698  bm_mesh_loops_calc_normals(bm,
1699  vcos,
1700  fnos,
1701  r_lnos,
1702  r_lnors_spacearr,
1703  clnors_data,
1704  cd_loop_clnors_offset,
1705  do_rebuild,
1706  has_clnors ? -1.0f : cosf(split_angle));
1707  }
1708  else {
1709  BLI_assert(!r_lnors_spacearr);
1710  bm_mesh_loops_calc_normals_no_autosmooth(bm, vnos, fnos, r_lnos);
1711  }
1712 }
1713 
1716 /* -------------------------------------------------------------------- */
1720 void BM_lnorspacearr_store(BMesh *bm, float (*r_lnors)[3])
1721 {
1723 
1724  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
1725  BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
1726  }
1727 
1728  int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
1729 
1730  BM_loops_calc_normal_vcos(bm,
1731  NULL,
1732  NULL,
1733  NULL,
1734  true,
1735  M_PI,
1736  r_lnors,
1737  bm->lnor_spacearr,
1738  NULL,
1739  cd_loop_clnors_offset,
1740  false);
1741  bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
1742 }
1743 
1744 #define CLEAR_SPACEARRAY_THRESHOLD(x) ((x) / 2)
1745 
1746 void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
1747 {
1748  if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1749  return;
1750  }
1751  if (do_invalidate_all || bm->totvertsel > CLEAR_SPACEARRAY_THRESHOLD(bm->totvert)) {
1752  bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
1753  return;
1754  }
1755  if (bm->lnor_spacearr == NULL) {
1756  bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
1757  return;
1758  }
1759 
1760  BMVert *v;
1761  BMLoop *l;
1762  BMIter viter, liter;
1763  /* NOTE: we could use temp tag of BMItem for that,
1764  * but probably better not use it in such a low-level func?
1765  * --mont29 */
1766  BLI_bitmap *done_verts = BLI_BITMAP_NEW(bm->totvert, __func__);
1767 
1769 
1770  /* When we affect a given vertex, we may affect following smooth fans:
1771  * - all smooth fans of said vertex;
1772  * - all smooth fans of all immediate loop-neighbors vertices;
1773  * This can be simplified as 'all loops of selected vertices and their immediate neighbors'
1774  * need to be tagged for update.
1775  */
1776  BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
1777  if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
1778  BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1779  BM_ELEM_API_FLAG_ENABLE(l, BM_LNORSPACE_UPDATE);
1780 
1781  /* Note that we only handle unselected neighbor vertices here, main loop will take care of
1782  * selected ones. */
1783  if ((!BM_elem_flag_test(l->prev->v, BM_ELEM_SELECT)) &&
1784  !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->prev->v))) {
1785 
1786  BMLoop *l_prev;
1787  BMIter liter_prev;
1788  BM_ITER_ELEM (l_prev, &liter_prev, l->prev->v, BM_LOOPS_OF_VERT) {
1789  BM_ELEM_API_FLAG_ENABLE(l_prev, BM_LNORSPACE_UPDATE);
1790  }
1791  BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_prev->v));
1792  }
1793 
1794  if ((!BM_elem_flag_test(l->next->v, BM_ELEM_SELECT)) &&
1795  !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->next->v))) {
1796 
1797  BMLoop *l_next;
1798  BMIter liter_next;
1799  BM_ITER_ELEM (l_next, &liter_next, l->next->v, BM_LOOPS_OF_VERT) {
1800  BM_ELEM_API_FLAG_ENABLE(l_next, BM_LNORSPACE_UPDATE);
1801  }
1802  BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_next->v));
1803  }
1804  }
1805 
1806  BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(v));
1807  }
1808  }
1809 
1810  MEM_freeN(done_verts);
1811  bm->spacearr_dirty |= BM_SPACEARR_DIRTY;
1812 }
1813 
1814 void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
1815 {
1817 
1818  if (!(bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL))) {
1819  return;
1820  }
1821  BMFace *f;
1822  BMLoop *l;
1823  BMIter fiter, liter;
1824 
1825  float(*r_lnors)[3] = MEM_callocN(sizeof(*r_lnors) * bm->totloop, __func__);
1826  float(*oldnors)[3] = preserve_clnor ? MEM_mallocN(sizeof(*oldnors) * bm->totloop, __func__) :
1827  NULL;
1828 
1829  int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
1830 
1831  BM_mesh_elem_index_ensure(bm, BM_LOOP);
1832 
1833  if (preserve_clnor) {
1835 
1836  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1837  BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1838  if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
1839  bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1840  short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
1841  int l_index = BM_elem_index_get(l);
1842 
1843  BKE_lnor_space_custom_data_to_normal(
1844  bm->lnor_spacearr->lspacearr[l_index], *clnor, oldnors[l_index]);
1845  }
1846  }
1847  }
1848  }
1849 
1850  if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1851  BKE_lnor_spacearr_clear(bm->lnor_spacearr);
1852  }
1853  BM_loops_calc_normal_vcos(bm,
1854  NULL,
1855  NULL,
1856  NULL,
1857  true,
1858  M_PI,
1859  r_lnors,
1860  bm->lnor_spacearr,
1861  NULL,
1862  cd_loop_clnors_offset,
1863  true);
1864  MEM_freeN(r_lnors);
1865 
1866  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1867  BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1868  if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
1869  bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1870  if (preserve_clnor) {
1871  short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
1872  int l_index = BM_elem_index_get(l);
1873  BKE_lnor_space_custom_normal_to_data(
1874  bm->lnor_spacearr->lspacearr[l_index], oldnors[l_index], *clnor);
1875  }
1876  BM_ELEM_API_FLAG_DISABLE(l, BM_LNORSPACE_UPDATE);
1877  }
1878  }
1879  }
1880 
1881  MEM_SAFE_FREE(oldnors);
1882  bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
1883 
1884 #ifndef NDEBUG
1885  BM_lnorspace_err(bm);
1886 #endif
1887 }
1888 
1889 void BM_lnorspace_update(BMesh *bm)
1890 {
1891  if (bm->lnor_spacearr == NULL) {
1892  bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
1893  }
1894  if (bm->lnor_spacearr->lspacearr == NULL) {
1895  float(*lnors)[3] = MEM_callocN(sizeof(*lnors) * bm->totloop, __func__);
1896 
1897  BM_lnorspacearr_store(bm, lnors);
1898 
1899  MEM_freeN(lnors);
1900  }
1901  else if (bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL)) {
1902  BM_lnorspace_rebuild(bm, false);
1903  }
1904 }
1905 
1908 /* -------------------------------------------------------------------- */
1919 #ifndef NDEBUG
1920 void BM_lnorspace_err(BMesh *bm)
1921 {
1923  bool clear = true;
1924 
1925  MLoopNorSpaceArray *temp = MEM_callocN(sizeof(*temp), __func__);
1926  temp->lspacearr = NULL;
1927 
1928  BKE_lnor_spacearr_init(temp, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
1929 
1930  int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
1931  float(*lnors)[3] = MEM_callocN(sizeof(*lnors) * bm->totloop, __func__);
1932  BM_loops_calc_normal_vcos(
1933  bm, NULL, NULL, NULL, true, M_PI, lnors, temp, NULL, cd_loop_clnors_offset, true);
1934 
1935  for (int i = 0; i < bm->totloop; i++) {
1936  int j = 0;
1937  j += compare_ff(
1938  temp->lspacearr[i]->ref_alpha, bm->lnor_spacearr->lspacearr[i]->ref_alpha, 1e-4f);
1939  j += compare_ff(
1940  temp->lspacearr[i]->ref_beta, bm->lnor_spacearr->lspacearr[i]->ref_beta, 1e-4f);
1941  j += compare_v3v3(
1942  temp->lspacearr[i]->vec_lnor, bm->lnor_spacearr->lspacearr[i]->vec_lnor, 1e-4f);
1943  j += compare_v3v3(
1944  temp->lspacearr[i]->vec_ortho, bm->lnor_spacearr->lspacearr[i]->vec_ortho, 1e-4f);
1945  j += compare_v3v3(
1946  temp->lspacearr[i]->vec_ref, bm->lnor_spacearr->lspacearr[i]->vec_ref, 1e-4f);
1947 
1948  if (j != 5) {
1949  clear = false;
1950  break;
1951  }
1952  }
1953  BKE_lnor_spacearr_free(temp);
1954  MEM_freeN(temp);
1955  MEM_freeN(lnors);
1956  BLI_assert(clear);
1957 
1959 }
1960 #endif
1961 
1962 static void bm_loop_normal_mark_indiv_do_loop(BMLoop *l,
1963  BLI_bitmap *loops,
1964  MLoopNorSpaceArray *lnor_spacearr,
1965  int *totloopsel,
1966  const bool do_all_loops_of_vert)
1967 {
1968  if (l != NULL) {
1969  const int l_idx = BM_elem_index_get(l);
1970 
1971  if (!BLI_BITMAP_TEST(loops, l_idx)) {
1972  /* If vert and face selected share a loop, mark it for editing. */
1973  BLI_BITMAP_ENABLE(loops, l_idx);
1974  (*totloopsel)++;
1975 
1976  if (do_all_loops_of_vert) {
1977  /* If required, also mark all loops shared by that vertex.
1978  * This is needed when loop spaces may change
1979  * (i.e. when some faces or edges might change of smooth/sharp status). */
1980  BMIter liter;
1981  BMLoop *lfan;
1982  BM_ITER_ELEM (lfan, &liter, l->v, BM_LOOPS_OF_VERT) {
1983  const int lfan_idx = BM_elem_index_get(lfan);
1984  if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
1985  BLI_BITMAP_ENABLE(loops, lfan_idx);
1986  (*totloopsel)++;
1987  }
1988  }
1989  }
1990  else {
1991  /* Mark all loops in same loop normal space (aka smooth fan). */
1992  if ((lnor_spacearr->lspacearr[l_idx]->flags & MLNOR_SPACE_IS_SINGLE) == 0) {
1993  for (LinkNode *node = lnor_spacearr->lspacearr[l_idx]->loops; node; node = node->next) {
1994  const int lfan_idx = BM_elem_index_get((BMLoop *)node->link);
1995  if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
1996  BLI_BITMAP_ENABLE(loops, lfan_idx);
1997  (*totloopsel)++;
1998  }
1999  }
2000  }
2001  }
2002  }
2003  }
2004 }
2005 
2006 /* Mark the individual clnors to be edited, if multiple selection methods are used. */
2007 static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
2008 {
2009  BMEditSelection *ese, *ese_prev;
2010  int totloopsel = 0;
2011 
2012  const bool sel_verts = (bm->selectmode & SCE_SELECT_VERTEX) != 0;
2013  const bool sel_edges = (bm->selectmode & SCE_SELECT_EDGE) != 0;
2014  const bool sel_faces = (bm->selectmode & SCE_SELECT_FACE) != 0;
2015  const bool use_sel_face_history = sel_faces && (sel_edges || sel_verts);
2016 
2018 
2021 
2022  if (use_sel_face_history) {
2023  /* Using face history allows selecting a single loop from a single face...
2024  * Note that this is O(n^2) piece of code,
2025  * but it is not designed to be used with huge selection sets,
2026  * rather with only a few items selected at most. */
2027  /* Goes from last selected to the first selected element. */
2028  for (ese = bm->selected.last; ese; ese = ese->prev) {
2029  if (ese->htype == BM_FACE) {
2030  /* If current face is selected,
2031  * then any verts to be edited must have been selected before it. */
2032  for (ese_prev = ese->prev; ese_prev; ese_prev = ese_prev->prev) {
2033  if (ese_prev->htype == BM_VERT) {
2034  bm_loop_normal_mark_indiv_do_loop(
2035  BM_face_vert_share_loop((BMFace *)ese->ele, (BMVert *)ese_prev->ele),
2036  loops,
2037  bm->lnor_spacearr,
2038  &totloopsel,
2039  do_all_loops_of_vert);
2040  }
2041  else if (ese_prev->htype == BM_EDGE) {
2042  BMEdge *e = (BMEdge *)ese_prev->ele;
2043  bm_loop_normal_mark_indiv_do_loop(BM_face_vert_share_loop((BMFace *)ese->ele, e->v1),
2044  loops,
2045  bm->lnor_spacearr,
2046  &totloopsel,
2047  do_all_loops_of_vert);
2048 
2049  bm_loop_normal_mark_indiv_do_loop(BM_face_vert_share_loop((BMFace *)ese->ele, e->v2),
2050  loops,
2051  bm->lnor_spacearr,
2052  &totloopsel,
2053  do_all_loops_of_vert);
2054  }
2055  }
2056  }
2057  }
2058  }
2059  else {
2060  if (sel_faces) {
2061  /* Only select all loops of selected faces. */
2062  BMLoop *l;
2063  BMFace *f;
2064  BMIter liter, fiter;
2065  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
2066  if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
2067  BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
2068  bm_loop_normal_mark_indiv_do_loop(
2069  l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
2070  }
2071  }
2072  }
2073  }
2074  if (sel_edges) {
2075  /* Only select all loops of selected edges. */
2076  BMLoop *l;
2077  BMEdge *e;
2078  BMIter liter, eiter;
2079  BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) {
2080  if (BM_elem_flag_test(e, BM_ELEM_SELECT)) {
2081  BM_ITER_ELEM (l, &liter, e, BM_LOOPS_OF_EDGE) {
2082  bm_loop_normal_mark_indiv_do_loop(
2083  l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
2084  /* Loops effectively 'have' two edges; put differently, a selected edge actually selects
2085  * *two* loops in each of its faces. We have to find the other one too. */
2086  if (BM_vert_in_edge(e, l->next->v)) {
2087  bm_loop_normal_mark_indiv_do_loop(
2088  l->next, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
2089  }
2090  else {
2092  bm_loop_normal_mark_indiv_do_loop(
2093  l->prev, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
2094  }
2095  }
2096  }
2097  }
2098  }
2099  if (sel_verts) {
2100  /* Select all loops of selected verts. */
2101  BMLoop *l;
2102  BMVert *v;
2103  BMIter liter, viter;
2104  BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
2105  if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
2106  BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
2107  bm_loop_normal_mark_indiv_do_loop(
2108  l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
2109  }
2110  }
2111  }
2112  }
2113  }
2114 
2115  return totloopsel;
2116 }
2117 
2118 static void loop_normal_editdata_init(
2119  BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
2120 {
2123 
2124  const int l_index = BM_elem_index_get(l);
2125  short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, offset);
2126 
2127  lnor_ed->loop_index = l_index;
2128  lnor_ed->loop = l;
2129 
2130  float custom_normal[3];
2131  BKE_lnor_space_custom_data_to_normal(
2132  bm->lnor_spacearr->lspacearr[l_index], clnors_data, custom_normal);
2133 
2134  lnor_ed->clnors_data = clnors_data;
2135  copy_v3_v3(lnor_ed->nloc, custom_normal);
2136  copy_v3_v3(lnor_ed->niloc, custom_normal);
2137 
2138  lnor_ed->loc = v->co;
2139 }
2140 
2141 BMLoopNorEditDataArray *BM_loop_normal_editdata_array_init(BMesh *bm,
2142  const bool do_all_loops_of_vert)
2143 {
2144  BMLoop *l;
2145  BMVert *v;
2146  BMIter liter, viter;
2147 
2148  int totloopsel = 0;
2149 
2150  BLI_assert(bm->spacearr_dirty == 0);
2151 
2152  BMLoopNorEditDataArray *lnors_ed_arr = MEM_callocN(sizeof(*lnors_ed_arr), __func__);
2153  lnors_ed_arr->lidx_to_lnor_editdata = MEM_callocN(
2154  sizeof(*lnors_ed_arr->lidx_to_lnor_editdata) * bm->totloop, __func__);
2155 
2156  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
2157  BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
2158  }
2159  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
2160 
2162 
2163  BLI_bitmap *loops = BLI_BITMAP_NEW(bm->totloop, __func__);
2164 
2165  /* This function defines which loop normals to edit, based on selection modes and history. */
2166  totloopsel = bm_loop_normal_mark_indiv(bm, loops, do_all_loops_of_vert);
2167 
2168  if (totloopsel) {
2169  BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata = MEM_mallocN(
2170  sizeof(*lnor_ed) * totloopsel, __func__);
2171 
2172  BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
2173  BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
2174  if (BLI_BITMAP_TEST(loops, BM_elem_index_get(l))) {
2175  loop_normal_editdata_init(bm, lnor_ed, v, l, cd_custom_normal_offset);
2176  lnors_ed_arr->lidx_to_lnor_editdata[BM_elem_index_get(l)] = lnor_ed;
2177  lnor_ed++;
2178  }
2179  }
2180  }
2181  lnors_ed_arr->totloop = totloopsel;
2182  }
2183 
2184  MEM_freeN(loops);
2185  lnors_ed_arr->cd_custom_normal_offset = cd_custom_normal_offset;
2186  return lnors_ed_arr;
2187 }
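A hedged sketch of the typical lifecycle of the array built above (illustrative only; `new_normal` is a hypothetical float[3] chosen by the caller): ensure the loop-normal spaces are valid, build the edit data, write each new normal back into the custom-data layer through its loop-normal space, then free the array.

  BM_lnorspace_update(bm); /* ensure bm->spacearr_dirty == 0, as asserted above */
  BMLoopNorEditDataArray *lnors_ed_arr = BM_loop_normal_editdata_array_init(bm, false);
  for (int i = 0; i < lnors_ed_arr->totloop; i++) {
    BMLoopNorEditData *lnor_ed = &lnors_ed_arr->lnor_editdata[i];
    copy_v3_v3(lnor_ed->nloc, new_normal); /* hypothetical target normal */
    BKE_lnor_space_custom_normal_to_data(
        bm->lnor_spacearr->lspacearr[lnor_ed->loop_index], lnor_ed->nloc, lnor_ed->clnors_data);
  }
  BM_loop_normal_editdata_array_free(lnors_ed_arr);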
2188 
2189 void BM_loop_normal_editdata_array_free(BMLoopNorEditDataArray *lnors_ed_arr)
2190 {
2191  MEM_SAFE_FREE(lnors_ed_arr->lnor_editdata);
2192  MEM_SAFE_FREE(lnors_ed_arr->lidx_to_lnor_editdata);
2193  MEM_freeN(lnors_ed_arr);
2194 }
2195 
2198 /* -------------------------------------------------------------------- */
2202 bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)
2203 {
2204  BMFace *f;
2205  BMLoop *l;
2206  BMIter liter, fiter;
2207 
2208  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
2209  return false;
2210  }
2211 
2212  BM_lnorspace_update(bm);
2213 
2214  /* Create a loop normal layer. */
2215  if (!CustomData_has_layer(&bm->ldata, CD_NORMAL)) {
2216  BM_data_layer_add(bm, &bm->ldata, CD_NORMAL);
2217 
2218  CustomData_set_layer_flag(&bm->ldata, CD_NORMAL, CD_FLAG_TEMPORARY);
2219  }
2220 
2221  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
2222  const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
2223 
2224  int l_index = 0;
2225  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
2226  BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
2227  const short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset);
2228  float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset);
2229 
2230  BKE_lnor_space_custom_data_to_normal(
2231  bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);
2232  l_index += 1;
2233  }
2234  }
2235 
2236  return true;
2237 }
2238 
2239 void BM_custom_loop_normals_from_vector_layer(BMesh *bm, bool add_sharp_edges)
2240 {
2241  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL) ||
2242  !CustomData_has_layer(&bm->ldata, CD_NORMAL)) {
2243  return;
2244  }
2245 
2246  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
2247  const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
2248 
2249  if (bm->lnor_spacearr == NULL) {
2250  bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
2251  }
2252 
2253  bm_mesh_loops_custom_normals_set(bm,
2254  NULL,
2255  NULL,
2256  bm->lnor_spacearr,
2257  NULL,
2258  cd_custom_normal_offset,
2259  NULL,
2260  cd_normal_offset,
2261  add_sharp_edges);
2262 
2264 }
2265 
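A hedged round-trip sketch for the two functions above (illustrative only): expose the custom loop normals as a temporary per-loop CD_NORMAL vector layer, let a tool edit those vectors, then convert them back into clnor data, optionally tagging edges sharp where fans had to be split.

  if (BM_custom_loop_normals_to_vector_layer(bm)) {
    /* ... a tool edits the per-loop CD_NORMAL vectors here ... */
    BM_custom_loop_normals_from_vector_layer(bm, /*add_sharp_edges=*/true);
  }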