Blender 3.3 source listing: bmesh_polygon_edgenet.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
9 // #define DEBUG_PRINT
10 
11 #include "MEM_guardedalloc.h"
12 
13 #include "BLI_alloca.h"
14 #include "BLI_array.h"
15 #include "BLI_kdopbvh.h"
16 #include "BLI_linklist_stack.h"
17 #include "BLI_math.h"
18 #include "BLI_memarena.h"
19 #include "BLI_sort_utils.h"
20 #include "BLI_utildefines_stack.h"
21 
22 #include "BKE_customdata.h"
23 
24 #include "bmesh.h"
25 #include "intern/bmesh_private.h"
26 
27 /* -------------------------------------------------------------------- */
28 /** \name Face Split Edge-Net
29  * \{ */
30 
38 /* NOTE: All these flags _must_ be cleared on exit. */
39 
40 /* face is a part of the edge-net (including the original face we're splitting) */
41 #define FACE_NET _FLAG_WALK
42 /* edge is a part of the edge-net we're filling */
43 #define EDGE_NET _FLAG_WALK
44 /* tag verts we've visit */
45 #define VERT_VISIT _FLAG_WALK
46 #define VERT_IN_QUEUE _FLAG_WALK_ALT
47 
48 struct VertOrder {
49  float angle;
50  BMVert *v;
51 };
52 
53 static uint bm_edge_flagged_radial_count(BMEdge *e)
54 {
55  uint count = 0;
56  BMLoop *l;
57 
58  if ((l = e->l)) {
59  do {
60  if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
61  count++;
62  }
63  } while ((l = l->radial_next) != e->l);
64  }
65  return count;
66 }
67 
68 static BMLoop *bm_edge_flagged_radial_first(BMEdge *e)
69 {
70  BMLoop *l;
71 
72  if ((l = e->l)) {
73  do {
74  if (BM_ELEM_API_FLAG_TEST(l->f, FACE_NET)) {
75  return l;
76  }
77  } while ((l = l->radial_next) != e->l);
78  }
79  return NULL;
80 }
81 
82 static void normalize_v2_m3_v3v3(float out[2],
83  const float axis_mat[3][3],
84  const float v1[3],
85  const float v2[3])
86 {
87  float dir[3];
88  sub_v3_v3v3(dir, v1, v2);
89  mul_v2_m3v3(out, axis_mat, dir);
90  normalize_v2(out);
91 }
92 
97 static bool bm_face_split_edgenet_find_loop_pair(BMVert *v_init,
98  const float face_normal[3],
99  const float face_normal_matrix[3][3],
100  BMEdge *e_pair[2])
101 {
102  /* Always find one boundary edge (to determine winding)
103  * and one wire (if available), otherwise another boundary.
104  */
105 
106  /* detect winding */
107  BMLoop *l_walk;
108  bool swap;
109 
110  BLI_SMALLSTACK_DECLARE(edges_boundary, BMEdge *);
111  BLI_SMALLSTACK_DECLARE(edges_wire, BMEdge *);
112  int edges_boundary_len = 0;
113  int edges_wire_len = 0;
114 
115  {
116  BMEdge *e, *e_first;
117  e = e_first = v_init->e;
118  do {
119  if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
120  const uint count = bm_edge_flagged_radial_count(e);
121  if (count == 1) {
122  BLI_SMALLSTACK_PUSH(edges_boundary, e);
123  edges_boundary_len++;
124  }
125  else if (count == 0) {
126  BLI_SMALLSTACK_PUSH(edges_wire, e);
127  edges_wire_len++;
128  }
129  }
130  } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
131  }
132 
133  /* first edge should always be boundary */
134  if (edges_boundary_len == 0) {
135  return false;
136  }
137  e_pair[0] = BLI_SMALLSTACK_POP(edges_boundary);
138 
139  /* use to hold boundary OR wire edges */
140  BLI_SMALLSTACK_DECLARE(edges_search, BMEdge *);
141 
142  /* attempt one boundary and one wire, or 2 boundary */
143  if (edges_wire_len == 0) {
144  if (edges_boundary_len > 1) {
145  e_pair[1] = BLI_SMALLSTACK_POP(edges_boundary);
146 
147  if (edges_boundary_len > 2) {
148  BLI_SMALLSTACK_SWAP(edges_search, edges_boundary);
149  }
150  }
151  else {
152  /* one boundary and no wire */
153  return false;
154  }
155  }
156  else {
157  e_pair[1] = BLI_SMALLSTACK_POP(edges_wire);
158  if (edges_wire_len > 1) {
159  BLI_SMALLSTACK_SWAP(edges_search, edges_wire);
160  }
161  }
162 
163  /* if we swapped above, search this list for the best edge */
164  if (!BLI_SMALLSTACK_IS_EMPTY(edges_search)) {
165  /* find the best edge in 'edge_list' to use for 'e_pair[1]' */
166  const BMVert *v_prev = BM_edge_other_vert(e_pair[0], v_init);
167  const BMVert *v_next = BM_edge_other_vert(e_pair[1], v_init);
168 
169  float dir_prev[2], dir_next[2];
170 
171  normalize_v2_m3_v3v3(dir_prev, face_normal_matrix, v_prev->co, v_init->co);
172  normalize_v2_m3_v3v3(dir_next, face_normal_matrix, v_next->co, v_init->co);
173  float angle_best_cos = dot_v2v2(dir_next, dir_prev);
174 
175  BMEdge *e;
176  while ((e = BLI_SMALLSTACK_POP(edges_search))) {
177  v_next = BM_edge_other_vert(e, v_init);
178  float dir_test[2];
179 
180  normalize_v2_m3_v3v3(dir_test, face_normal_matrix, v_next->co, v_init->co);
181  const float angle_test_cos = dot_v2v2(dir_prev, dir_test);
182 
183  if (angle_test_cos > angle_best_cos) {
184  angle_best_cos = angle_test_cos;
185  e_pair[1] = e;
186  }
187  }
188  }
189 
190  /* flip based on winding */
191  l_walk = bm_edge_flagged_radial_first(e_pair[0]);
192  swap = false;
193  if (face_normal == l_walk->f->no) {
194  swap = !swap;
195  }
196  if (l_walk->v != v_init) {
197  swap = !swap;
198  }
199  if (swap) {
200  SWAP(BMEdge *, e_pair[0], e_pair[1]);
201  }
202 
203  return true;
204 }
205 
214 static bool bm_face_split_edgenet_find_loop_pair_exists(BMVert *v_init)
215 {
216  int edges_boundary_len = 0;
217  int edges_wire_len = 0;
218 
219  {
220  BMEdge *e, *e_first;
221  e = e_first = v_init->e;
222  do {
223  if (BM_ELEM_API_FLAG_TEST(e, EDGE_NET)) {
224  const uint count = bm_edge_flagged_radial_count(e);
225  if (count == 1) {
226  edges_boundary_len++;
227  }
228  else if (count == 0) {
229  edges_wire_len++;
230  }
231  }
232  } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
233  }
234 
235  /* first edge should always be boundary */
236  if (edges_boundary_len == 0) {
237  return false;
238  }
239 
240  /* attempt one boundary and one wire, or 2 boundary */
241  if (edges_wire_len == 0) {
242  if (edges_boundary_len >= 2) {
243  /* pass */
244  }
245  else {
246  /* one boundary and no wire */
247  return false;
248  }
249  }
250  else {
251  /* pass */
252  }
253 
254  return true;
255 }
256 
257 static bool bm_face_split_edgenet_find_loop_walk(BMVert *v_init,
258  const float face_normal[3],
259  /* cache to avoid realloc every time */
260  struct VertOrder *edge_order,
261  const uint edge_order_len,
262  BMEdge *e_pair[2])
263 {
264  /* fast-path for the common case (avoid push-pop).
265  * Also avoids tagging as visited since we know we
266  * can't reach these verts some other way */
267 #define USE_FASTPATH_NOFORK
268 
269  BMVert *v;
270  BMVert *v_dst;
271  bool found = false;
272 
273  struct VertOrder *eo;
274  STACK_DECLARE(edge_order);
275 
276  /* store visited verts so we can clear the visit flag after execution */
277  BLI_SMALLSTACK_DECLARE(vert_visit, BMVert *);
278 
279  /* likely this will stay very small
280  * all verts pushed into this stack _must_ have their previous edges set! */
281  BLI_SMALLSTACK_DECLARE(vert_stack, BMVert *);
282  BLI_SMALLSTACK_DECLARE(vert_stack_next, BMVert *);
283 
284  STACK_INIT(edge_order, edge_order_len);
285 
286  /* start stepping */
287  v = BM_edge_other_vert(e_pair[0], v_init);
288  v->e = e_pair[0];
289  BLI_SMALLSTACK_PUSH(vert_stack, v);
290 
291  v_dst = BM_edge_other_vert(e_pair[1], v_init);
292 
293 #ifdef DEBUG_PRINT
294  printf("%s: vert (search) %d\n", __func__, BM_elem_index_get(v_init));
295 #endif
296 
297  /* This loop will keep stepping over the best possible edge,
298  * in most cases it finds the direct route to close the face.
299  *
300  * In cases where paths can't be closed,
301  * alternatives are stored in the 'vert_stack'.
302  */
303  while ((v = BLI_SMALLSTACK_POP_EX(vert_stack, vert_stack_next))) {
304 #ifdef USE_FASTPATH_NOFORK
305  walk_nofork:
306 #else
307  BLI_SMALLSTACK_PUSH(vert_visit, v);
308  BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
309 #endif
310 
311  BLI_assert(STACK_SIZE(edge_order) == 0);
312 
313  /* check if we're done! */
314  if (v == v_dst) {
315  found = true;
316  goto finally;
317  }
318 
319  BMEdge *e_next, *e_first;
320  e_first = v->e;
321  e_next = BM_DISK_EDGE_NEXT(e_first, v); /* always skip this verts edge */
322 
323  /* in rare cases there may be edges with a single connecting vertex */
324  if (e_next != e_first) {
325  do {
326  if (BM_ELEM_API_FLAG_TEST(e_next, EDGE_NET) &&
327  (bm_edge_flagged_radial_count(e_next) < 2)) {
328  BMVert *v_next;
329 
330  v_next = BM_edge_other_vert(e_next, v);
331  BLI_assert(v->e != e_next);
332 
333 #ifdef DEBUG_PRINT
334  /* indent and print */
335  {
336  BMVert *_v = v;
337  do {
338  printf(" ");
339  } while ((_v = BM_edge_other_vert(_v->e, _v)) != v_init);
340  printf("vert %d -> %d (add=%d)\n",
341  BM_elem_index_get(v),
342  BM_elem_index_get(v_next),
343  BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT) == 0);
344  }
345 #endif
346 
347  if (!BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT)) {
348  eo = STACK_PUSH_RET_PTR(edge_order);
349  eo->v = v_next;
350 
351  v_next->e = e_next;
352  }
353  }
354  } while ((e_next = BM_DISK_EDGE_NEXT(e_next, v)) != e_first);
355  }
356 
357 #ifdef USE_FASTPATH_NOFORK
358  if (STACK_SIZE(edge_order) == 1) {
359  eo = STACK_POP_PTR(edge_order);
360  v = eo->v;
361 
362  goto walk_nofork;
363  }
364 #endif
365 
366  /* sort by angle if needed */
367  if (STACK_SIZE(edge_order) > 1) {
368  uint j;
369  BMVert *v_prev = BM_edge_other_vert(v->e, v);
370 
371  for (j = 0; j < STACK_SIZE(edge_order); j++) {
372  edge_order[j].angle = angle_signed_on_axis_v3v3v3_v3(
373  v_prev->co, v->co, edge_order[j].v->co, face_normal);
374  }
375  qsort(edge_order,
376  STACK_SIZE(edge_order),
377  sizeof(struct VertOrder),
378  BLI_sortutil_cmp_float_reverse);
379 
380 #ifdef USE_FASTPATH_NOFORK
381  /* only tag forks */
382  BLI_SMALLSTACK_PUSH(vert_visit, v);
383  BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
384 #endif
385  }
386 
387  while ((eo = STACK_POP_PTR(edge_order))) {
388  BLI_SMALLSTACK_PUSH(vert_stack_next, eo->v);
389  }
390 
391  if (!BLI_SMALLSTACK_IS_EMPTY(vert_stack_next)) {
392  BLI_SMALLSTACK_SWAP(vert_stack, vert_stack_next);
393  }
394  }
395 
396 finally:
397  /* clear flag for next execution */
398  while ((v = BLI_SMALLSTACK_POP(vert_visit))) {
399  BM_ELEM_API_FLAG_DISABLE(v, VERT_VISIT);
400  }
401 
402  return found;
403 
404 #undef USE_FASTPATH_NOFORK
405 }
406 
407 static bool bm_face_split_edgenet_find_loop(BMVert *v_init,
408  const float face_normal[3],
409  const float face_normal_matrix[3][3],
410  /* cache to avoid realloc every time */
411  struct VertOrder *edge_order,
412  const uint edge_order_len,
413  BMVert **r_face_verts,
414  int *r_face_verts_len)
415 {
416  BMEdge *e_pair[2];
417  BMVert *v;
418 
419  if (!bm_face_split_edgenet_find_loop_pair(v_init, face_normal, face_normal_matrix, e_pair)) {
420  return false;
421  }
422 
423  BLI_assert((bm_edge_flagged_radial_count(e_pair[0]) == 1) ||
424  (bm_edge_flagged_radial_count(e_pair[1]) == 1));
425 
426  if (bm_face_split_edgenet_find_loop_walk(
427  v_init, face_normal, edge_order, edge_order_len, e_pair)) {
428  uint i = 0;
429 
430  r_face_verts[i++] = v_init;
431  v = BM_edge_other_vert(e_pair[1], v_init);
432  do {
433  r_face_verts[i++] = v;
434  } while ((v = BM_edge_other_vert(v->e, v)) != v_init);
435  *r_face_verts_len = i;
436  return (i > 2) ? true : false;
437  }
438  return false;
439 }
440 
441 bool BM_face_split_edgenet(BMesh *bm,
442  BMFace *f,
443  BMEdge **edge_net,
444  const int edge_net_len,
445  BMFace ***r_face_arr,
446  int *r_face_arr_len)
447 {
448  /* re-use for new face verts */
449  BMVert **face_verts;
450  int face_verts_len;
451 
452  BMFace **face_arr = NULL;
453  BLI_array_declare(face_arr);
454 
455  BMVert **vert_queue;
456  STACK_DECLARE(vert_queue);
457  int i;
458 
459  struct VertOrder *edge_order;
460  const uint edge_order_len = edge_net_len + 2;
461 
462  BMVert *v;
463 
464  BMLoop *l_iter, *l_first;
465 
466  if (!edge_net_len) {
467  if (r_face_arr) {
468  *r_face_arr = NULL;
469  *r_face_arr_len = 0;
470  }
471  return false;
472  }
473 
474  /* These arrays used to be stack memory, however they can be
475  * large for single faces with complex edge-nets, see: T65980. */
476 
477  /* over-alloc (probably 2-4 is only used in most cases), for the biggest-fan */
478  edge_order = MEM_mallocN(sizeof(*edge_order) * edge_order_len, __func__);
479 
480  /* use later */
481  face_verts = MEM_mallocN(sizeof(*face_verts) * (edge_net_len + f->len), __func__);
482 
483  vert_queue = MEM_mallocN(sizeof(vert_queue) * (edge_net_len + f->len), __func__);
484  STACK_INIT(vert_queue, f->len + edge_net_len);
485 
485 
486  BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
487  BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
488 
489 #ifdef DEBUG
490  for (i = 0; i < edge_net_len; i++) {
491  BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
492  BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
493  }
494  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
495  do {
496  BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter->e, EDGE_NET) == 0);
497  } while ((l_iter = l_iter->next) != l_first);
498 #endif
499 
500  /* NOTE: 'VERT_IN_QUEUE' is often not needed at all,
501  * however in rare cases verts are added multiple times to the queue,
502  * that on its own is harmless but in _very_ rare cases,
503  * the queue will overflow its maximum size,
504  * so we better be strict about this! see: T51539 */
505 
506  for (i = 0; i < edge_net_len; i++) {
507  BM_ELEM_API_FLAG_ENABLE(edge_net[i], EDGE_NET);
508  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_IN_QUEUE);
509  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_IN_QUEUE);
510  }
511  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
512  do {
513  BM_ELEM_API_FLAG_ENABLE(l_iter->e, EDGE_NET);
514  BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_IN_QUEUE);
515  } while ((l_iter = l_iter->next) != l_first);
516 
517  float face_normal_matrix[3][3];
518  axis_dominant_v3_to_m3(face_normal_matrix, f->no);
519 
520  /* any vert can be used to begin with */
521  STACK_PUSH(vert_queue, l_first->v);
522  BM_ELEM_API_FLAG_ENABLE(l_first->v, VERT_IN_QUEUE);
523 
524  while ((v = STACK_POP(vert_queue))) {
525  BM_ELEM_API_FLAG_DISABLE(v, VERT_IN_QUEUE);
526  if (bm_face_split_edgenet_find_loop(v,
527  f->no,
528  face_normal_matrix,
529  edge_order,
530  edge_order_len,
531  face_verts,
532  &face_verts_len)) {
533  BMFace *f_new;
534 
535  f_new = BM_face_create_verts(bm, face_verts, face_verts_len, f, BM_CREATE_NOP, false);
536 
537  for (i = 0; i < edge_net_len; i++) {
538  BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET));
539  }
540 
541  if (f_new) {
542  BLI_array_append(face_arr, f_new);
543  copy_v3_v3(f_new->no, f->no);
544 
545  /* warning, normally don't do this,
546  * its needed for mesh intersection - which tracks face-sides based on selection */
547  f_new->head.hflag = f->head.hflag;
548  if (f->head.hflag & BM_ELEM_SELECT) {
549  bm->totfacesel++;
550  }
551 
552  BM_ELEM_API_FLAG_ENABLE(f_new, FACE_NET);
553 
554  /* add new verts to keep finding loops for
555  * (verts between boundary and manifold edges) */
556  l_iter = l_first = BM_FACE_FIRST_LOOP(f_new);
557  do {
558  /* Avoid adding to queue multiple times (not common but happens). */
559  if (!BM_ELEM_API_FLAG_TEST(l_iter->v, VERT_IN_QUEUE) &&
560  bm_face_split_edgenet_find_loop_pair_exists(l_iter->v)) {
561  STACK_PUSH(vert_queue, l_iter->v);
562  BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_IN_QUEUE);
563  }
564  } while ((l_iter = l_iter->next) != l_first);
565  }
566  }
567  }
568 
569  if (CustomData_has_math(&bm->ldata)) {
570  /* reuse VERT_VISIT here to tag vert's already interpolated */
571  BMIter iter;
572  BMLoop *l_other;
573 
574  /* See: #BM_loop_interp_from_face for similar logic. */
575  void **blocks = BLI_array_alloca(blocks, f->len);
576  float(*cos_2d)[2] = BLI_array_alloca(cos_2d, f->len);
577  float *w = BLI_array_alloca(w, f->len);
578  float axis_mat[3][3];
579  float co[2];
580 
581  /* interior loops */
582  axis_dominant_v3_to_m3(axis_mat, f->no);
583 
584  /* first simply copy from existing face */
585  i = 0;
586  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
587  do {
588  BM_ITER_ELEM (l_other, &iter, l_iter->v, BM_LOOPS_OF_VERT) {
589  if ((l_other->f != f) && BM_ELEM_API_FLAG_TEST(l_other->f, FACE_NET)) {
590  CustomData_bmesh_copy_data(
591  &bm->ldata, &bm->ldata, l_iter->head.data, &l_other->head.data);
592  }
593  }
594  /* tag not to interpolate */
595  BM_ELEM_API_FLAG_ENABLE(l_iter->v, VERT_VISIT);
596 
597  mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
598  blocks[i] = l_iter->head.data;
599 
600  } while ((void)i++, (l_iter = l_iter->next) != l_first);
601 
602  for (i = 0; i < edge_net_len; i++) {
603  BM_ITER_ELEM (v, &iter, edge_net[i], BM_VERTS_OF_EDGE) {
604  if (!BM_ELEM_API_FLAG_TEST(v, VERT_VISIT)) {
605  BMIter liter;
606 
607  BM_ELEM_API_FLAG_ENABLE(v, VERT_VISIT);
608 
609  /* interpolate this loop, then copy to the rest */
610  l_first = NULL;
611 
612  BM_ITER_ELEM (l_iter, &liter, v, BM_LOOPS_OF_VERT) {
613  if (BM_ELEM_API_FLAG_TEST(l_iter->f, FACE_NET)) {
614  if (l_first == NULL) {
615  mul_v2_m3v3(co, axis_mat, v->co);
616  interp_weights_poly_v2(w, cos_2d, f->len, co);
617  CustomData_bmesh_interp(
618  &bm->ldata, (const void **)blocks, w, NULL, f->len, l_iter->head.data);
619  l_first = l_iter;
620  }
621  else {
622  CustomData_bmesh_copy_data(
623  &bm->ldata, &bm->ldata, l_first->head.data, &l_iter->head.data);
624  }
625  }
626  }
627  }
628  }
629  }
630  }
631 
632  /* cleanup */
633  for (i = 0; i < edge_net_len; i++) {
634  BM_ELEM_API_FLAG_DISABLE(edge_net[i], EDGE_NET);
635  /* from interp only */
636  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_VISIT);
637  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_VISIT);
638  }
639  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
640  do {
641  BM_ELEM_API_FLAG_DISABLE(l_iter->e, EDGE_NET);
642  /* from interp only */
643  BM_ELEM_API_FLAG_DISABLE(l_iter->v, VERT_VISIT);
644  } while ((l_iter = l_iter->next) != l_first);
645 
646  if (BLI_array_len(face_arr)) {
647  bmesh_face_swap_data(f, face_arr[0]);
648  BM_face_kill(bm, face_arr[0]);
649  face_arr[0] = f;
650  }
651  else {
652  BM_ELEM_API_FLAG_DISABLE(f, FACE_NET);
653  }
654 
655  for (i = 0; i < BLI_array_len(face_arr); i++) {
656  BM_ELEM_API_FLAG_DISABLE(face_arr[i], FACE_NET);
657  }
658 
659  if (r_face_arr) {
660  *r_face_arr = face_arr;
661  *r_face_arr_len = BLI_array_len(face_arr);
662  }
663  else {
664  if (face_arr) {
665  MEM_freeN(face_arr);
666  }
667  }
668 
669  MEM_freeN(edge_order);
670  MEM_freeN(face_verts);
671  MEM_freeN(vert_queue);
672 
673  return true;
674 }
675 
676 #undef FACE_NET
677 #undef VERT_VISIT
678 #undef EDGE_NET
679 
680 /** \} */
681 
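For orientation, here is a minimal caller sketch (not part of the original file) showing how BM_face_split_edgenet() is typically driven once the edge-net edges exist; the bmesh 'bm', the face 'f' and the 'edge_net' array are assumed to be valid, and the returned face array is owned by the caller.

/* Hypothetical example, not from this file: split 'f' by an edge-net and tag the results. */
static void example_split_face_by_net(BMesh *bm, BMFace *f, BMEdge **edge_net, int edge_net_len)
{
  BMFace **face_arr = NULL;
  int face_arr_len = 0;

  if (BM_face_split_edgenet(bm, f, edge_net, edge_net_len, &face_arr, &face_arr_len)) {
    /* 'face_arr[0]' re-uses 'f' (see the swap near the end of the function),
     * the remaining entries are newly created faces. */
    for (int i = 0; i < face_arr_len; i++) {
      BM_elem_flag_enable(face_arr[i], BM_ELEM_TAG);
    }
  }

  /* The face array is allocated with MEM_mallocN(), so the caller frees it. */
  if (face_arr) {
    MEM_freeN(face_arr);
  }
}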
682 /* -------------------------------------------------------------------- */
683 /** \name Face Split Edge-Net Connect Islands
684  * \{ */
685 
696 #define USE_PARTIAL_CONNECT
697 
698 #define VERT_IS_VALID BM_ELEM_INTERNAL_TAG
699 
700 /* can be X or Y */
701 #define SORT_AXIS 0
702 
703 BLI_INLINE bool edge_isect_verts_point_2d(const BMEdge *e,
704  const BMVert *v_a,
705  const BMVert *v_b,
706  float r_isect[2])
707 {
708  /* This bias seems like it could be too large,
709  * mostly its not needed, see T52329 for example where it is. */
710  const float endpoint_bias = 1e-4f;
711  return ((isect_seg_seg_v2_point_ex(
712  v_a->co, v_b->co, e->v1->co, e->v2->co, endpoint_bias, r_isect) == 1) &&
713  ((e->v1 != v_a) && (e->v2 != v_a) && (e->v1 != v_b) && (e->v2 != v_b)));
714 }
715 
716 BLI_INLINE int axis_pt_cmp(const float pt_a[2], const float pt_b[2])
717 {
718  if (pt_a[0] < pt_b[0]) {
719  return -1;
720  }
721  if (pt_a[0] > pt_b[0]) {
722  return 1;
723  }
724  if (pt_a[1] < pt_b[1]) {
725  return -1;
726  }
727  if (pt_a[1] > pt_b[1]) {
728  return 1;
729  }
730  return 0;
731 }
732 
738 struct EdgeGroupIsland {
739  LinkNode edge_links; /* keep first */
740  uint vert_len, edge_len;
741 
742  /* Set the following vars once we have >1 groups */
743 
744  /* when an edge in a previous group connects to this one,
745  * so there's no need to create one pointing back. */
746  uint has_prev_edge : 1;
747 
748  /* verts in the group which has the lowest & highest values,
749  * the lower vertex is connected to the first edge */
750  struct {
751  BMVert *min, *max;
752  /* used for sorting only */
753  float min_axis[2];
754  float max_axis[2];
755  } vert_span;
756 };
757 
758 static int group_min_cmp_fn(const void *p1, const void *p2)
759 {
760  const struct EdgeGroupIsland *g1 = *(struct EdgeGroupIsland **)p1;
761  const struct EdgeGroupIsland *g2 = *(struct EdgeGroupIsland **)p2;
762  /* min->co[SORT_AXIS] hasn't been applied yet */
763  int test = axis_pt_cmp(g1->vert_span.min_axis, g2->vert_span.min_axis);
764  if (UNLIKELY(test == 0)) {
765  test = axis_pt_cmp(g1->vert_span.max_axis, g2->vert_span.max_axis);
766  }
767  return test;
768 }
769 
770 struct Edges_VertVert_BVHTreeTest {
771  float dist_orig;
772  BMEdge **edge_arr;
773 
774  BMVert *v_origin;
775  BMVert *v_other;
776 
777  const uint *vert_range;
778 };
779 
780 struct Edges_VertRay_BVHTreeTest {
781  BMEdge **edge_arr;
782 
783  BMVert *v_origin;
784 
785  const uint *vert_range;
786 };
787 
788 static void bvhtree_test_edges_isect_2d_vert_cb(void *user_data,
789  int index,
790  const BVHTreeRay *UNUSED(ray),
791  BVHTreeRayHit *hit)
792 {
793  struct Edges_VertVert_BVHTreeTest *data = user_data;
794  const BMEdge *e = data->edge_arr[index];
795  const int v1_index = BM_elem_index_get(e->v1);
796  float co_isect[2];
797 
798  if (edge_isect_verts_point_2d(e, data->v_origin, data->v_other, co_isect)) {
799  const float t = line_point_factor_v2(co_isect, data->v_origin->co, data->v_other->co);
800  const float dist_new = data->dist_orig * t;
801  /* avoid float precision issues, possible this is greater,
802  * check above zero to allow some overlap
803  * (and needed for partial-connect which will overlap vertices) */
804  if (LIKELY((dist_new < hit->dist) && (dist_new > 0.0f))) {
805  /* v1/v2 will both be in the same group */
806  if (v1_index < (int)data->vert_range[0] || v1_index >= (int)data->vert_range[1]) {
807  hit->dist = dist_new;
808  hit->index = index;
809  }
810  }
811  }
812 }
813 
814 static void bvhtree_test_edges_isect_2d_ray_cb(void *user_data,
815  int index,
816  const BVHTreeRay *ray,
817  BVHTreeRayHit *hit)
818 {
819  struct Edges_VertRay_BVHTreeTest *data = user_data;
820  const BMEdge *e = data->edge_arr[index];
821 
822  /* direction is normalized, so this will be the distance */
823  float dist_new;
824  if (isect_ray_seg_v2(
825  data->v_origin->co, ray->direction, e->v1->co, e->v2->co, &dist_new, NULL)) {
826  /* avoid float precision issues, possible this is greater,
827  * check above zero to allow some overlap
828  * (and needed for partial-connect which will overlap vertices) */
829  if (LIKELY(dist_new < hit->dist && (dist_new > 0.0f))) {
830  if (e->v1 != data->v_origin && e->v2 != data->v_origin) {
831  const int v1_index = BM_elem_index_get(e->v1);
832  /* v1/v2 will both be in the same group */
833  if (v1_index < (int)data->vert_range[0] || v1_index >= (int)data->vert_range[1]) {
834  hit->dist = dist_new;
835  hit->index = index;
836  }
837  }
838  }
839  }
840 }
841 
848 struct EdgeGroup_FindConnection_Args {
849  BVHTree *bvhtree;
850  BMEdge **edge_arr;
851  uint edge_arr_len;
852 
853  BMEdge **edge_arr_new;
854  uint edge_arr_new_len;
855 
856  const uint *vert_range;
857 };
858 
859 static BMEdge *test_edges_isect_2d_vert(const struct EdgeGroup_FindConnection_Args *args,
860  BMVert *v_origin,
861  BMVert *v_other)
862 {
863  int index;
864 
865  BVHTreeRayHit hit = {0};
866  float dir[3];
867 
868  sub_v2_v2v2(dir, v_other->co, v_origin->co);
869  dir[2] = 0.0f;
870  hit.index = -1;
871  hit.dist = normalize_v2(dir);
872 
873  struct Edges_VertVert_BVHTreeTest user_data = {0};
874  user_data.dist_orig = hit.dist;
875  user_data.edge_arr = args->edge_arr;
876  user_data.v_origin = v_origin;
877  user_data.v_other = v_other;
878  user_data.vert_range = args->vert_range;
879 
880  index = BLI_bvhtree_ray_cast_ex(args->bvhtree,
881  v_origin->co,
882  dir,
883  0.0f,
884  &hit,
885  bvhtree_test_edges_isect_2d_vert_cb,
886  &user_data,
887  0);
888 
889  BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
890 
891  /* check existing connections (no spatial optimization here since we're continually adding). */
892  if (LIKELY(index == -1)) {
893  float t_best = 1.0f;
894  for (uint i = 0; i < args->edge_arr_new_len; i++) {
895  float co_isect[2];
896  if (UNLIKELY(
897  edge_isect_verts_point_2d(args->edge_arr_new[i], v_origin, v_other, co_isect))) {
898  const float t_test = line_point_factor_v2(co_isect, v_origin->co, v_other->co);
899  if (t_test < t_best) {
900  t_best = t_test;
901 
902  e_hit = args->edge_arr_new[i];
903  }
904  }
905  }
906  }
907 
908  return e_hit;
909 }
910 
915 static BMEdge *test_edges_isect_2d_ray(const struct EdgeGroup_FindConnection_Args *args,
916  BMVert *v_origin,
917  const float dir[3])
918 {
919  int index;
920  BVHTreeRayHit hit = {0};
921 
922  BLI_ASSERT_UNIT_V2(dir);
923 
924  hit.index = -1;
925  hit.dist = BVH_RAYCAST_DIST_MAX;
926 
927  struct Edges_VertRay_BVHTreeTest user_data = {0};
928  user_data.edge_arr = args->edge_arr;
929  user_data.v_origin = v_origin;
930  user_data.vert_range = args->vert_range;
931 
932  index = BLI_bvhtree_ray_cast_ex(args->bvhtree,
933  v_origin->co,
934  dir,
935  0.0f,
936  &hit,
937  bvhtree_test_edges_isect_2d_ray_cb,
938  &user_data,
939  0);
940 
941  BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
942 
943  /* check existing connections (no spatial optimization here since we're continually adding). */
944  if (LIKELY(index != -1)) {
945  for (uint i = 0; i < args->edge_arr_new_len; i++) {
946  BMEdge *e = args->edge_arr_new[i];
947  float dist_new;
948  if (isect_ray_seg_v2(v_origin->co, dir, e->v1->co, e->v2->co, &dist_new, NULL)) {
949  if (e->v1 != v_origin && e->v2 != v_origin) {
950  /* avoid float precision issues, possible this is greater */
951  if (LIKELY(dist_new < hit.dist)) {
952  hit.dist = dist_new;
953 
954  e_hit = args->edge_arr_new[i];
955  }
956  }
957  }
958  }
959  }
960 
961  return e_hit;
962 }
963 
964 static int bm_face_split_edgenet_find_connection(const struct EdgeGroup_FindConnection_Args *args,
965  BMVert *v_origin,
966  /* false = negative, true = positive */
967  bool direction_sign)
968 {
983  const float dir[3] = {[SORT_AXIS] = direction_sign ? 1.0f : -1.0f};
984  BMEdge *e_hit = test_edges_isect_2d_ray(args, v_origin, dir);
985  BMVert *v_other = NULL;
986 
987  if (e_hit) {
988  BMVert *v_other_fallback = NULL;
989 
990  BLI_SMALLSTACK_DECLARE(vert_search, BMVert *);
991 
992  /* ensure we never add verts multiple times (not all that likely - but possible) */
993  BLI_SMALLSTACK_DECLARE(vert_blacklist, BMVert *);
994 
995  do {
996  BMVert *v_pair[2];
997  /* ensure the closest vertex is popped back off the stack first */
998  if (len_squared_v2v2(v_origin->co, e_hit->v1->co) >
999  len_squared_v2v2(v_origin->co, e_hit->v2->co)) {
1000  ARRAY_SET_ITEMS(v_pair, e_hit->v1, e_hit->v2);
1001  }
1002  else {
1003  ARRAY_SET_ITEMS(v_pair, e_hit->v2, e_hit->v1);
1004  }
1005 
1006  for (int j = 0; j < 2; j++) {
1007  BMVert *v_iter = v_pair[j];
1008  if (BM_elem_flag_test(v_iter, VERT_IS_VALID)) {
1009  if (direction_sign ? (v_iter->co[SORT_AXIS] > v_origin->co[SORT_AXIS]) :
1010  (v_iter->co[SORT_AXIS] < v_origin->co[SORT_AXIS])) {
1011  BLI_SMALLSTACK_PUSH(vert_search, v_iter);
1012  BLI_SMALLSTACK_PUSH(vert_blacklist, v_iter);
1013  BM_elem_flag_disable(v_iter, VERT_IS_VALID);
1014  }
1015  }
1016  }
1017  v_other_fallback = v_other;
1018 
1019  } while ((v_other = BLI_SMALLSTACK_POP(vert_search)) &&
1020  (e_hit = test_edges_isect_2d_vert(args, v_origin, v_other)));
1021 
1022  if (v_other == NULL) {
1023  printf("Using fallback\n");
1024  v_other = v_other_fallback;
1025  }
1026 
1027  /* reset the blacklist flag, for future use */
1028  BMVert *v;
1029  while ((v = BLI_SMALLSTACK_POP(vert_blacklist))) {
1030  BM_elem_flag_enable(v, VERT_IS_VALID);
1031  }
1032  }
1033 
1034  /* if we reach this line, v_other is either the best vertex or it's NULL */
1035  return v_other ? BM_elem_index_get(v_other) : -1;
1036 }
1037 
1042 #ifdef USE_PARTIAL_CONNECT
1043 
1048 static bool test_tagged_and_notface(BMEdge *e, void *fptr)
1049 {
1050  BMFace *f = (BMFace *)fptr;
1051  return !BM_elem_flag_test(e, BM_ELEM_INTERNAL_TAG) && !BM_edge_in_face(e, f);
1052 }
1053 
1061 static BMVert *bm_face_split_edgenet_partial_connect(BMesh *bm, BMVert *v_delimit, BMFace *f)
1062 {
1063  /* -------------------------------------------------------------------- */
1064  /* Initial check that we may be a delimiting vert (keep this fast) */
1065 
1066  /* initial check - see if we have 3+ flagged edges attached to 'v_delimit'
1067  * if not, we can early exit */
1068  LinkNode *e_delimit_list = NULL;
1069  uint e_delimit_list_len = 0;
1070 
1071 # define EDGE_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1072 # define VERT_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1073 
1074 # define FOREACH_VERT_EDGE(v_, e_, body_) \
1075  { \
1076  BMEdge *e_ = v_->e; \
1077  do { \
1078  body_ \
1079  } while ((e_ = BM_DISK_EDGE_NEXT(e_, v_)) != v_->e); \
1080  } \
1081  ((void)0)
1082 
1083  /* start with face edges, since we need to split away wire-only edges */
1084  BMEdge *e_face_init = NULL;
1085 
1086  FOREACH_VERT_EDGE(v_delimit, e_iter, {
1087  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1088  BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1089  BLI_linklist_prepend_alloca(&e_delimit_list, e_iter);
1090  e_delimit_list_len++;
1091  if (e_iter->l != NULL && BM_edge_in_face(e_iter, f)) {
1092  e_face_init = e_iter;
1093  }
1094  }
1095  });
1096 
1097  /* skip typical edge-chain verts */
1098  if (LIKELY(e_delimit_list_len <= 2)) {
1099  return NULL;
1100  }
1101 
1102  /* -------------------------------------------------------------------- */
1103  /* Complicated stuff starts now! */
1104 
1105  /* Store connected vertices for restoring the flag */
1106  LinkNode *vert_stack = NULL;
1107  BLI_linklist_prepend_alloca(&vert_stack, v_delimit);
1108  BM_elem_flag_disable(v_delimit, VERT_NOT_IN_STACK);
1109 
1110  /* Walk the net... */
1111  {
1112  BLI_SMALLSTACK_DECLARE(search, BMVert *);
1113  BMVert *v_other = BM_edge_other_vert(e_face_init ? e_face_init : v_delimit->e, v_delimit);
1114 
1115  BLI_SMALLSTACK_PUSH(search, v_other);
1116  if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
1117  BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1118  BLI_linklist_prepend_alloca(&vert_stack, v_other);
1119  }
1120 
1121  while ((v_other = BLI_SMALLSTACK_POP(search))) {
1122  BLI_assert(BM_elem_flag_test(v_other, VERT_NOT_IN_STACK) == false);
1123  BMEdge *e_iter = v_other->e;
1124  do {
1125  BMVert *v_step = BM_edge_other_vert(e_iter, v_other);
1126  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1127  if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK)) {
1128  BM_elem_flag_disable(v_step, VERT_NOT_IN_STACK);
1129  BLI_SMALLSTACK_PUSH(search, v_step);
1130  BLI_linklist_prepend_alloca(&vert_stack, v_step);
1131  }
1132  }
1133  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_other)) != v_other->e);
1134  }
1135  }
1136 
1137  /* Detect if this is a delimiter
1138  * by checking if we didn't walk any of edges connected to 'v_delimit'. */
1139  bool is_delimit = false;
1140  FOREACH_VERT_EDGE(v_delimit, e_iter, {
1141  BMVert *v_step = BM_edge_other_vert(e_iter, v_delimit);
1142  if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK) && !BM_edge_in_face(e_iter, f)) {
1143  is_delimit = true; /* if one vertex is valid - we have a mix */
1144  }
1145  else {
1146  /* match the vertex flag (only for edges around 'v_delimit') */
1147  BM_elem_flag_enable(e_iter, EDGE_NOT_IN_STACK);
1148  }
1149  });
1150 
1151 # undef FOREACH_VERT_EDGE
1152 
1153  /* Execute the split */
1154  BMVert *v_split = NULL;
1155  if (is_delimit) {
1156  v_split = BM_vert_create(bm, v_delimit->co, NULL, 0);
1157  BM_vert_separate_tested_edges(bm, v_split, v_delimit, test_tagged_and_notface, f);
1158  BM_elem_flag_enable(v_split, VERT_NOT_IN_STACK);
1159 
1160  BLI_assert(v_delimit->e != NULL);
1161 
1162  /* Degenerate, avoid eternal loop, see: T59074. */
1163 # if 0
1164  BLI_assert(v_split->e != NULL);
1165 # else
1166  if (UNLIKELY(v_split->e == NULL)) {
1167  BM_vert_kill(bm, v_split);
1168  v_split = NULL;
1169  }
1170 # endif
1171  }
1172 
1173  /* Restore flags */
1174  do {
1175  BM_elem_flag_enable((BMVert *)vert_stack->link, VERT_NOT_IN_STACK);
1176  } while ((vert_stack = vert_stack->next));
1177 
1178  do {
1179  BM_elem_flag_enable((BMEdge *)e_delimit_list->link, EDGE_NOT_IN_STACK);
1180  } while ((e_delimit_list = e_delimit_list->next));
1181 
1182 # undef EDGE_NOT_IN_STACK
1183 # undef VERT_NOT_IN_STACK
1184 
1185  return v_split;
1186 }
1187 
1191 static bool bm_vert_partial_connect_check_overlap(const int *remap,
1192  const int v_a_index,
1193  const int v_b_index)
1194 {
1195  /* Connected to each other. */
1196  if (UNLIKELY((remap[v_a_index] == v_b_index) || (remap[v_b_index] == v_a_index))) {
1197  return true;
1198  }
1199  return false;
1200 }
1201 
1202 #endif /* USE_PARTIAL_CONNECT */
1203 
1204 bool BM_face_split_edgenet_connect_islands(BMesh *bm,
1205  BMFace *f,
1206  BMEdge **edge_net_init,
1207  const uint edge_net_init_len,
1208  bool use_partial_connect,
1209  MemArena *mem_arena,
1210  BMEdge ***r_edge_net_new,
1211  uint *r_edge_net_new_len)
1212 {
1213  /* -------------------------------------------------------------------- */
1226  const uint edge_arr_len = (uint)edge_net_init_len + (uint)f->len;
1227  BMEdge **edge_arr = BLI_memarena_alloc(mem_arena, sizeof(*edge_arr) * edge_arr_len);
1228  bool ok = false;
1229  uint edge_net_new_len = (uint)edge_net_init_len;
1230 
1231  memcpy(edge_arr, edge_net_init, sizeof(*edge_arr) * (size_t)edge_net_init_len);
1232 
1233  /* _must_ clear on exit */
1234 #define EDGE_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1235 #define VERT_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1236 
1237  {
1238  uint i = edge_net_init_len;
1239  BMLoop *l_iter, *l_first;
1240  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
1241  do {
1244  edge_arr[i++] = l_iter->e;
1245  } while ((l_iter = l_iter->next) != l_first);
1246  BLI_assert(i == edge_arr_len);
1247  }
1248 
1249  for (uint i = 0; i < edge_arr_len; i++) {
1250  BM_elem_flag_enable(edge_arr[i], EDGE_NOT_IN_STACK);
1251  BM_elem_flag_enable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1252  BM_elem_flag_enable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1253  }
1254 
1255 #ifdef USE_PARTIAL_CONNECT
1256  /* Split-out delimiting vertices */
1257  struct TempVertPair {
1258  struct TempVertPair *next;
1259  BMVert *v_temp;
1260  BMVert *v_orig;
1261  };
1262 
1263  struct {
1264  struct TempVertPair *list;
1265  uint len;
1266  int *remap; /* temp -> orig mapping */
1267  } temp_vert_pairs = {NULL};
1268 
1269  if (use_partial_connect) {
1270  for (uint i = 0; i < edge_net_init_len; i++) {
1271  for (uint j = 0; j < 2; j++) {
1272  BMVert *v_delimit = (&edge_arr[i]->v1)[j];
1273  BMVert *v_other;
1274 
1275  /* NOTE: remapping will _never_ map a vertex to an already mapped vertex. */
1276  while (UNLIKELY((v_other = bm_face_split_edgenet_partial_connect(bm, v_delimit, f)))) {
1277  struct TempVertPair *tvp = BLI_memarena_alloc(mem_arena, sizeof(*tvp));
1278  tvp->next = temp_vert_pairs.list;
1279  tvp->v_orig = v_delimit;
1280  tvp->v_temp = v_other;
1281  temp_vert_pairs.list = tvp;
1282  temp_vert_pairs.len++;
1283  }
1284  }
1285  }
1286 
1287  if (temp_vert_pairs.len == 0) {
1288  use_partial_connect = false;
1289  }
1290  }
1291 #endif /* USE_PARTIAL_CONNECT */
1292 
1293  uint group_arr_len = 0;
1294  LinkNode *group_head = NULL;
1295  {
1296  /* scan 'edge_arr' backwards so the outer face boundary is handled first
1297  * (since its likely to be the largest) */
1298  uint edge_index = (edge_arr_len - 1);
1299  uint edge_in_group_tot = 0;
1300 
1301  BLI_SMALLSTACK_DECLARE(vstack, BMVert *);
1302 
1303  while (true) {
1304  LinkNode *edge_links = NULL;
1305  uint unique_verts_in_group = 0, unique_edges_in_group = 0;
1306 
1307  /* list of groups */
1308  BLI_assert(BM_elem_flag_test(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK));
1309  BLI_SMALLSTACK_PUSH(vstack, edge_arr[edge_index]->v1);
1310  BM_elem_flag_disable(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK);
1311 
1312  BMVert *v_iter;
1313  while ((v_iter = BLI_SMALLSTACK_POP(vstack))) {
1314  unique_verts_in_group++;
1315 
1316  BMEdge *e_iter = v_iter->e;
1317  do {
1318  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1319  BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
1320  unique_edges_in_group++;
1321 
1322  BLI_linklist_prepend_arena(&edge_links, e_iter, mem_arena);
1323 
1324  BMVert *v_other = BM_edge_other_vert(e_iter, v_iter);
1325  if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
1326  BLI_SMALLSTACK_PUSH(vstack, v_other);
1327  BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
1328  }
1329  }
1330  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_iter)) != v_iter->e);
1331  }
1332 
1333  struct EdgeGroupIsland *g = BLI_memarena_alloc(mem_arena, sizeof(*g));
1334  g->vert_len = unique_verts_in_group;
1335  g->edge_len = unique_edges_in_group;
1336  edge_in_group_tot += unique_edges_in_group;
1337 
1338  BLI_linklist_prepend_nlink(&group_head, edge_links, (LinkNode *)g);
1339 
1340  group_arr_len++;
1341 
1342  if (edge_in_group_tot == edge_arr_len) {
1343  break;
1344  }
1345 
1346  /* skip edges in the stack */
1347  while (BM_elem_flag_test(edge_arr[edge_index], EDGE_NOT_IN_STACK) == false) {
1348  BLI_assert(edge_index != 0);
1349  edge_index--;
1350  }
1351  }
1352  }
1353 
1354  /* single group - no holes */
1355  if (group_arr_len == 1) {
1356  goto finally;
1357  }
1358 
1359  /* -------------------------------------------------------------------- */
1360  /* Previous checks need to be kept fast, since they will run very often,
1361  * now we know there are holes, so calculate a spatial lookup info and
1362  * other per-group data.
1363  */
1364 
1365  float axis_mat[3][3];
1366  axis_dominant_v3_to_m3(axis_mat, f->no);
1367 
1368 #define VERT_IN_ARRAY BM_ELEM_INTERNAL_TAG
1369 
1370  struct EdgeGroupIsland **group_arr = BLI_memarena_alloc(mem_arena,
1371  sizeof(*group_arr) * group_arr_len);
1372  uint vert_arr_len = 0;
1373  /* sort groups by lowest value vertex */
1374  {
1375  /* fill 'groups_arr' in reverse order so the boundary face is first */
1376  struct EdgeGroupIsland **group_arr_p = &group_arr[group_arr_len];
1377 
1378  for (struct EdgeGroupIsland *g = (void *)group_head; g;
1379  g = (struct EdgeGroupIsland *)g->edge_links.next) {
1380  LinkNode *edge_links = g->edge_links.link;
1381 
1382  /* init with *any* different verts */
1383  g->vert_span.min = ((BMEdge *)edge_links->link)->v1;
1384  g->vert_span.max = ((BMEdge *)edge_links->link)->v2;
1385  float min_axis[2] = {FLT_MAX, FLT_MAX};
1386  float max_axis[2] = {-FLT_MAX, -FLT_MAX};
1387 
1388  do {
1389  BMEdge *e = edge_links->link;
1390  BLI_assert(e->head.htype == BM_EDGE);
1391 
1392  for (int j = 0; j < 2; j++) {
1393  BMVert *v_iter = (&e->v1)[j];
1394  BLI_assert(v_iter->head.htype == BM_VERT);
1395  /* ideally we could use 'v_iter->co[SORT_AXIS]' here,
1396  * but we need to sort the groups before setting the vertex array order */
1397  const float axis_value[2] = {
1398 #if SORT_AXIS == 0
1399  dot_m3_v3_row_x(axis_mat, v_iter->co),
1400  dot_m3_v3_row_y(axis_mat, v_iter->co),
1401 #else
1402  dot_m3_v3_row_y(axis_mat, v_iter->co),
1403  dot_m3_v3_row_x(axis_mat, v_iter->co),
1404 #endif
1405  };
1406 
1407  if (axis_pt_cmp(axis_value, min_axis) == -1) {
1408  g->vert_span.min = v_iter;
1409  copy_v2_v2(min_axis, axis_value);
1410  }
1411  if (axis_pt_cmp(axis_value, max_axis) == 1) {
1412  g->vert_span.max = v_iter;
1413  copy_v2_v2(max_axis, axis_value);
1414  }
1415  }
1416  } while ((edge_links = edge_links->next));
1417 
1418  copy_v2_v2(g->vert_span.min_axis, min_axis);
1419  copy_v2_v2(g->vert_span.max_axis, max_axis);
1420 
1421  g->has_prev_edge = false;
1422 
1423  vert_arr_len += g->vert_len;
1424 
1425  *(--group_arr_p) = g;
1426  }
1427  }
1428 
1429  qsort(group_arr, group_arr_len, sizeof(*group_arr), group_min_cmp_fn);
1430 
1431  /* we don't know how many unique verts there are connecting the edges, so over-alloc */
1432  BMVert **vert_arr = BLI_memarena_alloc(mem_arena, sizeof(*vert_arr) * vert_arr_len);
1433  /* map vertex -> group index */
1434  uint *verts_group_table = BLI_memarena_alloc(mem_arena,
1435  sizeof(*verts_group_table) * vert_arr_len);
1436 
1437  float(*vert_coords_backup)[3] = BLI_memarena_alloc(mem_arena,
1438  sizeof(*vert_coords_backup) * vert_arr_len);
1439 
1440  {
1441  /* relative location, for higher precision calculations */
1442  const float f_co_ref[3] = {UNPACK3(BM_FACE_FIRST_LOOP(f)->v->co)};
1443 
1444  int v_index = 0; /* global vert index */
1445  for (uint g_index = 0; g_index < group_arr_len; g_index++) {
1446  LinkNode *edge_links = group_arr[g_index]->edge_links.link;
1447  do {
1448  BMEdge *e = edge_links->link;
1449  for (int j = 0; j < 2; j++) {
1450  BMVert *v_iter = (&e->v1)[j];
1451  if (!BM_elem_flag_test(v_iter, VERT_IN_ARRAY)) {
1452  BM_elem_flag_enable(v_iter, VERT_IN_ARRAY);
1453 
1454  /* not nice, but alternatives aren't much better :S */
1455  {
1456  copy_v3_v3(vert_coords_backup[v_index], v_iter->co);
1457 
1458  /* for higher precision */
1459  sub_v3_v3(v_iter->co, f_co_ref);
1460 
1461  float co_2d[2];
1462  mul_v2_m3v3(co_2d, axis_mat, v_iter->co);
1463  v_iter->co[0] = co_2d[0];
1464  v_iter->co[1] = co_2d[1];
1465  v_iter->co[2] = 0.0f;
1466  }
1467 
1468  BM_elem_index_set(v_iter, v_index); /* set_dirty */
1469 
1470  vert_arr[v_index] = v_iter;
1471  verts_group_table[v_index] = g_index;
1472  v_index++;
1473  }
1474  }
1475  } while ((edge_links = edge_links->next));
1476  }
1477  }
1478 
1479  bm->elem_index_dirty |= BM_VERT;
1480 
1481  /* Now create bvh tree
1482  *
1483  * Note that a large epsilon is used because meshes with dimensions of around 100+ need it.
1484  * see T52329. */
1485  BVHTree *bvhtree = BLI_bvhtree_new(edge_arr_len, 1e-4f, 8, 8);
1486  for (uint i = 0; i < edge_arr_len; i++) {
1487  const float e_cos[2][3] = {
1488  {UNPACK2(edge_arr[i]->v1->co), 0.0f},
1489  {UNPACK2(edge_arr[i]->v2->co), 0.0f},
1490  };
1491  BLI_bvhtree_insert(bvhtree, i, (const float *)e_cos, 2);
1492  }
1493  BLI_bvhtree_balance(bvhtree);
1494 
1495 #ifdef USE_PARTIAL_CONNECT
1496  if (use_partial_connect) {
1497  /* needs to be done once the vertex indices have been written into */
1498  temp_vert_pairs.remap = BLI_memarena_alloc(mem_arena,
1499  sizeof(*temp_vert_pairs.remap) * vert_arr_len);
1500  copy_vn_i(temp_vert_pairs.remap, vert_arr_len, -1);
1501 
1502  struct TempVertPair *tvp = temp_vert_pairs.list;
1503  do {
1504  temp_vert_pairs.remap[BM_elem_index_get(tvp->v_temp)] = BM_elem_index_get(tvp->v_orig);
1505  } while ((tvp = tvp->next));
1506  }
1507 #endif /* USE_PARTIAL_CONNECT */
1508 
1509  /* Create connections between groups */
1510 
1511  /* may be an over-alloc, but not by much */
1512  edge_net_new_len = (uint)edge_net_init_len + ((group_arr_len - 1) * 2);
1513  BMEdge **edge_net_new = BLI_memarena_alloc(mem_arena, sizeof(*edge_net_new) * edge_net_new_len);
1514  memcpy(edge_net_new, edge_net_init, sizeof(*edge_net_new) * (size_t)edge_net_init_len);
1515 
1516  {
1517  uint edge_net_new_index = edge_net_init_len;
1518  /* start-end of the verts in the current group */
1519 
1520  uint vert_range[2];
1521 
1522  vert_range[0] = 0;
1523  vert_range[1] = group_arr[0]->vert_len;
1524 
1525  struct EdgeGroup_FindConnection_Args args = {
1526  .bvhtree = bvhtree,
1527 
1528  /* use the new edge array so we can scan edges which have been added */
1529  .edge_arr = edge_arr,
1530  .edge_arr_len = edge_arr_len,
1531 
1532  /* we only want to check newly created edges */
1533  .edge_arr_new = edge_net_new + edge_net_init_len,
1534  .edge_arr_new_len = 0,
1535 
1536  .vert_range = vert_range,
1537  };
1538 
1539  for (uint g_index = 1; g_index < group_arr_len; g_index++) {
1540  struct EdgeGroupIsland *g = group_arr[g_index];
1541 
1542  /* The range of verts this group uses in 'verts_arr' (not including the last index). */
1543  vert_range[0] = vert_range[1];
1544  vert_range[1] += g->vert_len;
1545 
1546  if (g->has_prev_edge == false) {
1547  BMVert *v_origin = g->vert_span.min;
1548  /* Index of BMVert for the edge group connection with `v_origin`. */
1549  const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, false);
1550  // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1551 
1552  /* only for degenerate geometry */
1553  if (index_other != -1) {
1554 #ifdef USE_PARTIAL_CONNECT
1555  if ((use_partial_connect == false) ||
1556  (bm_vert_partial_connect_check_overlap(
1557  temp_vert_pairs.remap, BM_elem_index_get(v_origin), index_other) == false))
1558 #endif
1559  {
1560  BMVert *v_end = vert_arr[index_other];
1561 
1562  edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1563 #ifdef USE_PARTIAL_CONNECT
1564  BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1565 #endif
1566  edge_net_new_index++;
1567  args.edge_arr_new_len++;
1568  }
1569  }
1570  }
1571 
1572  {
1573  BMVert *v_origin = g->vert_span.max;
1574  /* Index of BMVert for the edge group connection with `v_origin`. */
1575  const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, true);
1576  // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1577 
1578  /* only for degenerate geometry */
1579  if (index_other != -1) {
1580 #ifdef USE_PARTIAL_CONNECT
1581  if ((use_partial_connect == false) ||
1582  (bm_vert_partial_connect_check_overlap(
1583  temp_vert_pairs.remap, BM_elem_index_get(v_origin), index_other) == false))
1584 #endif
1585  {
1586  BMVert *v_end = vert_arr[index_other];
1587  edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1588 #ifdef USE_PARTIAL_CONNECT
1589  BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1590 #endif
1591  edge_net_new_index++;
1592  args.edge_arr_new_len++;
1593  }
1594 
1595  /* tell the 'next' group it doesn't need to create its own back-link */
1596  uint g_index_other = verts_group_table[index_other];
1597  group_arr[g_index_other]->has_prev_edge = true;
1598  }
1599  }
1600  }
1601  BLI_assert(edge_net_new_len >= edge_net_new_index);
1602  edge_net_new_len = edge_net_new_index;
1603  }
1604 
1605  BLI_bvhtree_free(bvhtree);
1606 
1607  *r_edge_net_new = edge_net_new;
1608  *r_edge_net_new_len = edge_net_new_len;
1609  ok = true;
1610 
1611  for (uint i = 0; i < vert_arr_len; i++) {
1612  copy_v3_v3(vert_arr[i]->co, vert_coords_backup[i]);
1613  }
1614 
1615 finally:
1616 
1617 #ifdef USE_PARTIAL_CONNECT
1618  /* don't free 'vert_temp_pair_list', its part of the arena */
1619  if (use_partial_connect) {
1620 
1621  /* Sanity check: ensure we don't have connecting edges before splicing begins. */
1622 # ifdef DEBUG
1623  {
1624  struct TempVertPair *tvp = temp_vert_pairs.list;
1625  do {
1626  /* We must _never_ create connections here
1627  * (in case the islands can't have a connection at all). */
1628  BLI_assert(BM_edge_exists(tvp->v_orig, tvp->v_temp) == NULL);
1629  } while ((tvp = tvp->next));
1630  }
1631 # endif
1632 
1633  struct TempVertPair *tvp = temp_vert_pairs.list;
1634  do {
1635  /* its _very_ unlikely the edge exists,
1636  * however splicing may cause this. see: T48012 */
1637  if (!BM_edge_exists(tvp->v_orig, tvp->v_temp)) {
1638  BM_vert_splice(bm, tvp->v_orig, tvp->v_temp);
1639  }
1640  } while ((tvp = tvp->next));
1641 
1642  /* Remove edges which have become doubles since splicing vertices together,
1643  * its less trouble than detecting future-doubles on edge-creation. */
1644  for (uint i = edge_net_init_len; i < edge_net_new_len; i++) {
1645  while (BM_edge_find_double(edge_net_new[i])) {
1646  BM_edge_kill(bm, edge_net_new[i]);
1647  edge_net_new_len--;
1648  if (i == edge_net_new_len) {
1649  break;
1650  }
1651  edge_net_new[i] = edge_net_new[edge_net_new_len];
1652  }
1653  }
1654 
1655  *r_edge_net_new_len = edge_net_new_len;
1656  }
1657 #endif
1658 
1659  for (uint i = 0; i < edge_arr_len; i++) {
1660  BM_elem_flag_disable(edge_arr[i], EDGE_NOT_IN_STACK);
1661  BM_elem_flag_disable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1662  BM_elem_flag_disable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1663  }
1664 
1665 #undef VERT_IN_ARRAY
1666 #undef VERT_NOT_IN_STACK
1667 #undef EDGE_NOT_IN_STACK
1668 
1669  return ok;
1670 }
1671 
1672 #undef SORT_AXIS
1673 
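To close the listing, a short sketch (again not part of the original file) of how the two public entry points are meant to be combined: BM_face_split_edgenet_connect_islands() first bridges isolated islands (holes) with new edges allocated from a caller-supplied MemArena, and the resulting edge-net is then handed to BM_face_split_edgenet(); the helper name and inputs are illustrative assumptions.

/* Hypothetical example, not from this file: connect hole islands, then split the face.
 * Assumes 'bm', 'f' and 'edge_net' are valid and the net lies in the face's plane. */
static bool example_split_face_with_holes(BMesh *bm, BMFace *f, BMEdge **edge_net, uint edge_net_len)
{
  MemArena *mem_arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
  BMEdge **edge_net_new = NULL;
  uint edge_net_new_len = 0;
  bool ok = false;

  if (BM_face_split_edgenet_connect_islands(
          bm, f, edge_net, edge_net_len, false, mem_arena, &edge_net_new, &edge_net_new_len)) {
    BMFace **face_arr = NULL;
    int face_arr_len = 0;
    /* 'edge_net_new' lives in 'mem_arena', so only the face array needs freeing. */
    ok = BM_face_split_edgenet(bm, f, edge_net_new, (int)edge_net_new_len, &face_arr, &face_arr_len);
    if (face_arr) {
      MEM_freeN(face_arr);
    }
  }

  BLI_memarena_free(mem_arena);
  return ok;
}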