Blender  V3.3
bmesh_intersect_edges.c
Go to the documentation of this file.
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2019 Blender Foundation. All rights reserved. */
3 
8 #include "MEM_guardedalloc.h"
9 
10 #include "BLI_math.h"
11 #include "BLI_sort.h"
12 #include "BLI_stack.h"
13 
14 #include "BKE_bvhutils.h"
15 
16 #include "atomic_ops.h"
17 
18 #include "bmesh.h"
19 
20 #include "bmesh_intersect_edges.h" /* own include */
21 
22 //#define INTERSECT_EDGES_DEBUG
23 
24 #define KDOP_TREE_TYPE 4
25 #define KDOP_AXIS_LEN 14
26 #define BLI_STACK_PAIR_LEN (2 * KDOP_TREE_TYPE)
27 
28 /* -------------------------------------------------------------------- */
34 /* Callbacks for `BM_vert_pair_shared_face_cb` */
35 
39 
46 };
47 
49  BMLoop *l_a,
50  BMLoop *l_b,
51  void *userdata)
52 {
53  struct EDBMSplitBestFaceData *data = userdata;
54  float no[3];
55  copy_v3_v3(no, f->no);
56 
57  float min = dot_v3v3(l_a->v->co, no);
58  float max = dot_v3v3(l_b->v->co, no);
59  if (min > max) {
60  SWAP(float, min, max);
61  }
62 
63  BMEdge **e_iter = &data->edgenet[0];
64  BMEdge *e_next = data->edgenet[1];
65  BMVert *v_test = ELEM((*e_iter)->v1, e_next->v1, e_next->v2) ? (*e_iter)->v2 : (*e_iter)->v1;
66 
67  int verts_len = data->edgenet_len - 1;
68  for (int i = verts_len; i--; e_iter++) {
69  v_test = BM_edge_other_vert(*e_iter, v_test);
70  BLI_assert(v_test != NULL);
71  if (!BM_face_point_inside_test(f, v_test->co)) {
72  return false;
73  }
74  float dot = dot_v3v3(v_test->co, no);
75  if (dot < min) {
76  min = dot;
77  }
78  if (dot > max) {
79  max = dot;
80  }
81  }
82 
83  const float test_edgenet_range_on_face_normal = max - min;
84  if (test_edgenet_range_on_face_normal < data->best_edgenet_range_on_face_normal) {
85  data->best_edgenet_range_on_face_normal = test_edgenet_range_on_face_normal;
86  data->r_best_face = f;
87  }
88 
89  return false;
90 }
91 
92 /* find the best splittable face between the two vertices. */
94  BMLoop *l_a,
95  BMLoop *l_b,
96  void *userdata)
97 {
98  float(*data)[3] = userdata;
99  float *v_a_co = data[0];
100  float *v_a_b_dir = data[1];
101  const float range_min = -FLT_EPSILON;
102  const float range_max = 1.0f + FLT_EPSILON;
103 
104  float co[3];
105  float dir[3];
106  float lambda_b;
107 
108  copy_v3_v3(co, l_a->prev->v->co);
109  sub_v3_v3v3(dir, l_a->next->v->co, co);
110  if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, NULL, &lambda_b)) {
111  if (IN_RANGE(lambda_b, range_min, range_max)) {
112  return true;
113  }
114  copy_v3_v3(co, l_b->prev->v->co);
115  sub_v3_v3v3(dir, l_b->next->v->co, co);
116  if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, NULL, &lambda_b)) {
117  return IN_RANGE(lambda_b, range_min, range_max);
118  }
119  }
120  return false;
121 }
122 
124  BMVert *v_a, BMVert *v_b, BMEdge **edgenet, const int edgenet_len, const float epsilon)
125 {
127 
128  BLI_assert(v_a != v_b);
129 
130  BMLoop *dummy;
131  if (edgenet_len == 1) {
132  float data[2][3];
133  copy_v3_v3(data[0], v_b->co);
134  sub_v3_v3v3(data[1], v_a->co, data[0]);
136  v_a, v_b, false, bm_vert_pair_share_splittable_face_cb, &data, &dummy, &dummy);
138  }
139  else {
140  struct EDBMSplitBestFaceData data = {
141  .edgenet = edgenet,
142  .edgenet_len = edgenet_len,
143  .best_edgenet_range_on_face_normal = FLT_MAX,
144  .r_best_face = NULL,
145  };
147  v_a, v_b, true, bm_vert_pair_share_best_splittable_face_cb, &data, &dummy, &dummy);
148 
149  if (data.r_best_face) {
150  /* Check if the edgenet's range is smaller than the face's range. */
151  float no[3], min = FLT_MAX, max = -FLT_MAX;
152  copy_v3_v3(no, data.r_best_face->no);
153  BMVert *v_test;
154  BMIter f_iter;
155  BM_ITER_ELEM (v_test, &f_iter, data.r_best_face, BM_VERTS_OF_FACE) {
156  float dot = dot_v3v3(v_test->co, no);
157  if (dot < min) {
158  min = dot;
159  }
160  if (dot > max) {
161  max = dot;
162  }
163  }
164  float face_range_on_normal = max - min + 2 * epsilon;
165  if (face_range_on_normal < data.best_edgenet_range_on_face_normal) {
166  data.r_best_face = NULL;
167  }
168  }
169  r_best_face = data.r_best_face;
170  }
171 
172  return r_best_face;
173 }
174 
177 /* -------------------------------------------------------------------- */
184  union {
187  struct {
189  float lambda;
190  };
191  };
192 };
193 
194 /* -------------------------------------------------------------------- */
195 /* Overlap Callbacks */
196 
201  float dist_sq;
202  float dist_sq_sq;
203 };
204 
205 /* Utils */
206 
207 static void bm_vert_pair_elem_setup_ex(BMVert *v, struct EDBMSplitElem *r_pair_elem)
208 {
209  r_pair_elem->vert = v;
210 }
211 
213  float lambda,
214  int *r_data_cut_edges_len,
215  struct EDBMSplitElem *r_pair_elem)
216 {
217  r_pair_elem->edge = e;
218  r_pair_elem->lambda = lambda;
219 
220  /* Even though we have multiple atomic operations, this is fine here, since
221  * there is no dependency on order.
222  * The `e->head.index` check + atomic increment will ever be true once, as
223  * expected. We don't care which instance of the code actually ends up
224  * incrementing `r_data_cut_edge_len`, so there is no race condition here. */
225  if (atomic_fetch_and_add_int32(&e->head.index, 1) == 0) {
226  atomic_fetch_and_add_int32(r_data_cut_edges_len, 1);
227  }
228 }
229 
230 /* Util for Vert x Edge and Edge x Edge callbacks */
232  BMEdge *e,
233  const float co[3],
234  const float dir[3],
235  float lambda,
236  float data_dist_sq,
237  int *data_cut_edges_len,
238  struct EDBMSplitElem r_pair[2])
239 {
240  BMVert *e_v;
241  float dist_sq_vert_factor;
242 
243  if (!IN_RANGE_INCL(lambda, 0.0f, 1.0f)) {
244  /* Vert x Vert is already handled elsewhere. */
245  return false;
246  }
247 
248  if (lambda < 0.5f) {
249  e_v = e->v1;
250  dist_sq_vert_factor = lambda;
251  }
252  else {
253  e_v = e->v2;
254  dist_sq_vert_factor = 1.0f - lambda;
255  }
256 
257  if (v != e_v) {
258  float dist_sq_vert = square_f(dist_sq_vert_factor) * len_squared_v3(dir);
259  if (dist_sq_vert < data_dist_sq) {
260  /* Vert x Vert is already handled elsewhere. */
261  return false;
262  }
263 
264  float closest[3];
265  madd_v3_v3v3fl(closest, co, dir, lambda);
266 
267  float dist_sq = len_squared_v3v3(v->co, closest);
268  if (dist_sq < data_dist_sq) {
269  bm_edge_pair_elem_setup(e, lambda, data_cut_edges_len, &r_pair[0]);
270  bm_vert_pair_elem_setup_ex(v, &r_pair[1]);
271  return true;
272  }
273  }
274 
275  return false;
276 }
277 
278 /* Vertex x Vertex Callback */
279 
280 static bool bm_vertxvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
281 {
282  struct EDBMSplitData *data = userdata;
283  BMVert *v_a = BM_vert_at_index(data->bm, index_a);
284  BMVert *v_b = BM_vert_at_index(data->bm, index_b);
285 
286  struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
287 
288  bm_vert_pair_elem_setup_ex(v_a, &pair[0]);
289  bm_vert_pair_elem_setup_ex(v_b, &pair[1]);
290 
291  return true;
292 }
293 
294 static bool bm_vertxvert_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
295 {
296  if (index_a < index_b) {
297  return bm_vertxvert_isect_cb(userdata, index_a, index_b, thread);
298  }
299  return false;
300 }
301 
302 /* Vertex x Edge and Edge x Vertex Callbacks */
303 
304 static bool bm_edgexvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
305 {
306  struct EDBMSplitData *data = userdata;
307  BMEdge *e = BM_edge_at_index(data->bm, index_a);
308  BMVert *v = BM_vert_at_index(data->bm, index_b);
309 
310  float co[3], dir[3], lambda;
311  copy_v3_v3(co, e->v1->co);
312  sub_v3_v3v3(dir, e->v2->co, co);
313  lambda = ray_point_factor_v3_ex(v->co, co, dir, 0.0f, -1.0f);
314 
315  struct EDBMSplitElem pair_tmp[2];
317  v, e, co, dir, lambda, data->dist_sq, &data->cut_edges_len, pair_tmp)) {
318  struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
319  pair[0] = pair_tmp[0];
320  pair[1] = pair_tmp[1];
321  }
322 
323  /* Always return false with edges. */
324  return false;
325 }
326 
327 /* Edge x Edge Callbacks */
328 
330  BMEdge *e_a,
331  BMEdge *e_b,
332  const float co_a[3],
333  const float dir_a[3],
334  const float co_b[3],
335  const float dir_b[3],
336  float lambda_a,
337  float lambda_b,
338  struct EDBMSplitElem r_pair[2])
339 {
340  float dist_sq_va_factor, dist_sq_vb_factor;
341  BMVert *e_a_v, *e_b_v;
342  if (lambda_a < 0.5f) {
343  e_a_v = e_a->v1;
344  dist_sq_va_factor = lambda_a;
345  }
346  else {
347  e_a_v = e_a->v2;
348  dist_sq_va_factor = 1.0f - lambda_a;
349  }
350 
351  if (lambda_b < 0.5f) {
352  e_b_v = e_b->v1;
353  dist_sq_vb_factor = lambda_b;
354  }
355  else {
356  e_b_v = e_b->v2;
357  dist_sq_vb_factor = 1.0f - lambda_b;
358  }
359 
360  if (e_a_v != e_b_v) {
361  if (!IN_RANGE_INCL(lambda_a, 0.0f, 1.0f) || !IN_RANGE_INCL(lambda_b, 0.0f, 1.0f)) {
362  /* Vert x Edge is already handled elsewhere. */
363  return false;
364  }
365 
366  float dist_sq_va = square_f(dist_sq_va_factor) * len_squared_v3(dir_a);
367  float dist_sq_vb = square_f(dist_sq_vb_factor) * len_squared_v3(dir_b);
368 
369  if (dist_sq_va < data->dist_sq || dist_sq_vb < data->dist_sq) {
370  /* Vert x Edge is already handled elsewhere. */
371  return false;
372  }
373 
374  float near_a[3], near_b[3];
375  madd_v3_v3v3fl(near_a, co_a, dir_a, lambda_a);
376  madd_v3_v3v3fl(near_b, co_b, dir_b, lambda_b);
377 
378  float dist_sq = len_squared_v3v3(near_a, near_b);
379  if (dist_sq < data->dist_sq) {
380  bm_edge_pair_elem_setup(e_a, lambda_a, &data->cut_edges_len, &r_pair[0]);
381  bm_edge_pair_elem_setup(e_b, lambda_b, &data->cut_edges_len, &r_pair[1]);
382  return true;
383  }
384  }
385  return false;
386 }
387 
388 static bool bm_edgexedge_isect_cb(void *userdata, int index_a, int index_b, int thread)
389 {
390  struct EDBMSplitData *data = userdata;
391  BMEdge *e_a = BM_edge_at_index(data->bm, index_a);
392  BMEdge *e_b = BM_edge_at_index(data->bm, index_b);
393 
394  if (BM_edge_share_vert_check(e_a, e_b)) {
395  /* The other vertices may intersect but Vert x Edge is already handled elsewhere. */
396  return false;
397  }
398 
399  float co_a[3], dir_a[3], co_b[3], dir_b[3];
400  copy_v3_v3(co_a, e_a->v1->co);
401  sub_v3_v3v3(dir_a, e_a->v2->co, co_a);
402 
403  copy_v3_v3(co_b, e_b->v1->co);
404  sub_v3_v3v3(dir_b, e_b->v2->co, co_b);
405 
406  float lambda_a, lambda_b;
407  /* Using with dist^4 as `epsilon` is not the best solution, but it fits in most cases. */
408  if (isect_ray_ray_epsilon_v3(co_a, dir_a, co_b, dir_b, data->dist_sq_sq, &lambda_a, &lambda_b)) {
409  struct EDBMSplitElem pair_tmp[2];
411  data, e_a, e_b, co_a, dir_a, co_b, dir_b, lambda_a, lambda_b, pair_tmp)) {
412  struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
413  pair[0] = pair_tmp[0];
414  pair[1] = pair_tmp[1];
415  }
416  }
417 
418  /* Edge x Edge returns always false. */
419  return false;
420 }
421 
422 static bool bm_edgexedge_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
423 {
424  if (index_a < index_b) {
425  return bm_edgexedge_isect_cb(userdata, index_a, index_b, thread);
426  }
427  return false;
428 }
429 
430 /* -------------------------------------------------------------------- */
431 /* BVHTree Overlap Function */
432 
433 static void bm_elemxelem_bvhtree_overlap(const BVHTree *tree1,
434  const BVHTree *tree2,
436  struct EDBMSplitData *data,
437  BLI_Stack **pair_stack)
438 {
439  int parallel_tasks_num = BLI_bvhtree_overlap_thread_num(tree1);
440  for (int i = 0; i < parallel_tasks_num; i++) {
441  if (pair_stack[i] == NULL) {
442  pair_stack[i] = BLI_stack_new(sizeof(const struct EDBMSplitElem[2]), __func__);
443  }
444  }
445  data->pair_stack = pair_stack;
447 }
448 
449 /* -------------------------------------------------------------------- */
450 /* Callbacks for `BLI_qsort_r` */
451 
452 static int sort_cmp_by_lambda_cb(const void *index1_v, const void *index2_v, void *keys_v)
453 {
454  const struct EDBMSplitElem *pair_flat = keys_v;
455  const int index1 = *(int *)index1_v;
456  const int index2 = *(int *)index2_v;
457 
458  if (pair_flat[index1].lambda > pair_flat[index2].lambda) {
459  return 1;
460  }
461  return -1;
462 }
463 
464 /* -------------------------------------------------------------------- */
465 /* Main API */
466 
467 #define INTERSECT_EDGES
468 
470  BMesh *bm, const char hflag, const float dist, const bool split_faces, GHash *r_targetmap)
471 {
472  bool ok = false;
473 
474  BMIter iter;
475  BMVert *v;
476  BMEdge *e;
477  int i;
478 
479  /* Store all intersections in this array. */
480  struct EDBMSplitElem(*pair_iter)[2], (*pair_array)[2] = NULL;
481  int pair_len = 0;
482 
483  BLI_Stack *pair_stack[BLI_STACK_PAIR_LEN] = {NULL};
484  BLI_Stack **pair_stack_vertxvert = pair_stack;
485  BLI_Stack **pair_stack_edgexelem = &pair_stack[KDOP_TREE_TYPE];
486 
487  const float dist_sq = square_f(dist);
488  const float dist_half = dist / 2;
489 
490  struct EDBMSplitData data = {
491  .bm = bm,
492  .pair_stack = pair_stack,
493  .cut_edges_len = 0,
494  .dist_sq = dist_sq,
495  .dist_sq_sq = square_f(dist_sq),
496  };
497 
499 
500  /* tag and count the verts to be tested. */
501  int verts_act_len = 0, verts_remain_len = 0;
502  BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
503  if (BM_elem_flag_test(v, hflag)) {
505  verts_act_len++;
506  }
507  else {
510  verts_remain_len++;
511  }
512  }
513 
514  /* The index will indicate which cut in pair_array this vertex belongs to. */
515  BM_elem_index_set(v, -1);
516  }
518 
519  /* Start the creation of BVHTrees. */
520  BVHTree *tree_verts_act = NULL, *tree_verts_remain = NULL;
521  if (verts_act_len) {
522  tree_verts_act = BLI_bvhtree_new(verts_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
523  }
524  if (verts_remain_len) {
525  tree_verts_remain = BLI_bvhtree_new(
526  verts_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
527  }
528 
529  if (tree_verts_act || tree_verts_remain) {
530  BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
532  if (tree_verts_act) {
533  BLI_bvhtree_insert(tree_verts_act, i, v->co, 1);
534  }
535  }
536  else if (tree_verts_remain && !BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
537  BLI_bvhtree_insert(tree_verts_remain, i, v->co, 1);
538  }
539  }
540 
541  if (tree_verts_act) {
542  BLI_bvhtree_balance(tree_verts_act);
543  /* First pair search. */
545  tree_verts_act, tree_verts_act, bm_vertxvert_self_isect_cb, &data, pair_stack_vertxvert);
546  }
547 
548  if (tree_verts_remain) {
549  BLI_bvhtree_balance(tree_verts_remain);
550  }
551 
552  if (tree_verts_act && tree_verts_remain) {
554  tree_verts_remain, tree_verts_act, bm_vertxvert_isect_cb, &data, pair_stack_vertxvert);
555  }
556  }
557 
558  for (i = KDOP_TREE_TYPE; i--;) {
559  if (pair_stack_vertxvert[i]) {
560  pair_len += BLI_stack_count(pair_stack_vertxvert[i]);
561  }
562  }
563 
564 #ifdef INTERSECT_EDGES
565  uint vertxvert_pair_len = pair_len;
566 
567 # define EDGE_ACT_TO_TEST 1
568 # define EDGE_REMAIN_TO_TEST 2
569  /* Tag and count the edges. */
570  int edges_act_len = 0, edges_remain_len = 0;
571  BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
573  (len_squared_v3v3(e->v1->co, e->v2->co) < dist_sq)) {
574  /* Don't test hidden edges or smaller than the minimum distance.
575  * These have already been handled in the vertices overlap. */
576  BM_elem_index_set(e, 0);
577  if (split_faces) {
578  /* Tag to be ignored. */
580  }
581  continue;
582  }
583 
586  edges_act_len++;
587  }
588  else {
590  edges_remain_len++;
591  if (split_faces) {
592  /* Tag to be ignored. */
594  }
595  }
596  }
597 
598  BVHTree *tree_edges_act = NULL, *tree_edges_remain = NULL;
599  if (edges_act_len) {
600  tree_edges_act = BLI_bvhtree_new(edges_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
601  }
602 
603  if (edges_remain_len && (tree_edges_act || tree_verts_act)) {
604  tree_edges_remain = BLI_bvhtree_new(
605  edges_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
606  }
607 
608  if (tree_edges_act || tree_edges_remain) {
609  BM_ITER_MESH_INDEX (e, &iter, bm, BM_EDGES_OF_MESH, i) {
610  int edge_test = BM_elem_index_get(e);
611  float co[2][3];
612  if (edge_test == EDGE_ACT_TO_TEST) {
613  BLI_assert(tree_edges_act);
614  e->head.index = 0;
615  copy_v3_v3(co[0], e->v1->co);
616  copy_v3_v3(co[1], e->v2->co);
617  BLI_bvhtree_insert(tree_edges_act, i, co[0], 2);
618  }
619  else if (edge_test == EDGE_REMAIN_TO_TEST) {
620  BLI_assert(tree_edges_remain);
621  e->head.index = 0;
622  copy_v3_v3(co[0], e->v1->co);
623  copy_v3_v3(co[1], e->v2->co);
624  BLI_bvhtree_insert(tree_edges_remain, i, co[0], 2);
625  }
626 # ifdef INTERSECT_EDGES_DEBUG
627  else {
628  e->head.index = 0;
629  }
630 # endif
631  /* Tag used when converting pairs to vert x vert. */
633  }
634 # undef EDGE_ACT_TO_TEST
635 # undef EDGE_REMAIN_TO_TEST
636 
637  /* Use `e->head.index` to count intersections. */
639 
640  if (tree_edges_act) {
641  BLI_bvhtree_balance(tree_edges_act);
642  }
643 
644  if (tree_edges_remain) {
645  BLI_bvhtree_balance(tree_edges_remain);
646  }
647 
648  int edgexedge_pair_len = 0;
649  if (tree_edges_act) {
650  /* Edge x Edge */
652  tree_edges_act, tree_edges_act, bm_edgexedge_self_isect_cb, &data, pair_stack_edgexelem);
653 
654  if (tree_edges_remain) {
656  tree_edges_remain, tree_edges_act, bm_edgexedge_isect_cb, &data, pair_stack_edgexelem);
657  }
658 
659  for (i = KDOP_TREE_TYPE; i--;) {
660  if (pair_stack_edgexelem[i]) {
661  edgexedge_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
662  }
663  }
664 
665  if (tree_verts_act) {
666  /* Edge v Vert */
668  tree_edges_act, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
669  }
670 
671  if (tree_verts_remain) {
672  /* Edge v Vert */
674  tree_edges_act, tree_verts_remain, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
675  }
676 
677  BLI_bvhtree_free(tree_edges_act);
678  }
679 
680  if (tree_verts_act && tree_edges_remain) {
681  /* Edge v Vert */
683  tree_edges_remain, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
684  }
685 
686  BLI_bvhtree_free(tree_edges_remain);
687 
688  int edgexelem_pair_len = 0;
689  for (i = KDOP_TREE_TYPE; i--;) {
690  if (pair_stack_edgexelem[i]) {
691  edgexelem_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
692  }
693  }
694 
695  pair_len += edgexelem_pair_len;
696  int edgexvert_pair_len = edgexelem_pair_len - edgexedge_pair_len;
697 
698  if (edgexelem_pair_len) {
699  pair_array = MEM_mallocN(sizeof(*pair_array) * pair_len, __func__);
700 
701  pair_iter = pair_array;
702  for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
703  if (pair_stack[i]) {
705  BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
706  pair_iter += count;
707  }
708  }
709 
710  /* Map intersections per edge. */
711  union {
712  struct {
713  int cuts_len;
714  int cuts_index[];
715  };
716  int as_int[0];
717  } * e_map_iter, *e_map;
718 
719 # ifdef INTERSECT_EDGES_DEBUG
720  int cut_edges_len = 0;
721  BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
722  if (e->head.index != 0) {
723  cut_edges_len++;
724  }
725  }
726  BLI_assert(cut_edges_len == data.cut_edges_len);
727 # endif
728 
729  size_t e_map_size = (data.cut_edges_len * sizeof(*e_map)) +
730  (((size_t)2 * edgexedge_pair_len + edgexvert_pair_len) *
731  sizeof(*(e_map->cuts_index)));
732 
733  e_map = MEM_mallocN(e_map_size, __func__);
734  int map_len = 0;
735 
736  /* Convert every pair to Vert x Vert. */
737 
738  /* The list of pairs starts with [vert x vert] followed by [edge x edge]
739  * and finally [edge x vert].
740  * Ignore the [vert x vert] pairs */
741  struct EDBMSplitElem *pair_flat, *pair_flat_iter;
742  pair_flat = (struct EDBMSplitElem *)&pair_array[vertxvert_pair_len];
743  pair_flat_iter = &pair_flat[0];
744  uint pair_flat_len = 2 * edgexelem_pair_len;
745  for (i = 0; i < pair_flat_len; i++, pair_flat_iter++) {
746  if (pair_flat_iter->elem->head.htype != BM_EDGE) {
747  continue;
748  }
749 
750  e = pair_flat_iter->edge;
753  int e_cuts_len = e->head.index;
754 
755  e_map_iter = (void *)&e_map->as_int[map_len];
756  e_map_iter->cuts_len = e_cuts_len;
757  e_map_iter->cuts_index[0] = i;
758 
759  /* Use `e->head.index` to indicate which slot to fill with the `cut` index. */
760  e->head.index = map_len + 1;
761  map_len += 1 + e_cuts_len;
762  }
763  else {
764  e_map->as_int[++e->head.index] = i;
765  }
766  }
767 
768  /* Split Edges A to set all Vert x Edge. */
769  for (i = 0; i < map_len;
770  e_map_iter = (void *)&e_map->as_int[i], i += 1 + e_map_iter->cuts_len) {
771 
772  /* sort by lambda. */
773  BLI_qsort_r(e_map_iter->cuts_index,
774  e_map_iter->cuts_len,
775  sizeof(*(e_map->cuts_index)),
777  pair_flat);
778 
779  float lambda, lambda_prev = 0.0f;
780  for (int j = 0; j < e_map_iter->cuts_len; j++) {
781  uint index = e_map_iter->cuts_index[j];
782 
783  struct EDBMSplitElem *pair_elem = &pair_flat[index];
784  lambda = (pair_elem->lambda - lambda_prev) / (1.0f - lambda_prev);
785  lambda_prev = pair_elem->lambda;
786  e = pair_elem->edge;
787  if (split_faces) {
788  /* Tagged edges are ignored when split faces.
789  * Un-tag these. */
791  }
792 
793  BMVert *v_new = BM_edge_split(bm, e, e->v1, NULL, lambda);
794  pair_elem->vert = v_new;
795  }
796  }
797 
798  MEM_freeN(e_map);
799  }
800  }
801 #endif
802 
803  BLI_bvhtree_free(tree_verts_act);
804  BLI_bvhtree_free(tree_verts_remain);
805 
806  if (r_targetmap) {
807  if (pair_len && pair_array == NULL) {
808  pair_array = MEM_mallocN(sizeof(*pair_array) * pair_len, __func__);
809  pair_iter = pair_array;
810  for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
811  if (pair_stack[i]) {
812  uint count = (uint)BLI_stack_count(pair_stack[i]);
813  BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
814  pair_iter += count;
815  }
816  }
817  }
818 
819  if (pair_array) {
820  BMVert *v_key, *v_val;
821  pair_iter = &pair_array[0];
822  for (i = 0; i < pair_len; i++, pair_iter++) {
823  BLI_assert((*pair_iter)[0].elem->head.htype == BM_VERT);
824  BLI_assert((*pair_iter)[1].elem->head.htype == BM_VERT);
825  BLI_assert((*pair_iter)[0].elem != (*pair_iter)[1].elem);
826  v_key = (*pair_iter)[0].vert;
827  v_val = (*pair_iter)[1].vert;
828  BLI_ghash_insert(r_targetmap, v_key, v_val);
829  }
830 
839  pair_iter = &pair_array[0];
840  for (i = 0; i < pair_len; i++, pair_iter++) {
841  v_key = (*pair_iter)[0].vert;
842  v_val = (*pair_iter)[1].vert;
843  BMVert *v_target;
844  while ((v_target = BLI_ghash_lookup(r_targetmap, v_val))) {
845  v_val = v_target;
846  }
847  if (v_val != (*pair_iter)[1].vert) {
848  BMVert **v_val_p = (BMVert **)BLI_ghash_lookup_p(r_targetmap, v_key);
849  *v_val_p = (*pair_iter)[1].vert = v_val;
850  }
851  if (split_faces) {
852  /* The vertex index indicates its position in the pair_array flat. */
853  BM_elem_index_set(v_key, i * 2);
854  BM_elem_index_set(v_val, i * 2 + 1);
855  }
856  }
857 
858  if (split_faces) {
859  BMEdge **edgenet = NULL;
860  int edgenet_alloc_len = 0;
861 
862  struct EDBMSplitElem *pair_flat = (struct EDBMSplitElem *)&pair_array[0];
863  BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
865  /* Edge out of context or already tested. */
866  continue;
867  }
868 
869  BMVert *va, *vb, *va_dest = NULL;
870  va = e->v1;
871  vb = e->v2;
872 
873  int v_cut = BM_elem_index_get(va);
874  int v_cut_other = BM_elem_index_get(vb);
875  if (v_cut == -1 && v_cut_other == -1) {
877  /* Edge out of context. */
879  }
880  continue;
881  }
882 
883  /* Tag to avoid testing again. */
885 
886  if (v_cut == -1) {
887  SWAP(BMVert *, va, vb);
888  v_cut = v_cut_other;
889  v_cut_other = -1;
890  }
891 
892  /* `v_cut` indicates the other vertex within the `pair_array`. */
893  v_cut += v_cut % 2 ? -1 : 1;
894  va_dest = pair_flat[v_cut].vert;
895 
896  if (BM_vert_pair_share_face_check(va, va_dest)) {
 897  /* Vert pair acts on the same face.
 898  * Although there are cases like this where the face can be split,
 899  * for efficiency it is better to ignore them. */
900  continue;
901  }
902 
903  BMFace *best_face = NULL;
904  BMVert *v_other_dest, *v_other = vb;
905  BMEdge *e_net = e;
906  int edgenet_len = 0;
907  while (true) {
908  if (v_cut_other != -1) {
909  v_cut_other += v_cut_other % 2 ? -1 : 1;
910  v_other_dest = pair_flat[v_cut_other].vert;
911 
912  if (BM_vert_pair_share_face_check(v_other, v_other_dest)) {
 913  /* Vert pair acts on the same face.
914  * Although there are cases like this where the face can be split,
915  * for efficiency and to avoid complications, it is better to ignore these cases.
916  */
917  break;
918  }
919  }
920  else {
921  v_other_dest = v_other;
922  }
923 
924  if (va_dest == v_other_dest) {
925  /* Edge/Edge-net to vertex - we can't split the face. */
926  break;
927  }
928  if (edgenet_len == 0 && BM_edge_exists(va_dest, v_other_dest)) {
929  /* Edge to edge - no need to detect face. */
930  break;
931  }
932 
933  if (edgenet_alloc_len == edgenet_len) {
934  edgenet_alloc_len = (edgenet_alloc_len + 1) * 2;
935  edgenet = MEM_reallocN(edgenet, (edgenet_alloc_len) * sizeof(*edgenet));
936  }
937  edgenet[edgenet_len++] = e_net;
938 
939  best_face = bm_vert_pair_best_face_get(
940  va_dest, v_other_dest, edgenet, edgenet_len, dist);
941 
942  if (best_face) {
943  if ((va_dest != va) && !BM_edge_exists(va_dest, va)) {
951  e_net = edgenet[0];
952  if (edgenet_len > 1) {
953  vb = BM_edge_other_vert(e_net, va);
954  }
955  else {
956  vb = v_other_dest;
957  }
958  edgenet[0] = BM_edge_create(bm, va_dest, vb, e_net, BM_CREATE_NOP);
959  }
960  if ((edgenet_len > 1) && (v_other_dest != v_other) &&
961  !BM_edge_exists(v_other_dest, v_other)) {
969  e_net = edgenet[edgenet_len - 1];
970  edgenet[edgenet_len - 1] = BM_edge_create(
971  bm, v_other_dest, BM_edge_other_vert(e_net, v_other), e_net, BM_CREATE_NOP);
972  }
973  break;
974  }
975 
976  BMEdge *e_test = e_net, *e_next = NULL;
977  while ((e_test = BM_DISK_EDGE_NEXT(e_test, v_other)) != (e_net)) {
978  if (!BM_edge_is_wire(e_test)) {
979  if (BM_elem_flag_test(e_test, BM_ELEM_TAG)) {
980  continue;
981  }
982  if (!BM_elem_flag_test(e_test->v1, BM_ELEM_TAG) &&
983  !BM_elem_flag_test(e_test->v2, BM_ELEM_TAG)) {
984  continue;
985  }
986  /* Avoids endless loop. */
988  }
989  else if (!BM_edge_is_wire(e_net)) {
990  continue;
991  }
992  e_next = e_test;
993  break;
994  }
995 
996  if (e_next == NULL) {
997  break;
998  }
999 
1000  e_net = e_next;
1001  v_other = BM_edge_other_vert(e_net, v_other);
1002  if (v_other == va) {
1003  /* Endless loop. */
1004  break;
1005  }
1006  v_cut_other = BM_elem_index_get(v_other);
1007  }
1008 
1009  if (best_face) {
1010  BMFace **face_arr = NULL;
1011  int face_arr_len = 0;
1012  BM_face_split_edgenet(bm, best_face, edgenet, edgenet_len, &face_arr, &face_arr_len);
1013  if (face_arr) {
1014  /* Update the new faces normal.
1015  * Normal is necessary to obtain the best face for edgenet */
1016  while (face_arr_len--) {
1017  BM_face_normal_update(face_arr[face_arr_len]);
1018  }
1019  MEM_freeN(face_arr);
1020  }
1021  }
1022  }
1023 
1024  if (edgenet) {
1025  MEM_freeN(edgenet);
1026  }
1027  }
1028  ok = true;
1029  }
1030  }
1031 
1032  for (i = BLI_STACK_PAIR_LEN; i--;) {
1033  if (pair_stack[i]) {
1034  BLI_stack_free(pair_stack[i]);
1035  }
1036  }
1037  if (pair_array) {
1038  MEM_freeN(pair_array);
1039  }
1040 
1041  return ok;
1042 }
1043 
typedef float(TangentPoint)[2]
#define BLI_assert(a)
Definition: BLI_assert.h:46
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:734
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition: BLI_ghash.c:710
void ** BLI_ghash_lookup_p(GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:748
@ BVH_OVERLAP_USE_THREADING
Definition: BLI_kdopbvh.h:77
void BLI_bvhtree_balance(BVHTree *tree)
Definition: BLI_kdopbvh.c:937
BVHTree * BLI_bvhtree_new(int maxsize, float epsilon, char tree_type, char axis)
Definition: BLI_kdopbvh.c:854
BVHTreeOverlap * BLI_bvhtree_overlap_ex(const BVHTree *tree1, const BVHTree *tree2, uint *r_overlap_num, BVHTree_OverlapCallback callback, void *userdata, uint max_interactions, int flag)
Definition: BLI_kdopbvh.c:1263
void BLI_bvhtree_free(BVHTree *tree)
Definition: BLI_kdopbvh.c:926
void BLI_bvhtree_insert(BVHTree *tree, int index, const float co[3], int numpoints)
Definition: BLI_kdopbvh.c:979
int BLI_bvhtree_overlap_thread_num(const BVHTree *tree)
Definition: BLI_kdopbvh.c:1234
bool(* BVHTree_OverlapCallback)(void *userdata, int index_a, int index_b, int thread)
Definition: BLI_kdopbvh.h:110
MINLINE float square_f(float a)
bool isect_ray_ray_v3(const float ray_origin_a[3], const float ray_direction_a[3], const float ray_origin_b[3], const float ray_direction_b[3], float *r_lambda_a, float *r_lambda_b)
Definition: math_geom.c:3032
float ray_point_factor_v3_ex(const float p[3], const float ray_origin[3], const float ray_direction[3], float epsilon, float fallback)
Definition: math_geom.c:3220
bool isect_ray_ray_epsilon_v3(const float ray_origin_a[3], const float ray_direction_a[3], const float ray_origin_b[3], const float ray_direction_b[3], float epsilon, float *r_lambda_a, float *r_lambda_b)
Definition: math_geom.c:2996
MINLINE float len_squared_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE float len_squared_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void madd_v3_v3v3fl(float r[3], const float a[3], const float b[3], float f)
void BLI_qsort_r(void *a, size_t n, size_t es, BLI_sort_cmp_t cmp, void *thunk)
Definition: sort.c:79
void BLI_stack_pop_n_reverse(BLI_Stack *stack, void *dst, unsigned int n) ATTR_NONNULL()
Definition: stack.c:154
void * BLI_stack_push_r(BLI_Stack *stack) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: stack.c:101
size_t BLI_stack_count(const BLI_Stack *stack) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: stack.c:225
void BLI_stack_free(BLI_Stack *stack) ATTR_NONNULL()
Definition: stack.c:94
#define BLI_stack_new(esize, descr)
unsigned int uint
Definition: BLI_sys_types.h:67
#define SWAP(type, a, b)
#define IN_RANGE(a, b, c)
#define UNUSED(x)
#define ELEM(...)
#define IN_RANGE_INCL(a, b, c)
Read Guarded memory(de)allocation.
#define MEM_reallocN(vmemh, len)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
#define BM_DISK_EDGE_NEXT(e, v)
Definition: bmesh_class.h:625
@ BM_VERT
Definition: bmesh_class.h:383
@ BM_EDGE
Definition: bmesh_class.h:384
@ BM_ELEM_HIDDEN
Definition: bmesh_class.h:472
@ BM_ELEM_TAG
Definition: bmesh_class.h:484
BMEdge * BM_edge_create(BMesh *bm, BMVert *v1, BMVert *v2, const BMEdge *e_example, const eBMCreateFlag create_flag)
Main function for creating a new edge.
Definition: bmesh_core.c:123
@ BM_CREATE_NOP
Definition: bmesh_core.h:12
#define BM_elem_index_get(ele)
Definition: bmesh_inline.h:110
#define BM_elem_flag_disable(ele, hflag)
Definition: bmesh_inline.h:15
#define BM_elem_index_set(ele, index)
Definition: bmesh_inline.h:111
#define BM_elem_flag_test(ele, hflag)
Definition: bmesh_inline.h:12
#define BM_elem_flag_enable(ele, hflag)
Definition: bmesh_inline.h:14
#define KDOP_TREE_TYPE
bool BM_mesh_intersect_edges(BMesh *bm, const char hflag, const float dist, const bool split_faces, GHash *r_targetmap)
static bool bm_vertxvert_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
static void bm_vert_pair_elem_setup_ex(BMVert *v, struct EDBMSplitElem *r_pair_elem)
static bool bm_edgexvert_isect_impl(BMVert *v, BMEdge *e, const float co[3], const float dir[3], float lambda, float data_dist_sq, int *data_cut_edges_len, struct EDBMSplitElem r_pair[2])
static bool bm_vert_pair_share_best_splittable_face_cb(BMFace *f, BMLoop *l_a, BMLoop *l_b, void *userdata)
static bool bm_edgexedge_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
static bool bm_edgexedge_isect_impl(struct EDBMSplitData *data, BMEdge *e_a, BMEdge *e_b, const float co_a[3], const float dir_a[3], const float co_b[3], const float dir_b[3], float lambda_a, float lambda_b, struct EDBMSplitElem r_pair[2])
#define KDOP_AXIS_LEN
static bool bm_vert_pair_share_splittable_face_cb(BMFace *UNUSED(f), BMLoop *l_a, BMLoop *l_b, void *userdata)
static int sort_cmp_by_lambda_cb(const void *index1_v, const void *index2_v, void *keys_v)
#define EDGE_REMAIN_TO_TEST
#define BLI_STACK_PAIR_LEN
static bool bm_vertxvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
#define EDGE_ACT_TO_TEST
static BMFace * bm_vert_pair_best_face_get(BMVert *v_a, BMVert *v_b, BMEdge **edgenet, const int edgenet_len, const float epsilon)
static bool bm_edgexedge_isect_cb(void *userdata, int index_a, int index_b, int thread)
static bool bm_edgexvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
static void bm_elemxelem_bvhtree_overlap(const BVHTree *tree1, const BVHTree *tree2, BVHTree_OverlapCallback callback, struct EDBMSplitData *data, BLI_Stack **pair_stack)
static void bm_edge_pair_elem_setup(BMEdge *e, float lambda, int *r_data_cut_edges_len, struct EDBMSplitElem *r_pair_elem)
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
#define BM_ITER_MESH_INDEX(ele, iter, bm, itype, indexvar)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_VERTS_OF_FACE
ATTR_WARN_UNUSED_RESULT BMesh * bm
void BM_mesh_elem_table_ensure(BMesh *bm, const char htype)
Definition: bmesh_mesh.cc:558
BLI_INLINE BMEdge * BM_edge_at_index(BMesh *bm, const int index)
Definition: bmesh_mesh.h:109
BLI_INLINE BMVert * BM_vert_at_index(BMesh *bm, const int index)
Definition: bmesh_mesh.h:103
BMVert * BM_edge_split(BMesh *bm, BMEdge *e, BMVert *v, BMEdge **r_e, float fac)
Edge Split.
Definition: bmesh_mods.c:448
bool BM_face_point_inside_test(const BMFace *f, const float co[3])
void BM_face_normal_update(BMFace *f)
bool BM_face_split_edgenet(BMesh *bm, BMFace *f, BMEdge **edge_net, const int edge_net_len, BMFace ***r_face_arr, int *r_face_arr_len)
BMEdge * BM_edge_exists(BMVert *v_a, BMVert *v_b)
Definition: bmesh_query.c:1553
bool BM_vert_pair_share_face_check(BMVert *v_a, BMVert *v_b)
Definition: bmesh_query.c:100
bool BM_edge_in_face(const BMEdge *e, const BMFace *f)
Definition: bmesh_query.c:420
bool BM_edge_share_vert_check(BMEdge *e1, BMEdge *e2)
Definition: bmesh_query.c:1074
BMFace * BM_vert_pair_shared_face_cb(BMVert *v_a, BMVert *v_b, const bool allow_adjacent, bool(*callback)(BMFace *, BMLoop *, BMLoop *, void *userdata), void *user_data, BMLoop **r_l_a, BMLoop **r_l_b)
Definition: bmesh_query.c:137
BLI_INLINE BMVert * BM_edge_other_vert(BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
BLI_INLINE bool BM_edge_is_wire(const BMEdge *e) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMLoop * l_b
ATTR_WARN_UNUSED_RESULT const BMVert * v
bool closest(btVector3 &v)
Definition: thread.h:34
DEGForeachIDComponentCallback callback
int count
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:27
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:33
T dot(const vec_base< T, Size > &a, const vec_base< T, Size > &b)
static double epsilon
#define min(a, b)
Definition: sort.c:35
BMVert * v1
Definition: bmesh_class.h:122
BMVert * v2
Definition: bmesh_class.h:122
BMHeader head
Definition: bmesh_class.h:243
float no[3]
Definition: bmesh_class.h:271
char htype
Definition: bmesh_class.h:64
int index
Definition: bmesh_class.h:61
struct BMVert * v
Definition: bmesh_class.h:153
struct BMLoop * prev
Definition: bmesh_class.h:233
struct BMLoop * next
Definition: bmesh_class.h:233
float co[3]
Definition: bmesh_class.h:87
BMHeader head
Definition: bmesh_class.h:85
char elem_index_dirty
Definition: bmesh_class.h:305
BLI_Stack ** pair_stack
float max
ccl_device_inline int as_int(uint i)
Definition: util/math.h:212