Blender V3.3: mesh_normals.cc
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2001-2002 NaN Holding BV. All rights reserved. */

/** \file
 * \ingroup bke
 *
 * Mesh normal calculation functions.
 */

#include <climits>

#include "MEM_guardedalloc.h"

#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"

#include "BLI_alloca.h"
#include "BLI_bitmap.h"

#include "BLI_linklist.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
#include "BLI_math_vec_types.hh"
#include "BLI_memarena.h"
#include "BLI_span.hh"
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_utildefines.h"

#include "BKE_customdata.h"
#include "BKE_editmesh_cache.h"
#include "BKE_global.h"
#include "BKE_mesh.h"

#include "atomic_ops.h"

using blender::Span;

// #define DEBUG_TIME

#ifdef DEBUG_TIME
#  include "PIL_time.h"
#  include "PIL_time_utildefines.h"
#endif

/* -------------------------------------------------------------------- */
/** \name Private Utility Functions
 * \{ */
static void add_v3_v3_atomic(float r[3], const float a[3])
{
#define FLT_EQ_NONAN(_fa, _fb) (*((const uint32_t *)&_fa) == *((const uint32_t *)&_fb))

  float virtual_lock = r[0];
  while (true) {
    /* This loops until the following conditions are met:
     * - `r[0]` has the same value as virtual_lock (i.e. it did not change since last try).
     * - `r[0]` was not `FLT_MAX`, i.e. it was not locked by another thread. */
    const float test_lock = atomic_cas_float(&r[0], virtual_lock, FLT_MAX);
    if (_ATOMIC_LIKELY(FLT_EQ_NONAN(test_lock, virtual_lock) && (test_lock != FLT_MAX))) {
      break;
    }
    virtual_lock = test_lock;
  }
  virtual_lock += a[0];
  r[1] += a[1];
  r[2] += a[2];

  /* Second atomic operation to 'release'
   * our lock on that vector and set its first scalar value. */
  /* Note that we do not need to loop here, since we 'locked' `r[0]`,
   * nobody should have changed it in the meantime. */
  virtual_lock = atomic_cas_float(&r[0], FLT_MAX, virtual_lock);
  BLI_assert(virtual_lock == FLT_MAX);

#undef FLT_EQ_NONAN
}
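
/* Illustrative note (not in the original source): the function above implements a per-vector
 * spin-lock, using `r[0]` itself as the lock word and `FLT_MAX` as the "locked" sentinel.
 * A hypothetical multi-threaded caller would simply do:
 *
 *   float accum[3] = {0.0f, 0.0f, 0.0f};
 *   // From any number of threads:
 *   add_v3_v3_atomic(accum, contribution);
 *
 * The bitwise comparison (FLT_EQ_NONAN) behaves like `==` for regular values but also matches
 * identical NaN bit-patterns, so the retry loop cannot spin forever on NaN data
 * (NaN != NaN with a plain float compare). */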

/** \} */

/* -------------------------------------------------------------------- */
/** \name Public Utility Functions
 *
 * Related to managing normals but not directly related to calculating normals.
 * \{ */

void BKE_mesh_normals_tag_dirty(Mesh *mesh)
{
  mesh->runtime.vert_normals_dirty = true;
  mesh->runtime.poly_normals_dirty = true;
}

float (*BKE_mesh_vertex_normals_for_write(Mesh *mesh))[3]
{
  if (mesh->runtime.vert_normals == nullptr) {
    mesh->runtime.vert_normals = (float(*)[3])MEM_malloc_arrayN(
        mesh->totvert, sizeof(float[3]), __func__);
  }

  BLI_assert(MEM_allocN_len(mesh->runtime.vert_normals) >= sizeof(float[3]) * mesh->totvert);

  return mesh->runtime.vert_normals;
}

float (*BKE_mesh_poly_normals_for_write(Mesh *mesh))[3]
{
  if (mesh->runtime.poly_normals == nullptr) {
    mesh->runtime.poly_normals = (float(*)[3])MEM_malloc_arrayN(
        mesh->totpoly, sizeof(float[3]), __func__);
  }

  BLI_assert(MEM_allocN_len(mesh->runtime.poly_normals) >= sizeof(float[3]) * mesh->totpoly);

  return mesh->runtime.poly_normals;
}

void BKE_mesh_vertex_normals_clear_dirty(Mesh *mesh)
{
  mesh->runtime.vert_normals_dirty = false;
  BKE_mesh_assert_normals_dirty_or_calculated(mesh);
}

void BKE_mesh_poly_normals_clear_dirty(Mesh *mesh)
{
  mesh->runtime.poly_normals_dirty = false;
  BKE_mesh_assert_normals_dirty_or_calculated(mesh);
}

bool BKE_mesh_vertex_normals_are_dirty(const Mesh *mesh)
{
  return mesh->runtime.vert_normals_dirty;
}

bool BKE_mesh_poly_normals_are_dirty(const Mesh *mesh)
{
  return mesh->runtime.poly_normals_dirty;
}

void BKE_mesh_clear_derived_normals(Mesh *mesh)
{
  MEM_SAFE_FREE(mesh->runtime.vert_normals);
  MEM_SAFE_FREE(mesh->runtime.poly_normals);

  mesh->runtime.vert_normals_dirty = true;
  mesh->runtime.poly_normals_dirty = true;
}

void BKE_mesh_assert_normals_dirty_or_calculated(const Mesh *mesh)
{
  if (!mesh->runtime.vert_normals_dirty) {
    BLI_assert(mesh->runtime.vert_normals || mesh->totvert == 0);
  }
  if (!mesh->runtime.poly_normals_dirty) {
    BLI_assert(mesh->runtime.poly_normals || mesh->totpoly == 0);
  }
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Mesh Normal Calculation (Polygons)
 * \{ */

struct MeshCalcNormalsData_Poly {
  const MVert *mvert;
  const MLoop *mloop;
  const MPoly *mpoly;

  /** Polygon normal output. */
  float (*pnors)[3];
};

static void mesh_calc_normals_poly_fn(void *__restrict userdata,
                                      const int pidx,
                                      const TaskParallelTLS *__restrict UNUSED(tls))
{
  const MeshCalcNormalsData_Poly *data = (MeshCalcNormalsData_Poly *)userdata;
  const MPoly *mp = &data->mpoly[pidx];
  BKE_mesh_calc_poly_normal(mp, data->mloop + mp->loopstart, data->mvert, data->pnors[pidx]);
}

void BKE_mesh_calc_normals_poly(const MVert *mvert,
                                int UNUSED(mvert_len),
                                const MLoop *mloop,
                                int UNUSED(mloop_len),
                                const MPoly *mpoly,
                                int mpoly_len,
                                float (*r_poly_normals)[3])
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.min_iter_per_thread = 1024;

  BLI_assert((r_poly_normals != nullptr) || (mpoly_len == 0));

  MeshCalcNormalsData_Poly data = {};
  data.mpoly = mpoly;
  data.mloop = mloop;
  data.mvert = mvert;
  data.pnors = r_poly_normals;

  BLI_task_parallel_range(0, mpoly_len, &data, mesh_calc_normals_poly_fn, &settings);
}
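
/* Usage sketch (not in the original source): computing per-polygon normals into a
 * caller-allocated array, assuming `mesh` is a `Mesh *` with valid geometry arrays:
 *
 *   float(*pnors)[3] = (float(*)[3])MEM_malloc_arrayN(mesh->totpoly, sizeof(float[3]), __func__);
 *   BKE_mesh_calc_normals_poly(
 *       mesh->mvert, mesh->totvert, mesh->mloop, mesh->totloop, mesh->mpoly, mesh->totpoly, pnors);
 *
 * The vertex/loop counts are unused here (the signature keeps them for symmetry), and the
 * iteration is parallelized in blocks of at least 1024 polygons. */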

/** \} */

/* -------------------------------------------------------------------- */
/** \name Mesh Normal Calculation (Polygons & Vertices)
 *
 * Take care making optimizations to this function as improvements to low-poly
 * mesh performance has the potential to slow down high-poly meshes.
 * \{ */

struct MeshCalcNormalsData_PolyAndVertex {
  const MVert *mvert;
  const MLoop *mloop;
  const MPoly *mpoly;

  /** Polygon normal output. */
  float (*pnors)[3];
  /** Vertex normal output. */
  float (*vnors)[3];
};

static void mesh_calc_normals_poly_and_vertex_accum_fn(
    void *__restrict userdata, const int pidx, const TaskParallelTLS *__restrict UNUSED(tls))
{
  const MeshCalcNormalsData_PolyAndVertex *data = (MeshCalcNormalsData_PolyAndVertex *)userdata;
  const MPoly *mp = &data->mpoly[pidx];
  const MLoop *ml = &data->mloop[mp->loopstart];
  const MVert *mverts = data->mvert;
  float(*vnors)[3] = data->vnors;

  float pnor_temp[3];
  float *pnor = data->pnors ? data->pnors[pidx] : pnor_temp;

  const int i_end = mp->totloop - 1;

  /* Polygon normal and edge-vector. */
  /* Inline version of #BKE_mesh_calc_poly_normal, also does edge-vectors. */
  {
    zero_v3(pnor);
    /* Newell's Method */
    const float *v_curr = mverts[ml[i_end].v].co;
    for (int i_next = 0; i_next <= i_end; i_next++) {
      const float *v_next = mverts[ml[i_next].v].co;
      add_newell_cross_v3_v3v3(pnor, v_curr, v_next);
      v_curr = v_next;
    }
    if (UNLIKELY(normalize_v3(pnor) == 0.0f)) {
      pnor[2] = 1.0f; /* Other axes set to zero. */
    }
  }

  /* Accumulate angle-weighted face normal into the vertex normals. */
  /* Inline version of #accumulate_vertex_normals_poly_v3. */
  {
    float edvec_prev[3], edvec_next[3], edvec_end[3];
    const float *v_curr = mverts[ml[i_end].v].co;
    sub_v3_v3v3(edvec_prev, mverts[ml[i_end - 1].v].co, v_curr);
    normalize_v3(edvec_prev);
    copy_v3_v3(edvec_end, edvec_prev);

    for (int i_next = 0, i_curr = i_end; i_next <= i_end; i_curr = i_next++) {
      const float *v_next = mverts[ml[i_next].v].co;

      /* Skip an extra normalization by reusing the first calculated edge. */
      if (i_next != i_end) {
        sub_v3_v3v3(edvec_next, v_curr, v_next);
        normalize_v3(edvec_next);
      }
      else {
        copy_v3_v3(edvec_next, edvec_end);
      }

      /* Calculate angle between the two poly edges incident on this vertex. */
      const float fac = saacos(-dot_v3v3(edvec_prev, edvec_next));
      const float vnor_add[3] = {pnor[0] * fac, pnor[1] * fac, pnor[2] * fac};

      add_v3_v3_atomic(vnors[ml[i_curr].v], vnor_add);
      v_curr = v_next;
      copy_v3_v3(edvec_prev, edvec_next);
    }
  }
}
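
/* Background note (not in the original source): Newell's method computes the (unnormalized)
 * polygon normal as a sum over the polygon's edges (v_curr, v_next):
 *
 *   n.x += (v_curr.y - v_next.y) * (v_curr.z + v_next.z);
 *   n.y += (v_curr.z - v_next.z) * (v_curr.x + v_next.x);
 *   n.z += (v_curr.x - v_next.x) * (v_curr.y + v_next.y);
 *
 * which is what #add_newell_cross_v3_v3v3 accumulates per edge above. Unlike a single cross
 * product of two edges, this remains robust for concave and non-planar polygons. */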

static void mesh_calc_normals_poly_and_vertex_finalize_fn(
    void *__restrict userdata, const int vidx, const TaskParallelTLS *__restrict UNUSED(tls))
{
  MeshCalcNormalsData_PolyAndVertex *data = (MeshCalcNormalsData_PolyAndVertex *)userdata;

  const MVert *mv = &data->mvert[vidx];
  float *no = data->vnors[vidx];

  if (UNLIKELY(normalize_v3(no) == 0.0f)) {
    /* Following Mesh convention; we use the vertex coordinate itself for the normal in this
     * case. */
    normalize_v3_v3(no, mv->co);
  }
}

static void mesh_calc_normals_poly_and_vertex(const MVert *mvert,
                                              const int mvert_len,
                                              const MLoop *mloop,
                                              const int UNUSED(mloop_len),
                                              const MPoly *mpoly,
                                              const int mpoly_len,
                                              float (*r_poly_normals)[3],
                                              float (*r_vert_normals)[3])
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.min_iter_per_thread = 1024;

  memset(r_vert_normals, 0, sizeof(*r_vert_normals) * (size_t)mvert_len);

  MeshCalcNormalsData_PolyAndVertex data = {};
  data.mpoly = mpoly;
  data.mloop = mloop;
  data.mvert = mvert;
  data.pnors = r_poly_normals;
  data.vnors = r_vert_normals;

  /* Compute poly normals, accumulating them into vertex normals. */
  BLI_task_parallel_range(
      0, mpoly_len, &data, mesh_calc_normals_poly_and_vertex_accum_fn, &settings);

  /* Normalize and validate computed vertex normals. */
  BLI_task_parallel_range(
      0, mvert_len, &data, mesh_calc_normals_poly_and_vertex_finalize_fn, &settings);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Mesh Normal Calculation
 * \{ */

const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
{
  if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
    BLI_assert(mesh->runtime.vert_normals != nullptr || mesh->totvert == 0);
    return mesh->runtime.vert_normals;
  }

  if (mesh->totvert == 0) {
    return nullptr;
  }

  ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
  BLI_mutex_lock(normals_mutex);
  if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
    BLI_assert(mesh->runtime.vert_normals != nullptr);
    BLI_mutex_unlock(normals_mutex);
    return mesh->runtime.vert_normals;
  }

  float(*vert_normals)[3];
  float(*poly_normals)[3];

  /* Isolate task because a mutex is locked and computing normals is multi-threaded. */
  blender::threading::isolate_task([&]() {
    Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);

    vert_normals = BKE_mesh_vertex_normals_for_write(&mesh_mutable);
    poly_normals = BKE_mesh_poly_normals_for_write(&mesh_mutable);

    mesh_calc_normals_poly_and_vertex(mesh_mutable.mvert,
                                      mesh_mutable.totvert,
                                      mesh_mutable.mloop,
                                      mesh_mutable.totloop,
                                      mesh_mutable.mpoly,
                                      mesh_mutable.totpoly,
                                      poly_normals,
                                      vert_normals);

    BKE_mesh_vertex_normals_clear_dirty(&mesh_mutable);
    BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
  });

  BLI_mutex_unlock(normals_mutex);
  return vert_normals;
}
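
/* Usage note (not in the original source): this is the classic double-checked locking pattern.
 * Callers on any thread can safely do, e.g.:
 *
 *   const float(*vnors)[3] = BKE_mesh_vertex_normals_ensure(mesh);
 *
 * The dirty flag is tested once without the mutex (fast path when normals are already cached),
 * then re-tested under `runtime.normals_mutex` so only one thread performs the computation.
 * Roughly speaking, `isolate_task` keeps the scheduler from running unrelated stolen work
 * (which might itself try to take this mutex) on this thread inside the locked region. */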

const float (*BKE_mesh_poly_normals_ensure(const Mesh *mesh))[3]
{
  if (!BKE_mesh_poly_normals_are_dirty(mesh)) {
    BLI_assert(mesh->runtime.poly_normals != nullptr || mesh->totpoly == 0);
    return mesh->runtime.poly_normals;
  }

  if (mesh->totpoly == 0) {
    return nullptr;
  }

  ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
  BLI_mutex_lock(normals_mutex);
  if (!BKE_mesh_poly_normals_are_dirty(mesh)) {
    BLI_assert(mesh->runtime.poly_normals != nullptr);
    BLI_mutex_unlock(normals_mutex);
    return mesh->runtime.poly_normals;
  }

  float(*poly_normals)[3];

  /* Isolate task because a mutex is locked and computing normals is multi-threaded. */
  blender::threading::isolate_task([&]() {
    Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);

    poly_normals = BKE_mesh_poly_normals_for_write(&mesh_mutable);

    BKE_mesh_calc_normals_poly(mesh_mutable.mvert,
                               mesh_mutable.totvert,
                               mesh_mutable.mloop,
                               mesh_mutable.totloop,
                               mesh_mutable.mpoly,
                               mesh_mutable.totpoly,
                               poly_normals);

    BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
  });

  BLI_mutex_unlock(normals_mutex);
  return poly_normals;
}

void BKE_mesh_ensure_normals_for_display(Mesh *mesh)
{
  switch ((eMeshWrapperType)mesh->runtime.wrapper_type) {
    case ME_WRAPPER_TYPE_SUBD:
    case ME_WRAPPER_TYPE_MDATA:
      BKE_mesh_vertex_normals_ensure(mesh);
      BKE_mesh_poly_normals_ensure(mesh);
      break;
    case ME_WRAPPER_TYPE_BMESH: {
      struct BMEditMesh *em = mesh->edit_mesh;
      EditMeshData *emd = mesh->runtime.edit_data;
      if (emd->vertexCos) {
        BKE_editmesh_cache_ensure_vert_normals(em, emd);
        BKE_editmesh_cache_ensure_poly_normals(em, emd);
      }
      return;
    }
  }
}

void BKE_mesh_calc_normals(Mesh *mesh)
{
#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(BKE_mesh_calc_normals);
#endif
  BKE_mesh_vertex_normals_ensure(mesh);
#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(BKE_mesh_calc_normals);
#endif
}

void BKE_mesh_calc_normals_looptri(MVert *mverts,
                                   int numVerts,
                                   const MLoop *mloop,
                                   const MLoopTri *looptri,
                                   int looptri_num,
                                   float (*r_tri_nors)[3])
{
  float(*tnorms)[3] = (float(*)[3])MEM_calloc_arrayN((size_t)numVerts, sizeof(*tnorms), "tnorms");
  float(*fnors)[3] = (r_tri_nors) ? r_tri_nors :
                                    (float(*)[3])MEM_calloc_arrayN(
                                        (size_t)looptri_num, sizeof(*fnors), "meshnormals");

  if (!tnorms || !fnors) {
    goto cleanup;
  }

  for (int i = 0; i < looptri_num; i++) {
    const MLoopTri *lt = &looptri[i];
    float *f_no = fnors[i];
    const uint vtri[3] = {
        mloop[lt->tri[0]].v,
        mloop[lt->tri[1]].v,
        mloop[lt->tri[2]].v,
    };

    normal_tri_v3(f_no, mverts[vtri[0]].co, mverts[vtri[1]].co, mverts[vtri[2]].co);

    accumulate_vertex_normals_tri_v3(tnorms[vtri[0]],
                                     tnorms[vtri[1]],
                                     tnorms[vtri[2]],
                                     f_no,
                                     mverts[vtri[0]].co,
                                     mverts[vtri[1]].co,
                                     mverts[vtri[2]].co);
  }

  /* Following Mesh convention; we use the vertex coordinate itself for the normal in this case. */
  for (int i = 0; i < numVerts; i++) {
    MVert *mv = &mverts[i];
    float *no = tnorms[i];

    if (UNLIKELY(normalize_v3(no) == 0.0f)) {
      normalize_v3_v3(no, mv->co);
    }
  }

cleanup:
  MEM_freeN(tnorms);

  if (fnors != r_tri_nors) {
    MEM_freeN(fnors);
  }
}

void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr,
                            const int numLoops,
                            const char data_type)
{
  if (!(lnors_spacearr->lspacearr && lnors_spacearr->loops_pool)) {
    MemArena *mem;

    if (!lnors_spacearr->mem) {
      lnors_spacearr->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
    }
    mem = lnors_spacearr->mem;
    lnors_spacearr->lspacearr = (MLoopNorSpace **)BLI_memarena_calloc(
        mem, sizeof(MLoopNorSpace *) * (size_t)numLoops);
    lnors_spacearr->loops_pool = (LinkNode *)BLI_memarena_alloc(
        mem, sizeof(LinkNode) * (size_t)numLoops);

    lnors_spacearr->spaces_num = 0;
  }
  BLI_assert(ELEM(data_type, MLNOR_SPACEARR_LOOP_INDEX, MLNOR_SPACEARR_BMLOOP_PTR));
  lnors_spacearr->data_type = data_type;
}

void BKE_lnor_spacearr_tls_init(MLoopNorSpaceArray *lnors_spacearr,
                                MLoopNorSpaceArray *lnors_spacearr_tls)
{
  *lnors_spacearr_tls = *lnors_spacearr;
  lnors_spacearr_tls->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
}

void BKE_lnor_spacearr_tls_join(MLoopNorSpaceArray *lnors_spacearr,
                                MLoopNorSpaceArray *lnors_spacearr_tls)
{
  BLI_assert(lnors_spacearr->data_type == lnors_spacearr_tls->data_type);
  BLI_assert(lnors_spacearr->mem != lnors_spacearr_tls->mem);
  lnors_spacearr->spaces_num += lnors_spacearr_tls->spaces_num;
  BLI_memarena_merge(lnors_spacearr->mem, lnors_spacearr_tls->mem);
  BLI_memarena_free(lnors_spacearr_tls->mem);
  lnors_spacearr_tls->mem = nullptr;
  BKE_lnor_spacearr_clear(lnors_spacearr_tls);
}

void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
{
  lnors_spacearr->spaces_num = 0;
  lnors_spacearr->lspacearr = nullptr;
  lnors_spacearr->loops_pool = nullptr;
  if (lnors_spacearr->mem != nullptr) {
    BLI_memarena_clear(lnors_spacearr->mem);
  }
}

void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr)
{
  lnors_spacearr->spaces_num = 0;
  lnors_spacearr->lspacearr = nullptr;
  lnors_spacearr->loops_pool = nullptr;
  BLI_memarena_free(lnors_spacearr->mem);
  lnors_spacearr->mem = nullptr;
}

MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr)
{
  lnors_spacearr->spaces_num++;
  return (MLoopNorSpace *)BLI_memarena_calloc(lnors_spacearr->mem, sizeof(MLoopNorSpace));
}

/* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
#define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
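
/* Note (not in the original source): this threshold is compared against dot products of unit
 * vectors, i.e. cosines. cos(theta) >= 1.0 - 1e-4 corresponds to roughly
 * theta <= acos(0.9999) ~= 0.81 degrees, so vectors within about a degree of being parallel
 * are treated as degenerate below. */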

void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
                           const float lnor[3],
                           float vec_ref[3],
                           float vec_other[3],
                           BLI_Stack *edge_vectors)
{
  const float pi2 = (float)M_PI * 2.0f;
  float tvec[3], dtp;
  const float dtp_ref = dot_v3v3(vec_ref, lnor);
  const float dtp_other = dot_v3v3(vec_other, lnor);

  if (UNLIKELY(fabsf(dtp_ref) >= LNOR_SPACE_TRIGO_THRESHOLD ||
               fabsf(dtp_other) >= LNOR_SPACE_TRIGO_THRESHOLD)) {
    /* If vec_ref or vec_other are too much aligned with lnor, we can't build the lnor space,
     * tag it as invalid and abort. */
    lnor_space->ref_alpha = lnor_space->ref_beta = 0.0f;

    if (edge_vectors) {
      BLI_stack_clear(edge_vectors);
    }
    return;
  }

  copy_v3_v3(lnor_space->vec_lnor, lnor);

  /* Compute ref alpha, average angle of all available edge vectors to lnor. */
  if (edge_vectors) {
    float alpha = 0.0f;
    int count = 0;
    while (!BLI_stack_is_empty(edge_vectors)) {
      const float *vec = (const float *)BLI_stack_peek(edge_vectors);
      alpha += saacosf(dot_v3v3(vec, lnor));
      BLI_stack_discard(edge_vectors);
      count++;
    }
    /* NOTE: In theory, this could be `count > 2`,
     * but there is one case where we only have two edges for two loops:
     * a smooth vertex with only two edges and two faces (our Monkey's nose has that, e.g.).
     */
    BLI_assert(count >= 2); /* This piece of code shall only be called for more than one loop. */
    lnor_space->ref_alpha = alpha / (float)count;
  }
  else {
    lnor_space->ref_alpha = (saacosf(dot_v3v3(vec_ref, lnor)) +
                             saacosf(dot_v3v3(vec_other, lnor))) /
                            2.0f;
  }

  /* Project vec_ref on lnor's ortho plane. */
  mul_v3_v3fl(tvec, lnor, dtp_ref);
  sub_v3_v3(vec_ref, tvec);
  normalize_v3_v3(lnor_space->vec_ref, vec_ref);

  cross_v3_v3v3(tvec, lnor, lnor_space->vec_ref);
  normalize_v3_v3(lnor_space->vec_ortho, tvec);

  /* Project vec_other on lnor's ortho plane. */
  mul_v3_v3fl(tvec, lnor, dtp_other);
  sub_v3_v3(vec_other, tvec);
  normalize_v3(vec_other);

  /* Beta is the angle between ref_vec and other_vec, around lnor. */
  dtp = dot_v3v3(lnor_space->vec_ref, vec_other);
  if (LIKELY(dtp < LNOR_SPACE_TRIGO_THRESHOLD)) {
    const float beta = saacos(dtp);
    lnor_space->ref_beta = (dot_v3v3(lnor_space->vec_ortho, vec_other) < 0.0f) ? pi2 - beta : beta;
  }
  else {
    lnor_space->ref_beta = pi2;
  }
}

void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr,
                             MLoopNorSpace *lnor_space,
                             const int ml_index,
                             void *bm_loop,
                             const bool is_single)
{
  BLI_assert((lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX && bm_loop == nullptr) ||
             (lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR && bm_loop != nullptr));

  lnors_spacearr->lspacearr[ml_index] = lnor_space;
  if (bm_loop == nullptr) {
    bm_loop = POINTER_FROM_INT(ml_index);
  }
  if (is_single) {
    BLI_assert(lnor_space->loops == nullptr);
    lnor_space->flags |= MLNOR_SPACE_IS_SINGLE;
    lnor_space->loops = (LinkNode *)bm_loop;
  }
  else {
    BLI_assert((lnor_space->flags & MLNOR_SPACE_IS_SINGLE) == 0);
    BLI_linklist_prepend_nlink(&lnor_space->loops, bm_loop, &lnors_spacearr->loops_pool[ml_index]);
  }
}

MINLINE float unit_short_to_float(const short val)
{
  return (float)val / (float)SHRT_MAX;
}

MINLINE short unit_float_to_short(const float val)
{
  /* Rounding. */
  return (short)floorf(val * (float)SHRT_MAX + 0.5f);
}
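
/* Worked example (not in the original source): with SHRT_MAX == 32767,
 *
 *   unit_float_to_short(0.5f)  -> (short)floorf(16383.5f + 0.5f) == 16384
 *   unit_short_to_float(16384) -> 16384.0f / 32767.0f ~= 0.50002
 *
 * so the encoding quantizes [-1, 1] into steps of about 0.00003, and the `+ 0.5f` turns
 * truncation into rounding to the nearest integer. */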

void BKE_lnor_space_custom_data_to_normal(MLoopNorSpace *lnor_space,
                                          const short clnor_data[2],
                                          float r_custom_lnor[3])
{
  /* NOP custom normal data or invalid lnor space, return. */
  if (clnor_data[0] == 0 || lnor_space->ref_alpha == 0.0f || lnor_space->ref_beta == 0.0f) {
    copy_v3_v3(r_custom_lnor, lnor_space->vec_lnor);
    return;
  }

  {
    /* TODO: Check whether using #sincosf() gives any noticeable benefit
     * (could not even get it working under linux though)! */
    const float pi2 = (float)(M_PI * 2.0);
    const float alphafac = unit_short_to_float(clnor_data[0]);
    const float alpha = (alphafac > 0.0f ? lnor_space->ref_alpha : pi2 - lnor_space->ref_alpha) *
                        alphafac;
    const float betafac = unit_short_to_float(clnor_data[1]);

    mul_v3_v3fl(r_custom_lnor, lnor_space->vec_lnor, cosf(alpha));

    if (betafac == 0.0f) {
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ref, sinf(alpha));
    }
    else {
      const float sinalpha = sinf(alpha);
      const float beta = (betafac > 0.0f ? lnor_space->ref_beta : pi2 - lnor_space->ref_beta) *
                         betafac;
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ref, sinalpha * cosf(beta));
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ortho, sinalpha * sinf(beta));
    }
  }
}
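
/* Background note (not in the original source): the decode above reconstructs the custom normal
 * in the loop space's spherical coordinates, where alpha is the polar angle from vec_lnor and
 * beta the azimuth around it:
 *
 *   n = cos(alpha) * vec_lnor
 *     + sin(alpha) * cos(beta) * vec_ref
 *     + sin(alpha) * sin(beta) * vec_ortho
 *
 * The shorts act, roughly, as signed fractions of ref_alpha / ref_beta (or of their 2*pi
 * complements when negative), so (0, 0) always decodes to vec_lnor itself. */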

void BKE_lnor_space_custom_normal_to_data(MLoopNorSpace *lnor_space,
                                          const float custom_lnor[3],
                                          short r_clnor_data[2])
{
  /* We use a zero vector as NOP custom normal (can be simpler than giving the auto-computed
   * `lnor`). */
  if (is_zero_v3(custom_lnor) || compare_v3v3(lnor_space->vec_lnor, custom_lnor, 1e-4f)) {
    r_clnor_data[0] = r_clnor_data[1] = 0;
    return;
  }

  {
    const float pi2 = (float)(M_PI * 2.0);
    const float cos_alpha = dot_v3v3(lnor_space->vec_lnor, custom_lnor);
    float vec[3], cos_beta;
    float alpha;

    alpha = saacosf(cos_alpha);
    if (alpha > lnor_space->ref_alpha) {
      /* Note we could stick to the [0, pi] range here,
       * but it makes decoding more complex, not worth it. */
      r_clnor_data[0] = unit_float_to_short(-(pi2 - alpha) / (pi2 - lnor_space->ref_alpha));
    }
    else {
      r_clnor_data[0] = unit_float_to_short(alpha / lnor_space->ref_alpha);
    }

    /* Project custom lnor on (vec_ref, vec_ortho) plane. */
    mul_v3_v3fl(vec, lnor_space->vec_lnor, -cos_alpha);
    add_v3_v3(vec, custom_lnor);
    normalize_v3(vec);

    cos_beta = dot_v3v3(lnor_space->vec_ref, vec);

    if (cos_beta < LNOR_SPACE_TRIGO_THRESHOLD) {
      float beta = saacosf(cos_beta);
      if (dot_v3v3(lnor_space->vec_ortho, vec) < 0.0f) {
        beta = pi2 - beta;
      }

      if (beta > lnor_space->ref_beta) {
        r_clnor_data[1] = unit_float_to_short(-(pi2 - beta) / (pi2 - lnor_space->ref_beta));
      }
      else {
        r_clnor_data[1] = unit_float_to_short(beta / lnor_space->ref_beta);
      }
    }
    else {
      r_clnor_data[1] = 0;
    }
  }
}
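
/* Round-trip sketch (not in the original source): encoding then decoding restores the custom
 * normal up to the 16-bit quantization, assuming a valid (non-degenerate) space:
 *
 *   short clnor[2];
 *   float decoded[3];
 *   BKE_lnor_space_custom_normal_to_data(lnor_space, custom_nor, clnor);
 *   BKE_lnor_space_custom_data_to_normal(lnor_space, clnor, decoded);
 *   // `decoded` is now approximately `custom_nor`.
 *
 * This is why custom split normals can be stored persistently as compact per-loop short[2]
 * data while the actual fan spaces are recomputed on demand. */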

#define LOOP_SPLIT_TASK_BLOCK_SIZE 1024

struct LoopSplitTaskData {
  /* Specific to each instance (each task). */

  /** We have to create those outside of tasks, since #MemArena is not thread-safe. */
  MLoopNorSpace *lnor_space;
  float (*lnor)[3];
  const MLoop *ml_curr;
  const MLoop *ml_prev;
  int ml_curr_index;
  int ml_prev_index;
  /** Also used as a flag to switch between single and fan process! */
  const int *e2l_prev;
  int mp_index;

  /** This one is special: it's owned and managed by worker tasks,
   * to avoid having to create it for each loop! */
  BLI_Stack *edge_vectors;

  char pad_c;
};

struct LoopSplitTaskDataCommon {
  /* Read/write.
   * Note we do not need to protect it, though, since two different tasks will *always* affect
   * different elements in the arrays. */
  MLoopNorSpaceArray *lnors_spacearr;
  float (*loopnors)[3];
  short (*clnors_data)[2];

  /* Read-only. */
  const MVert *mverts;
  const MEdge *medges;
  const MLoop *mloops;
  const MPoly *mpolys;
  int (*edge_to_loops)[2];
  int *loop_to_poly;
  const float (*polynors)[3];
  const float (*vert_normals)[3];

  int numEdges;
  int numLoops;
  int numPolys;
};

#define INDEX_UNSET INT_MIN
#define INDEX_INVALID -1
/* See comment about edge_to_loops below. */
#define IS_EDGE_SHARP(_e2l) (ELEM((_e2l)[1], INDEX_UNSET, INDEX_INVALID))

static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
                                 const bool check_angle,
                                 const float split_angle,
                                 const bool do_sharp_edges_tag)
{
  const MEdge *medges = data->medges;
  const MLoop *mloops = data->mloops;

  const MPoly *mpolys = data->mpolys;

  const int numEdges = data->numEdges;
  const int numPolys = data->numPolys;

  float(*loopnors)[3] = data->loopnors; /* NOTE: loopnors may be nullptr here. */
  const float(*polynors)[3] = data->polynors;

  int(*edge_to_loops)[2] = data->edge_to_loops;
  int *loop_to_poly = data->loop_to_poly;

  BLI_bitmap *sharp_edges = do_sharp_edges_tag ? BLI_BITMAP_NEW(numEdges, __func__) : nullptr;

  const MPoly *mp;
  int mp_index;

  const float split_angle_cos = check_angle ? cosf(split_angle) : -1.0f;

  for (mp = mpolys, mp_index = 0; mp_index < numPolys; mp++, mp_index++) {
    const MLoop *ml_curr;
    int *e2l;
    int ml_curr_index = mp->loopstart;
    const int ml_last_index = (ml_curr_index + mp->totloop) - 1;

    ml_curr = &mloops[ml_curr_index];

    for (; ml_curr_index <= ml_last_index; ml_curr++, ml_curr_index++) {
      e2l = edge_to_loops[ml_curr->e];

      loop_to_poly[ml_curr_index] = mp_index;

      /* Pre-populate all loop normals as if their verts were all smooth.
       * This way we don't have to compute those later! */
      if (loopnors) {
        copy_v3_v3(loopnors[ml_curr_index], data->vert_normals[ml_curr->v]);
      }

      /* Check whether current edge might be smooth or sharp. */
      if ((e2l[0] | e2l[1]) == 0) {
        /* 'Empty' edge until now, set e2l[0] (and e2l[1] to INDEX_UNSET to tag it as unset). */
        e2l[0] = ml_curr_index;
        /* We have to check this here too, else we might miss some flat faces!!! */
        e2l[1] = (mp->flag & ME_SMOOTH) ? INDEX_UNSET : INDEX_INVALID;
      }
      else if (e2l[1] == INDEX_UNSET) {
        const bool is_angle_sharp = (check_angle &&
                                     dot_v3v3(polynors[loop_to_poly[e2l[0]]], polynors[mp_index]) <
                                         split_angle_cos);

        /* Second loop using this edge, time to test its sharpness.
         * An edge is sharp if it is tagged as such, or its face is not smooth,
         * or both polys have opposed (flipped) normals, i.e. both loops on the same edge share
         * the same vertex, or the angle between both its polys' normals is above the split_angle
         * value. */
        if (!(mp->flag & ME_SMOOTH) || (medges[ml_curr->e].flag & ME_SHARP) ||
            ml_curr->v == mloops[e2l[0]].v || is_angle_sharp) {
          /* NOTE: we are sure that loop != 0 here ;). */
          e2l[1] = INDEX_INVALID;

          /* We want to avoid tagging edges as sharp when it is already defined as such by
           * other causes than the angle threshold. */
          if (do_sharp_edges_tag && is_angle_sharp) {
            BLI_BITMAP_SET(sharp_edges, ml_curr->e, true);
          }
        }
        else {
          e2l[1] = ml_curr_index;
        }
      }
      else if (!IS_EDGE_SHARP(e2l)) {
        /* More than two loops using this edge, tag as sharp if not yet done. */
        e2l[1] = INDEX_INVALID;

        /* We want to avoid tagging edges as sharp when it is already defined as such by
         * other causes than the angle threshold. */
        if (do_sharp_edges_tag) {
          BLI_BITMAP_SET(sharp_edges, ml_curr->e, false);
        }
      }
      /* Else, edge is already 'disqualified' (i.e. sharp)! */
    }
  }

  /* If requested, do actual tagging of edges as sharp in another loop. */
  if (do_sharp_edges_tag) {
    MEdge *me;
    int me_index;
    for (me = (MEdge *)medges, me_index = 0; me_index < numEdges; me++, me_index++) {
      if (BLI_BITMAP_TEST(sharp_edges, me_index)) {
        me->flag |= ME_SHARP;
      }
    }

    MEM_freeN(sharp_edges);
  }
}

void BKE_edges_sharp_from_angle_set(const struct MVert *mverts,
                                    const int UNUSED(numVerts),
                                    struct MEdge *medges,
                                    const int numEdges,
                                    struct MLoop *mloops,
                                    const int numLoops,
                                    struct MPoly *mpolys,
                                    const float (*polynors)[3],
                                    const int numPolys,
                                    const float split_angle)
{
  if (split_angle >= (float)M_PI) {
    /* Nothing to do! */
    return;
  }

  /* Mapping edge -> loops. See BKE_mesh_normals_loop_split() for details. */
  int(*edge_to_loops)[2] = (int(*)[2])MEM_calloc_arrayN(
      (size_t)numEdges, sizeof(*edge_to_loops), __func__);

  /* Simple mapping from a loop to its polygon index. */
  int *loop_to_poly = (int *)MEM_malloc_arrayN((size_t)numLoops, sizeof(*loop_to_poly), __func__);

  LoopSplitTaskDataCommon common_data = {};
  common_data.mverts = mverts;
  common_data.medges = medges;
  common_data.mloops = mloops;
  common_data.mpolys = mpolys;
  common_data.edge_to_loops = edge_to_loops;
  common_data.loop_to_poly = loop_to_poly;
  common_data.polynors = polynors;
  common_data.numEdges = numEdges;
  common_data.numPolys = numPolys;

  mesh_edges_sharp_tag(&common_data, true, split_angle, true);

  MEM_freeN(edge_to_loops);
  MEM_freeN(loop_to_poly);
}
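
/* Usage sketch (not in the original source): an importer or an operator enabling "Auto Smooth"
 * could tag sharp edges from an angle threshold (in radians) like so:
 *
 *   BKE_edges_sharp_from_angle_set(mesh->mvert, mesh->totvert,
 *                                  mesh->medge, mesh->totedge,
 *                                  mesh->mloop, mesh->totloop,
 *                                  mesh->mpoly, BKE_mesh_poly_normals_ensure(mesh),
 *                                  mesh->totpoly, DEG2RADF(30.0f));
 *
 * Edges whose two faces meet at more than the given angle get ME_SHARP set on them. */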

static void loop_manifold_fan_around_vert_next(const MLoop *mloops,
                                               const MPoly *mpolys,
                                               const int *loop_to_poly,
                                               const int *e2lfan_curr,
                                               const uint mv_pivot_index,
                                               const MLoop **r_mlfan_curr,
                                               int *r_mlfan_curr_index,
                                               int *r_mlfan_vert_index,
                                               int *r_mpfan_curr_index)
{
  const MLoop *mlfan_next;
  const MPoly *mpfan_next;

  /* WARNING: This is rather complex!
   * We have to find our next edge around the vertex (fan mode).
   * First we find the next loop, which is either previous or next to mlfan_curr_index, depending
   * on whether both loops using the current edge are in the same direction or not, and whether
   * mlfan_curr_index actually uses the vertex we are fanning around!
   * mlfan_curr_index is the index of mlfan_next here, and mlfan_next is not the real next one
   * (i.e. not the future `mlfan_curr`). */
  *r_mlfan_curr_index = (e2lfan_curr[0] == *r_mlfan_curr_index) ? e2lfan_curr[1] : e2lfan_curr[0];
  *r_mpfan_curr_index = loop_to_poly[*r_mlfan_curr_index];

  BLI_assert(*r_mlfan_curr_index >= 0);
  BLI_assert(*r_mpfan_curr_index >= 0);

  mlfan_next = &mloops[*r_mlfan_curr_index];
  mpfan_next = &mpolys[*r_mpfan_curr_index];
  if (((*r_mlfan_curr)->v == mlfan_next->v && (*r_mlfan_curr)->v == mv_pivot_index) ||
      ((*r_mlfan_curr)->v != mlfan_next->v && (*r_mlfan_curr)->v != mv_pivot_index)) {
    /* We need the previous loop, but the current one is our vertex's loop. */
    *r_mlfan_vert_index = *r_mlfan_curr_index;
    if (--(*r_mlfan_curr_index) < mpfan_next->loopstart) {
      *r_mlfan_curr_index = mpfan_next->loopstart + mpfan_next->totloop - 1;
    }
  }
  else {
    /* We need the next loop, which is also our vertex's loop. */
    if (++(*r_mlfan_curr_index) >= mpfan_next->loopstart + mpfan_next->totloop) {
      *r_mlfan_curr_index = mpfan_next->loopstart;
    }
    *r_mlfan_vert_index = *r_mlfan_curr_index;
  }
  *r_mlfan_curr = &mloops[*r_mlfan_curr_index];
  /* And now we are back in sync, mlfan_curr_index is the index of `mlfan_curr`! Pff! */
}

static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  const short(*clnors_data)[2] = common_data->clnors_data;

  const MVert *mverts = common_data->mverts;
  const MEdge *medges = common_data->medges;
  const float(*polynors)[3] = common_data->polynors;

  MLoopNorSpace *lnor_space = data->lnor_space;
  float(*lnor)[3] = data->lnor;
  const MLoop *ml_curr = data->ml_curr;
  const MLoop *ml_prev = data->ml_prev;
  const int ml_curr_index = data->ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
  const int ml_prev_index = data->ml_prev_index;
  const int *e2l_prev = data->e2l_prev;
#endif
  const int mp_index = data->mp_index;

  /* Simple case (both edges around that vertex are sharp in the current polygon),
   * this loop just takes its poly normal.
   */
  copy_v3_v3(*lnor, polynors[mp_index]);

#if 0
  printf("BASIC: handling loop %d / edge %d / vert %d / poly %d\n",
         ml_curr_index,
         ml_curr->e,
         ml_curr->v,
         mp_index);
#endif

  /* If needed, generate this (simple!) lnor space. */
  if (lnors_spacearr) {
    float vec_curr[3], vec_prev[3];

    const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
    const MVert *mv_pivot = &mverts[mv_pivot_index];
    const MEdge *me_curr = &medges[ml_curr->e];
    const MVert *mv_2 = (me_curr->v1 == mv_pivot_index) ? &mverts[me_curr->v2] :
                                                          &mverts[me_curr->v1];
    const MEdge *me_prev = &medges[ml_prev->e];
    const MVert *mv_3 = (me_prev->v1 == mv_pivot_index) ? &mverts[me_prev->v2] :
                                                          &mverts[me_prev->v1];

    sub_v3_v3v3(vec_curr, mv_2->co, mv_pivot->co);
    normalize_v3(vec_curr);
    sub_v3_v3v3(vec_prev, mv_3->co, mv_pivot->co);
    normalize_v3(vec_prev);

    BKE_lnor_space_define(lnor_space, *lnor, vec_curr, vec_prev, nullptr);
    /* We know there is only one loop in this space, no need to create a link-list in this case. */
    BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, nullptr, true);

    if (clnors_data) {
      BKE_lnor_space_custom_data_to_normal(lnor_space, clnors_data[ml_curr_index], *lnor);
    }
  }
}

static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  float(*loopnors)[3] = common_data->loopnors;
  short(*clnors_data)[2] = common_data->clnors_data;

  const MVert *mverts = common_data->mverts;
  const MEdge *medges = common_data->medges;
  const MLoop *mloops = common_data->mloops;
  const MPoly *mpolys = common_data->mpolys;
  const int(*edge_to_loops)[2] = common_data->edge_to_loops;
  const int *loop_to_poly = common_data->loop_to_poly;
  const float(*polynors)[3] = common_data->polynors;

  MLoopNorSpace *lnor_space = data->lnor_space;
#if 0 /* Not needed for 'fan' loops. */
  float(*lnor)[3] = data->lnor;
#endif
  const MLoop *ml_curr = data->ml_curr;
  const MLoop *ml_prev = data->ml_prev;
  const int ml_curr_index = data->ml_curr_index;
  const int ml_prev_index = data->ml_prev_index;
  const int mp_index = data->mp_index;
  const int *e2l_prev = data->e2l_prev;

  BLI_Stack *edge_vectors = data->edge_vectors;

  /* Sigh! We have to fan around the current vertex, until we find the other non-smooth edge,
   * and accumulate face normals into the vertex!
   * Note in case this vertex has only one sharp edge, this is a waste because the normal is the
   * same as the vertex normal, but I do not see any easy way to detect that (would need to count
   * the number of sharp edges per vertex; I doubt the additional memory usage would be worth it,
   * especially as it should not be a common case in real-life meshes anyway). */
  const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
  const MVert *mv_pivot = &mverts[mv_pivot_index];

  /* `ml_curr` would be mlfan_prev if we needed that one. */
  const MEdge *me_org = &medges[ml_curr->e];

  const int *e2lfan_curr;
  float vec_curr[3], vec_prev[3], vec_org[3];
  const MLoop *mlfan_curr;
  float lnor[3] = {0.0f, 0.0f, 0.0f};
  /* `mlfan_vert_index`: the loop of our current edge might not be the loop of our current
   * vertex! */
  int mlfan_curr_index, mlfan_vert_index, mpfan_curr_index;

  /* We validate clnors data on the fly - cheapest way to do it! */
  int clnors_avg[2] = {0, 0};
  short(*clnor_ref)[2] = nullptr;
  int clnors_count = 0;
  bool clnors_invalid = false;

  /* Temp loop normal stack. */
  BLI_SMALLSTACK_DECLARE(normal, float *);
  /* Temp clnors stack. */
  BLI_SMALLSTACK_DECLARE(clnors, short *);

  e2lfan_curr = e2l_prev;
  mlfan_curr = ml_prev;
  mlfan_curr_index = ml_prev_index;
  mlfan_vert_index = ml_curr_index;
  mpfan_curr_index = mp_index;

  BLI_assert(mlfan_curr_index >= 0);
  BLI_assert(mlfan_vert_index >= 0);
  BLI_assert(mpfan_curr_index >= 0);

  /* Only need to compute the previous edge's vector once, then we can just reuse the old current
   * one! */
  {
    const MVert *mv_2 = (me_org->v1 == mv_pivot_index) ? &mverts[me_org->v2] : &mverts[me_org->v1];

    sub_v3_v3v3(vec_org, mv_2->co, mv_pivot->co);
    normalize_v3(vec_org);
    copy_v3_v3(vec_prev, vec_org);

    if (lnors_spacearr) {
      BLI_stack_push(edge_vectors, vec_org);
    }
  }

  // printf("FAN: vert %d, start edge %d\n", mv_pivot_index, ml_curr->e);

  while (true) {
    const MEdge *me_curr = &medges[mlfan_curr->e];
    /* Compute edge vectors.
     * NOTE: We could pre-compute those into an array, in the first iteration, instead of
     * computing them twice (or more) here. However, the time gained is not worth the memory and
     * time lost, given the fact that this code should not be called that much in real-life
     * meshes. */
    {
      const MVert *mv_2 = (me_curr->v1 == mv_pivot_index) ? &mverts[me_curr->v2] :
                                                            &mverts[me_curr->v1];

      sub_v3_v3v3(vec_curr, mv_2->co, mv_pivot->co);
      normalize_v3(vec_curr);
    }

    // printf("\thandling edge %d / loop %d\n", mlfan_curr->e, mlfan_curr_index);

    {
      /* Code similar to accumulate_vertex_normals_poly_v3. */
      /* Calculate the angle between the two poly edges incident on this vertex. */
      const float fac = saacos(dot_v3v3(vec_curr, vec_prev));
      /* Accumulate */
      madd_v3_v3fl(lnor, polynors[mpfan_curr_index], fac);

      if (clnors_data) {
        /* Accumulate all clnors; if they are not all equal we have to fix that! */
        short(*clnor)[2] = &clnors_data[mlfan_vert_index];
        if (clnors_count) {
          clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
        }
        else {
          clnor_ref = clnor;
        }
        clnors_avg[0] += (*clnor)[0];
        clnors_avg[1] += (*clnor)[1];
        clnors_count++;
        /* We store here a pointer to all custom lnors processed. */
        BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
      }
    }

    /* We store here a pointer to all loop-normals processed. */
    BLI_SMALLSTACK_PUSH(normal, (float *)(loopnors[mlfan_vert_index]));

    if (lnors_spacearr) {
      /* Assign current lnor space to current 'vertex' loop. */
      BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, nullptr, false);
      if (me_curr != me_org) {
        /* We store here all edge-normalized vectors processed. */
        BLI_stack_push(edge_vectors, vec_curr);
      }
    }

    if (IS_EDGE_SHARP(e2lfan_curr) || (me_curr == me_org)) {
      /* The current edge is sharp and we have finished with this fan of faces around this vert,
       * or this vert is smooth, and we have completed a full turn around it. */
      // printf("FAN: Finished!\n");
      break;
    }

    copy_v3_v3(vec_prev, vec_curr);

    /* Find the next loop of the smooth fan. */
    loop_manifold_fan_around_vert_next(mloops,
                                       mpolys,
                                       loop_to_poly,
                                       e2lfan_curr,
                                       mv_pivot_index,
                                       &mlfan_curr,
                                       &mlfan_curr_index,
                                       &mlfan_vert_index,
                                       &mpfan_curr_index);

    e2lfan_curr = edge_to_loops[mlfan_curr->e];
  }

  {
    float lnor_len = normalize_v3(lnor);

    /* If we are generating the lnor spacearr, we can now define the one for this fan,
     * and optionally compute the final lnor from custom data too!
     */
    if (lnors_spacearr) {
      if (UNLIKELY(lnor_len == 0.0f)) {
        /* Use vertex normal as fallback! */
        copy_v3_v3(lnor, loopnors[mlfan_vert_index]);
        lnor_len = 1.0f;
      }

      BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_curr, edge_vectors);

      if (clnors_data) {
        if (clnors_invalid) {
          short *clnor;

          clnors_avg[0] /= clnors_count;
          clnors_avg[1] /= clnors_count;
          /* Fix/update all clnors of this fan with the computed average value. */
          if (G.debug & G_DEBUG) {
            printf("Invalid clnors in this fan!\n");
          }
          while ((clnor = (short *)BLI_SMALLSTACK_POP(clnors))) {
            // print_v2("org clnor", clnor);
            clnor[0] = (short)clnors_avg[0];
            clnor[1] = (short)clnors_avg[1];
          }
          // print_v2("new clnors", clnors_avg);
        }
        /* Extra bonus: since the small-stack is local to this function,
         * no more need to empty it at all cost! */

        BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
      }
    }

    /* In case we get a zero normal here, just use the vertex normal already set! */
    if (LIKELY(lnor_len != 0.0f)) {
      /* Copy back the final computed normal into all related loop-normals. */
      float *nor;

      while ((nor = (float *)BLI_SMALLSTACK_POP(normal))) {
        copy_v3_v3(nor, lnor);
      }
    }
    /* Extra bonus: since the small-stack is local to this function,
     * no more need to empty it at all cost! */
  }
}

static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
                                 LoopSplitTaskData *data,
                                 BLI_Stack *edge_vectors)
{
  BLI_assert(data->ml_curr);
  if (data->e2l_prev) {
    BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
    data->edge_vectors = edge_vectors;
    split_loop_nor_fan_do(common_data, data);
  }
  else {
    /* No need for edge_vectors for the 'single' case! */
    split_loop_nor_single_do(common_data, data);
  }
}

static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
{
  LoopSplitTaskDataCommon *common_data = (LoopSplitTaskDataCommon *)BLI_task_pool_user_data(pool);
  LoopSplitTaskData *data = (LoopSplitTaskData *)taskdata;

  /* Temp edge vectors stack, only used when computing the lnor spacearr. */
  BLI_Stack *edge_vectors = common_data->lnors_spacearr ?
                                BLI_stack_new(sizeof(float[3]), __func__) :
                                nullptr;

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(loop_split_worker);
#endif

  for (int i = 0; i < LOOP_SPLIT_TASK_BLOCK_SIZE; i++, data++) {
    /* A nullptr ml_curr is used to tag ended data! */
    if (data->ml_curr == nullptr) {
      break;
    }

    loop_split_worker_do(common_data, data, edge_vectors);
  }

  if (edge_vectors) {
    BLI_stack_free(edge_vectors);
  }

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(loop_split_worker);
#endif
}

/**
 * Check whether the given loop is part of an unknown-so-far cyclic smooth fan, or not.
 * Needed because cyclic smooth fans have no obvious 'entry point',
 * and yet we need to walk them once, and only once.
 */
static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
                                                         const MPoly *mpolys,
                                                         const int (*edge_to_loops)[2],
                                                         const int *loop_to_poly,
                                                         const int *e2l_prev,
                                                         BLI_bitmap *skip_loops,
                                                         const MLoop *ml_curr,
                                                         const MLoop *ml_prev,
                                                         const int ml_curr_index,
                                                         const int ml_prev_index,
                                                         const int mp_curr_index)
{
  const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
  const int *e2lfan_curr;
  const MLoop *mlfan_curr;
  /* `mlfan_vert_index`: the loop of our current edge might not be the loop of our current
   * vertex! */
  int mlfan_curr_index, mlfan_vert_index, mpfan_curr_index;

  e2lfan_curr = e2l_prev;
  if (IS_EDGE_SHARP(e2lfan_curr)) {
    /* Sharp loop, so not a cyclic smooth fan. */
    return false;
  }

  mlfan_curr = ml_prev;
  mlfan_curr_index = ml_prev_index;
  mlfan_vert_index = ml_curr_index;
  mpfan_curr_index = mp_curr_index;

  BLI_assert(mlfan_curr_index >= 0);
  BLI_assert(mlfan_vert_index >= 0);
  BLI_assert(mpfan_curr_index >= 0);

  BLI_assert(!BLI_BITMAP_TEST(skip_loops, mlfan_vert_index));
  BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);

  while (true) {
    /* Find the next loop of the smooth fan. */
    loop_manifold_fan_around_vert_next(mloops,
                                       mpolys,
                                       loop_to_poly,
                                       e2lfan_curr,
                                       mv_pivot_index,
                                       &mlfan_curr,
                                       &mlfan_curr_index,
                                       &mlfan_vert_index,
                                       &mpfan_curr_index);

    e2lfan_curr = edge_to_loops[mlfan_curr->e];

    if (IS_EDGE_SHARP(e2lfan_curr)) {
      /* Sharp loop/edge, so not a cyclic smooth fan. */
      return false;
    }
    /* Smooth loop/edge. */
    if (BLI_BITMAP_TEST(skip_loops, mlfan_vert_index)) {
      if (mlfan_vert_index == ml_curr_index) {
        /* We walked around a whole cyclic smooth fan without finding any already-processed loop,
         * means we can use the initial `ml_curr` / `ml_prev` edge as start for this smooth fan. */
        return true;
      }
      /* Already checked in some previous looping, we can abort. */
      return false;
    }

    /* We can skip it in future, and keep checking the smooth fan. */
    BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);
  }
}

static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common_data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  float(*loopnors)[3] = common_data->loopnors;

  const MLoop *mloops = common_data->mloops;
  const MPoly *mpolys = common_data->mpolys;
  const int *loop_to_poly = common_data->loop_to_poly;
  const int(*edge_to_loops)[2] = common_data->edge_to_loops;
  const int numLoops = common_data->numLoops;
  const int numPolys = common_data->numPolys;

  const MPoly *mp;
  int mp_index;

  const MLoop *ml_curr;
  const MLoop *ml_prev;
  int ml_curr_index;
  int ml_prev_index;

  BLI_bitmap *skip_loops = BLI_BITMAP_NEW(numLoops, __func__);

  LoopSplitTaskData *data_buff = nullptr;
  int data_idx = 0;

  /* Temp edge vectors stack, only used when computing the lnor spacearr
   * (and we are not multi-threading). */
  BLI_Stack *edge_vectors = nullptr;

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(loop_split_generator);
#endif

  if (!pool) {
    if (lnors_spacearr) {
      edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
    }
  }

  /* We now know the edges that can be smoothed (with their vector, and their two loops),
   * and the edges that will be hard! Now, time to generate the normals.
   */
  for (mp = mpolys, mp_index = 0; mp_index < numPolys; mp++, mp_index++) {
    float(*lnors)[3];
    const int ml_last_index = (mp->loopstart + mp->totloop) - 1;
    ml_curr_index = mp->loopstart;
    ml_prev_index = ml_last_index;

    ml_curr = &mloops[ml_curr_index];
    ml_prev = &mloops[ml_prev_index];
    lnors = &loopnors[ml_curr_index];

    for (; ml_curr_index <= ml_last_index; ml_curr++, ml_curr_index++, lnors++) {
      const int *e2l_curr = edge_to_loops[ml_curr->e];
      const int *e2l_prev = edge_to_loops[ml_prev->e];

#if 0
      printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",
             ml_curr_index,
             ml_curr->e,
             ml_curr->v,
             IS_EDGE_SHARP(e2l_curr),
             BLI_BITMAP_TEST_BOOL(skip_loops, ml_curr_index));
#endif

      /* A smooth edge, we have to check for the cyclic smooth fan case.
       * If we find a new, never-processed cyclic smooth fan, we can do it now using that
       * loop/edge as 'entry point'; otherwise we can skip it. */

      /* NOTE: In theory, we could make #loop_split_generator_check_cyclic_smooth_fan() store
       * mlfan_vert_index'es and edge indices in two stacks, to avoid having to fan again around
       * the vert during the actual computation of `clnor` & `clnorspace`.
       * However, this would complicate the code, add more memory usage, and despite its logical
       * complexity, #loop_manifold_fan_around_vert_next() is quite cheap in terms of CPU cycles,
       * so really think it's not worth it. */
      if (!IS_EDGE_SHARP(e2l_curr) && (BLI_BITMAP_TEST(skip_loops, ml_curr_index) ||
                                       !loop_split_generator_check_cyclic_smooth_fan(mloops,
                                                                                     mpolys,
                                                                                     edge_to_loops,
                                                                                     loop_to_poly,
                                                                                     e2l_prev,
                                                                                     skip_loops,
                                                                                     ml_curr,
                                                                                     ml_prev,
                                                                                     ml_curr_index,
                                                                                     ml_prev_index,
                                                                                     mp_index))) {
        // printf("SKIPPING!\n");
      }
      else {
        LoopSplitTaskData *data, data_local;

        // printf("PROCESSING!\n");

        if (pool) {
          if (data_idx == 0) {
            data_buff = (LoopSplitTaskData *)MEM_calloc_arrayN(
                LOOP_SPLIT_TASK_BLOCK_SIZE, sizeof(*data_buff), __func__);
          }
          data = &data_buff[data_idx];
        }
        else {
          data = &data_local;
          memset(data, 0, sizeof(*data));
        }

        if (IS_EDGE_SHARP(e2l_curr) && IS_EDGE_SHARP(e2l_prev)) {
          data->lnor = lnors;
          data->ml_curr = ml_curr;
          data->ml_prev = ml_prev;
          data->ml_curr_index = ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
          data->ml_prev_index = ml_prev_index;
          data->e2l_prev = nullptr; /* Tag as 'single' task. */
#endif
          data->mp_index = mp_index;
          if (lnors_spacearr) {
            data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
          }
        }
        /* We *do not need* to check/tag loops as already computed!
         * Due to the fact that a loop only links to one of its two edges,
         * the same fan *will never be walked more than once!*
         * Since we consider edges having neighbor polys with inverted
         * (flipped) normals as sharp, we are sure that no fan will be skipped,
         * even only considering the case (sharp curr_edge, smooth prev_edge),
         * and not the alternative (smooth curr_edge, sharp prev_edge).
         * All this due/thanks to the link between normals and loop ordering (i.e. winding).
         */
        else {
#if 0 /* Not needed for 'fan' loops. */
          data->lnor = lnors;
#endif
          data->ml_curr = ml_curr;
          data->ml_prev = ml_prev;
          data->ml_curr_index = ml_curr_index;
          data->ml_prev_index = ml_prev_index;
          data->e2l_prev = e2l_prev; /* Also tag as 'fan' task. */
          data->mp_index = mp_index;
          if (lnors_spacearr) {
            data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
          }
        }

        if (pool) {
          data_idx++;
          if (data_idx == LOOP_SPLIT_TASK_BLOCK_SIZE) {
            BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
            data_idx = 0;
          }
        }
        else {
          loop_split_worker_do(common_data, data, edge_vectors);
        }
      }

      ml_prev = ml_curr;
      ml_prev_index = ml_curr_index;
    }
  }

  /* Last block of data. Since it is calloc'ed and we use the first nullptr item as stopper,
   * everything is fine. */
  if (pool && data_idx) {
    BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
  }

  if (edge_vectors) {
    BLI_stack_free(edge_vectors);
  }
  MEM_freeN(skip_loops);

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(loop_split_generator);
#endif
}

void BKE_mesh_normals_loop_split(const MVert *mverts,
                                 const float (*vert_normals)[3],
                                 const int UNUSED(numVerts),
                                 MEdge *medges,
                                 const int numEdges,
                                 MLoop *mloops,
                                 float (*r_loopnors)[3],
                                 const int numLoops,
                                 MPoly *mpolys,
                                 const float (*polynors)[3],
                                 const int numPolys,
                                 const bool use_split_normals,
                                 const float split_angle,
                                 MLoopNorSpaceArray *r_lnors_spacearr,
                                 short (*clnors_data)[2],
                                 int *r_loop_to_poly)
{
  /* For now this is not supported.
   * If we do not use split normals, we do not generate anything fancy! */
  BLI_assert(use_split_normals || !(r_lnors_spacearr));

  if (!use_split_normals) {
    /* In this case, we simply fill lnors with vnors (or fnors for flat faces), quite simple!
     * Note this is done here to keep some logic and consistency in this quite complex code,
     * since we may want to use lnors even when the mesh's 'autosmooth' is disabled
     * (see e.g. mesh mapping code).
     * As usual, we could handle that on a case-by-case basis,
     * but it is simpler to keep it well confined here. */
    int mp_index;

    for (mp_index = 0; mp_index < numPolys; mp_index++) {
      MPoly *mp = &mpolys[mp_index];
      int ml_index = mp->loopstart;
      const int ml_index_end = ml_index + mp->totloop;
      const bool is_poly_flat = ((mp->flag & ME_SMOOTH) == 0);

      for (; ml_index < ml_index_end; ml_index++) {
        if (r_loop_to_poly) {
          r_loop_to_poly[ml_index] = mp_index;
        }
        if (is_poly_flat) {
          copy_v3_v3(r_loopnors[ml_index], polynors[mp_index]);
        }
        else {
          copy_v3_v3(r_loopnors[ml_index], vert_normals[mloops[ml_index].v]);
        }
      }
    }
    return;
  }

  /**
   * Mapping edge -> loops.
   * If that edge is used by more than two loops (polys),
   * it is always sharp (and tagged as such, see below).
   * We also use the second loop index as a kind of flag:
   *
   * - smooth edge: > 0.
   * - sharp edge: < 0 (INDEX_INVALID || INDEX_UNSET).
   * - unset: INDEX_UNSET.
   *
   * Note that currently we only have two values for the second loop of sharp edges.
   * However, if needed, we can store the negated value of the loop index instead of
   * INDEX_INVALID to retrieve the real value later in code.
   * Note also that loose edges always have both values set to 0! */
  int(*edge_to_loops)[2] = (int(*)[2])MEM_calloc_arrayN(
      (size_t)numEdges, sizeof(*edge_to_loops), __func__);

  /* Simple mapping from a loop to its polygon index. */
  int *loop_to_poly = r_loop_to_poly ? r_loop_to_poly :
                                       (int *)MEM_malloc_arrayN(
                                           (size_t)numLoops, sizeof(*loop_to_poly), __func__);

  /* When using custom loop normals, disable the angle feature! */
  const bool check_angle = (split_angle < (float)M_PI) && (clnors_data == nullptr);

  MLoopNorSpaceArray _lnors_spacearr = {nullptr};

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(BKE_mesh_normals_loop_split);
#endif

  if (!r_lnors_spacearr && clnors_data) {
    /* We need to compute the lnor spacearr if some custom lnor data are given to us! */
    r_lnors_spacearr = &_lnors_spacearr;
  }
  if (r_lnors_spacearr) {
    BKE_lnor_spacearr_init(r_lnors_spacearr, numLoops, MLNOR_SPACEARR_LOOP_INDEX);
  }

  /* Init data common to all tasks. */
  LoopSplitTaskDataCommon common_data;
  common_data.lnors_spacearr = r_lnors_spacearr;
  common_data.loopnors = r_loopnors;
  common_data.clnors_data = clnors_data;
  common_data.mverts = mverts;
  common_data.medges = medges;
  common_data.mloops = mloops;
  common_data.mpolys = mpolys;
  common_data.edge_to_loops = edge_to_loops;
  common_data.loop_to_poly = loop_to_poly;
  common_data.polynors = polynors;
  common_data.vert_normals = vert_normals;
  common_data.numEdges = numEdges;
  common_data.numLoops = numLoops;
  common_data.numPolys = numPolys;

  /* This first loop checks which edges are actually smooth, and computes edge vectors. */
  mesh_edges_sharp_tag(&common_data, check_angle, split_angle, false);

  if (numLoops < LOOP_SPLIT_TASK_BLOCK_SIZE * 8) {
    /* Not enough loops to be worth the whole threading overhead. */
    loop_split_generator(nullptr, &common_data);
  }
  else {
    TaskPool *task_pool = BLI_task_pool_create(&common_data, TASK_PRIORITY_HIGH);

    loop_split_generator(task_pool, &common_data);

    BLI_task_pool_work_and_wait(task_pool);

    BLI_task_pool_free(task_pool);
  }

  MEM_freeN(edge_to_loops);
  if (!r_loop_to_poly) {
    MEM_freeN(loop_to_poly);
  }

  if (r_lnors_spacearr) {
    if (r_lnors_spacearr == &_lnors_spacearr) {
      BKE_lnor_spacearr_free(r_lnors_spacearr);
    }
  }

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(BKE_mesh_normals_loop_split);
#endif
}
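
/* Usage sketch (not in the original source): computing split ("loop") normals for a mesh with
 * auto-smooth, without requesting lnor spaces or custom-normal handling:
 *
 *   float(*loopnors)[3] = (float(*)[3])MEM_calloc_arrayN(
 *       (size_t)mesh->totloop, sizeof(*loopnors), __func__);
 *   BKE_mesh_normals_loop_split(mesh->mvert, BKE_mesh_vertex_normals_ensure(mesh),
 *                               mesh->totvert, mesh->medge, mesh->totedge,
 *                               mesh->mloop, loopnors, mesh->totloop,
 *                               mesh->mpoly, BKE_mesh_poly_normals_ensure(mesh),
 *                               mesh->totpoly, true, mesh->smoothresh,
 *                               nullptr, nullptr, nullptr);
 *
 * Passing a non-null MLoopNorSpaceArray would additionally build the per-fan spaces needed to
 * encode/decode custom normals. */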

#undef INDEX_UNSET
#undef INDEX_INVALID
#undef IS_EDGE_SHARP
1743 
1753 static void mesh_normals_loop_custom_set(const MVert *mverts,
1754  const float (*vert_normals)[3],
1755  const int numVerts,
1756  MEdge *medges,
1757  const int numEdges,
1758  MLoop *mloops,
1759  float (*r_custom_loopnors)[3],
1760  const int numLoops,
1761  MPoly *mpolys,
1762  const float (*polynors)[3],
1763  const int numPolys,
1764  short (*r_clnors_data)[2],
1765  const bool use_vertices)
1766 {
1767  /* We *may* make that poor #BKE_mesh_normals_loop_split() even more complex by making it handling
1768  * that feature too, would probably be more efficient in absolute.
1769  * However, this function *is not* performance-critical, since it is mostly expected to be called
1770  * by io add-ons when importing custom normals, and modifier
1771  * (and perhaps from some editing tools later?).
1772  * So better to keep some simplicity here, and just call #BKE_mesh_normals_loop_split() twice! */
1773  MLoopNorSpaceArray lnors_spacearr = {nullptr};
1774  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)numLoops, __func__);
1775  float(*lnors)[3] = (float(*)[3])MEM_calloc_arrayN((size_t)numLoops, sizeof(*lnors), __func__);
1776  int *loop_to_poly = (int *)MEM_malloc_arrayN((size_t)numLoops, sizeof(int), __func__);
1777  /* In this case we always consider split nors as ON,
1778  * and do not want to use angle to define smooth fans! */
1779  const bool use_split_normals = true;
1780  const float split_angle = (float)M_PI;
1781 
1782  BLI_SMALLSTACK_DECLARE(clnors_data, short *);
1783 
1784  /* Compute current lnor spacearr. */
1786  vert_normals,
1787  numVerts,
1788  medges,
1789  numEdges,
1790  mloops,
1791  lnors,
1792  numLoops,
1793  mpolys,
1794  polynors,
1795  numPolys,
1796  use_split_normals,
1797  split_angle,
1798  &lnors_spacearr,
1799  nullptr,
1800  loop_to_poly);
1801 
1802  /* Set all given zero vectors to their default value. */
1803  if (use_vertices) {
1804  for (int i = 0; i < numVerts; i++) {
1805  if (is_zero_v3(r_custom_loopnors[i])) {
1806  copy_v3_v3(r_custom_loopnors[i], vert_normals[i]);
1807  }
1808  }
1809  }
1810  else {
1811  for (int i = 0; i < numLoops; i++) {
1812  if (is_zero_v3(r_custom_loopnors[i])) {
1813  copy_v3_v3(r_custom_loopnors[i], lnors[i]);
1814  }
1815  }
1816  }
1817 
1818  BLI_assert(lnors_spacearr.data_type == MLNOR_SPACEARR_LOOP_INDEX);
1819 
1820  /* Now, check each current smooth fan (one lnor space per smooth fan!),
1821  * and if all its matching custom lnors are not (enough) equal, add sharp edges as needed.
1822  * This way, next time we run BKE_mesh_normals_loop_split(), we'll get lnor spacearr/smooth fans
1823  * matching given custom lnors.
1824  * Note this code *will never* unsharp edges! And quite obviously,
1825  * when we set custom normals per vertices, running this is absolutely useless. */
1826  if (!use_vertices) {
1827  for (int i = 0; i < numLoops; i++) {
1828  if (!lnors_spacearr.lspacearr[i]) {
1829  /* This should not happen in theory, but in some rare case (probably ugly geometry)
1830  * we can get some nullptr loopspacearr at this point. :/
1831  * Maybe we should set those loops' edges as sharp? */
1832  BLI_BITMAP_ENABLE(done_loops, i);
1833  if (G.debug & G_DEBUG) {
1834  printf("WARNING! Getting invalid nullptr loop space for loop %d!\n", i);
1835  }
1836  continue;
1837  }
1838 
1839  if (!BLI_BITMAP_TEST(done_loops, i)) {
1840  /* Notes:
1841  * - In case of mono-loop smooth fan, we have nothing to do.
1842  * - Loops in this linklist are ordered (in reversed order compared to how they were
1843  * discovered by BKE_mesh_normals_loop_split(), but this is not a problem).
1844  * Which means if we find a mismatching clnor,
1845  * we know all remaining loops will have to be in a new, different smooth fan/lnor space.
1846  * - In smooth fan case, we compare each clnor against a ref one,
1847  * to avoid small differences adding up into a real big one in the end!
1848  */
1849  if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1850  BLI_BITMAP_ENABLE(done_loops, i);
1851  continue;
1852  }
1853 
1854  LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
1855  MLoop *prev_ml = nullptr;
1856  const float *org_nor = nullptr;
1857 
1858  while (loops) {
1859  const int lidx = POINTER_AS_INT(loops->link);
1860  MLoop *ml = &mloops[lidx];
1861  const int nidx = lidx;
1862  float *nor = r_custom_loopnors[nidx];
1863 
1864  if (!org_nor) {
1865  org_nor = nor;
1866  }
1867  else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1868  /* Current normal differs too much from org one, we have to tag the edge between
1869  * previous loop's face and current's one as sharp.
1870  * We know those two loops do not point to the same edge,
1871  * since we do not allow reversed winding in a same smooth fan. */
1872  const MPoly *mp = &mpolys[loop_to_poly[lidx]];
1873  const MLoop *mlp =
1874  &mloops[(lidx == mp->loopstart) ? mp->loopstart + mp->totloop - 1 : lidx - 1];
1875  medges[(prev_ml->e == mlp->e) ? prev_ml->e : ml->e].flag |= ME_SHARP;
1876 
1877  org_nor = nor;
1878  }
1879 
1880  prev_ml = ml;
1881  loops = loops->next;
1882  BLI_BITMAP_ENABLE(done_loops, lidx);
1883  }
1884 
 1885  /* We also have to check between the last and first loops,
 1886  * otherwise we may miss some sharp edges here!
 1887  * This is just a simplified version of the while loop above.
 1888  * See T45984. */
1889  loops = lnors_spacearr.lspacearr[i]->loops;
1890  if (loops && org_nor) {
1891  const int lidx = POINTER_AS_INT(loops->link);
1892  MLoop *ml = &mloops[lidx];
1893  const int nidx = lidx;
1894  float *nor = r_custom_loopnors[nidx];
1895 
1896  if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1897  const MPoly *mp = &mpolys[loop_to_poly[lidx]];
1898  const MLoop *mlp =
1899  &mloops[(lidx == mp->loopstart) ? mp->loopstart + mp->totloop - 1 : lidx - 1];
1900  medges[(prev_ml->e == mlp->e) ? prev_ml->e : ml->e].flag |= ME_SHARP;
1901  }
1902  }
1903  }
1904  }
1905 
1906  /* And now, recompute our new auto lnors and lnor spacearr! */
 1907  BKE_lnor_spacearr_clear(&lnors_spacearr);
 1908  BKE_mesh_normals_loop_split(mverts,
1909  vert_normals,
1910  numVerts,
1911  medges,
1912  numEdges,
1913  mloops,
1914  lnors,
1915  numLoops,
1916  mpolys,
1917  polynors,
1918  numPolys,
1919  use_split_normals,
1920  split_angle,
1921  &lnors_spacearr,
1922  nullptr,
1923  loop_to_poly);
1924  }
1925  else {
1926  BLI_bitmap_set_all(done_loops, true, (size_t)numLoops);
1927  }
1928 
1929  /* And we just have to convert plain object-space custom normals to our
1930  * lnor space-encoded ones. */
1931  for (int i = 0; i < numLoops; i++) {
1932  if (!lnors_spacearr.lspacearr[i]) {
1933  BLI_BITMAP_DISABLE(done_loops, i);
1934  if (G.debug & G_DEBUG) {
1935  printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n",
1936  i);
1937  }
1938  continue;
1939  }
1940 
1941  if (BLI_BITMAP_TEST_BOOL(done_loops, i)) {
 1942  /* Note that we accumulate and average all custom normals in the current smooth fan,
 1943  * to avoid getting different clnor data (tiny differences in plain custom normals can
 1944  * give rather huge differences in the computed 2D factors). */
1945  LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
1946  if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1947  BLI_assert(POINTER_AS_INT(loops) == i);
1948  const int nidx = use_vertices ? (int)mloops[i].v : i;
1949  float *nor = r_custom_loopnors[nidx];
1950 
1951  BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]);
1952  BLI_BITMAP_DISABLE(done_loops, i);
1953  }
1954  else {
1955  int avg_nor_count = 0;
1956  float avg_nor[3];
1957  short clnor_data_tmp[2], *clnor_data;
1958 
1959  zero_v3(avg_nor);
1960  while (loops) {
1961  const int lidx = POINTER_AS_INT(loops->link);
1962  const int nidx = use_vertices ? (int)mloops[lidx].v : lidx;
1963  float *nor = r_custom_loopnors[nidx];
1964 
1965  avg_nor_count++;
1966  add_v3_v3(avg_nor, nor);
1967  BLI_SMALLSTACK_PUSH(clnors_data, (short *)r_clnors_data[lidx]);
1968 
1969  loops = loops->next;
1970  BLI_BITMAP_DISABLE(done_loops, lidx);
1971  }
1972 
1973  mul_v3_fl(avg_nor, 1.0f / (float)avg_nor_count);
1974  BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], avg_nor, clnor_data_tmp);
1975 
1976  while ((clnor_data = (short *)BLI_SMALLSTACK_POP(clnors_data))) {
1977  clnor_data[0] = clnor_data_tmp[0];
1978  clnor_data[1] = clnor_data_tmp[1];
1979  }
1980  }
1981  }
1982  }
1983 
1984  MEM_freeN(lnors);
1985  MEM_freeN(loop_to_poly);
1986  MEM_freeN(done_loops);
1987  BKE_lnor_spacearr_free(&lnors_spacearr);
1988 }
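[Editor's note] The sharp-edge tagging in the function above reduces to a dot-product test between two unit custom normals, compared against LNOR_SPACE_TRIGO_THRESHOLD. The standalone sketch below illustrates that test; the threshold value here (the cosine of 1 degree) is a placeholder chosen for illustration, not the constant defined earlier in this file.

#include <cmath>
#include <cstdio>

/* Hypothetical stand-in for LNOR_SPACE_TRIGO_THRESHOLD: cos(1 degree).
 * Normals further apart than ~1 degree are treated as belonging to
 * different smooth fans, so the edge between their faces would be
 * tagged sharp (ME_SHARP in the code above). */
static const float sketch_threshold = std::cos(1.0f * 3.14159265f / 180.0f);

/* Both inputs are assumed normalized, so their dot product is the
 * cosine of the angle between them. */
static bool sketch_normals_match(const float a[3], const float b[3])
{
  const float d = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
  return d >= sketch_threshold;
}

int main()
{
  const float n1[3] = {0.0f, 0.0f, 1.0f};
  const float n2[3] = {0.0f, 0.0349f, 0.9994f}; /* ~2 degrees off n1. */
  std::printf("same fan: %d\n", sketch_normals_match(n1, n2)); /* Prints 0. */
  return 0;
}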
 1989 
 1990 void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
 1991  const float (*vert_normals)[3],
1992  const int numVerts,
1993  MEdge *medges,
1994  const int numEdges,
1995  MLoop *mloops,
1996  float (*r_custom_loopnors)[3],
1997  const int numLoops,
1998  MPoly *mpolys,
1999  const float (*polynors)[3],
2000  const int numPolys,
2001  short (*r_clnors_data)[2])
 2002 {
 2003  mesh_normals_loop_custom_set(mverts,
2004  vert_normals,
2005  numVerts,
2006  medges,
2007  numEdges,
2008  mloops,
2009  r_custom_loopnors,
2010  numLoops,
2011  mpolys,
2012  polynors,
2013  numPolys,
2014  r_clnors_data,
2015  false);
2016 }
 2017 
 2018 void BKE_mesh_normals_loop_custom_from_vertices_set(const MVert *mverts,
2019  const float (*vert_normals)[3],
2020  float (*r_custom_vertnors)[3],
2021  const int numVerts,
2022  MEdge *medges,
2023  const int numEdges,
2024  MLoop *mloops,
2025  const int numLoops,
2026  MPoly *mpolys,
2027  const float (*polynors)[3],
2028  const int numPolys,
2029  short (*r_clnors_data)[2])
 2030 {
 2031  mesh_normals_loop_custom_set(mverts,
2032  vert_normals,
2033  numVerts,
2034  medges,
2035  numEdges,
2036  mloops,
2037  r_custom_vertnors,
2038  numLoops,
2039  mpolys,
2040  polynors,
2041  numPolys,
2042  r_clnors_data,
2043  true);
2044 }
2045 
2046 static void mesh_set_custom_normals(Mesh *mesh, float (*r_custom_nors)[3], const bool use_vertices)
2047 {
2048  short(*clnors)[2];
2049  const int numloops = mesh->totloop;
2050 
2051  clnors = (short(*)[2])CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL);
2052  if (clnors != nullptr) {
2053  memset(clnors, 0, sizeof(*clnors) * (size_t)numloops);
2054  }
2055  else {
2056  clnors = (short(*)[2])CustomData_add_layer(
2057  &mesh->ldata, CD_CUSTOMLOOPNORMAL, CD_CALLOC, nullptr, numloops);
2058  }
 2059 
 2060  mesh_normals_loop_custom_set(mesh->mvert,
 2061  BKE_mesh_vertex_normals_ensure(mesh),
2062  mesh->totvert,
2063  mesh->medge,
2064  mesh->totedge,
2065  mesh->mloop,
2066  r_custom_nors,
2067  mesh->totloop,
 2068  mesh->mpoly,
 2069  BKE_mesh_poly_normals_ensure(mesh),
2070  mesh->totpoly,
2071  clnors,
2072  use_vertices);
2073 }
2074 
2075 void BKE_mesh_set_custom_normals(Mesh *mesh, float (*r_custom_loopnors)[3])
2076 {
2077  mesh_set_custom_normals(mesh, r_custom_loopnors, false);
2078 }
2079 
2080 void BKE_mesh_set_custom_normals_from_vertices(Mesh *mesh, float (*r_custom_vertnors)[3])
2081 {
2082  mesh_set_custom_normals(mesh, r_custom_vertnors, true);
2083 }
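[Editor's note] A hedged usage sketch for the two public setters above, assuming a valid mesh with plain (non-BMesh) data and the includes from the top of this file: the caller supplies one object-space normal per loop (or per vertex for the _from_vertices variant), and zero vectors are replaced by the auto-computed normals as shown in the code above. The helper name below is hypothetical.

/* Sketch only: sets every loop normal to object-space +Z.
 * Assumes `mesh` is a valid Mesh with mesh->totloop loops. */
static void sketch_set_loop_normals_up(Mesh *mesh)
{
  float(*loopnors)[3] = (float(*)[3])MEM_malloc_arrayN(
      (size_t)mesh->totloop, sizeof(float[3]), __func__);
  for (int i = 0; i < mesh->totloop; i++) {
    loopnors[i][0] = 0.0f;
    loopnors[i][1] = 0.0f;
    loopnors[i][2] = 1.0f;
  }
  /* Encodes the normals into the CD_CUSTOMLOOPNORMAL layer (adding it if
   * needed) and may tag edges ME_SHARP to preserve the smooth fans. */
  BKE_mesh_set_custom_normals(mesh, loopnors);
  MEM_freeN(loopnors);
}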
2084 
2085 void BKE_mesh_normals_loop_to_vertex(const int numVerts,
2086  const MLoop *mloops,
2087  const int numLoops,
2088  const float (*clnors)[3],
2089  float (*r_vert_clnors)[3])
2090 {
2091  int *vert_loops_count = (int *)MEM_calloc_arrayN(
2092  (size_t)numVerts, sizeof(*vert_loops_count), __func__);
2093 
2094  copy_vn_fl((float *)r_vert_clnors, 3 * numVerts, 0.0f);
2095 
2096  int i;
2097  const MLoop *ml;
2098  for (i = 0, ml = mloops; i < numLoops; i++, ml++) {
2099  const uint v = ml->v;
2100 
2101  add_v3_v3(r_vert_clnors[v], clnors[i]);
2102  vert_loops_count[v]++;
2103  }
2104 
2105  for (i = 0; i < numVerts; i++) {
2106  mul_v3_fl(r_vert_clnors[i], 1.0f / (float)vert_loops_count[i]);
2107  }
2108 
2109  MEM_freeN(vert_loops_count);
2110 }
2111 
2112 #undef LNOR_SPACE_TRIGO_THRESHOLD
2113 
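[Editor's note] One caveat about BKE_mesh_normals_loop_to_vertex() above: vert_loops_count[i] remains zero for loose vertices (vertices referenced by no loop), so the final division would produce non-finite values for them. The standalone sketch below reproduces the same accumulate-then-average pattern with that case guarded; it is an illustration, not the function's actual behavior.

#include <array>
#include <cstddef>
#include <vector>

/* Average per-loop normals onto their vertices, mirroring the
 * accumulate-then-divide pattern above, but skipping loose vertices
 * (count == 0) so their result stays a zero vector. */
static void sketch_loop_normals_to_vertex(
    const std::vector<unsigned int> &loop_verts,        /* MLoop::v per loop. */
    const std::vector<std::array<float, 3>> &loop_nors, /* One normal per loop. */
    std::vector<std::array<float, 3>> &r_vert_nors)     /* One slot per vertex. */
{
  std::vector<int> counts(r_vert_nors.size(), 0);
  for (auto &n : r_vert_nors) {
    n.fill(0.0f);
  }
  /* Accumulate each loop's normal onto its vertex. */
  for (size_t i = 0; i < loop_verts.size(); i++) {
    const unsigned int v = loop_verts[i];
    for (int j = 0; j < 3; j++) {
      r_vert_nors[v][j] += loop_nors[i][j];
    }
    counts[v]++;
  }
  /* Divide by the loop count, guarding against loose vertices. */
  for (size_t v = 0; v < r_vert_nors.size(); v++) {
    if (counts[v] > 0) {
      const float inv = 1.0f / (float)counts[v];
      for (int j = 0; j < 3; j++) {
        r_vert_nors[v][j] *= inv;
      }
    }
  }
}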