#define USE_SCANFILL_EDGE_WORKAROUND

#define SPLINE_RESOL_CAP_PER_PIXEL 2
#define SPLINE_RESOL_CAP_MIN 8
#define SPLINE_RESOL_CAP_MAX 64

#define BUCKET_PIXELS_PER_CELL 4

#define SF_EDGE_IS_BOUNDARY 0xff
#define SF_KEYINDEX_TEMP_ID ((unsigned int)-1)

#define TRI_TERMINATOR_ID ((unsigned int)-1)
#define TRI_VERT ((unsigned int)-1)
/* Debug builds validate every stored face index against the vertex count. */
#  define FACE_ASSERT(face, vert_max) \
    { \
      unsigned int *_t = face; \
      BLI_assert(_t[0] < vert_max); \
      BLI_assert(_t[1] < vert_max); \
      BLI_assert(_t[2] < vert_max); \
      BLI_assert(_t[3] < vert_max || _t[3] == TRI_VERT); \
    } \
    (void)0
/* Otherwise the check compiles away to nothing. */
#  define FACE_ASSERT(face, vert_max)
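/* Editorial example (not part of the original source): FACE_ASSERT accepts both
 * triangles and quads stored in the same 4-index layout, because a triangle keeps
 * TRI_VERT in its unused fourth slot. A minimal usage sketch, assuming a face
 * array and vertex count like the ones built later in this file: */
#if 0
static void face_assert_example(unsigned int (*face_array)[4], unsigned int vert_tot)
{
  /* Asserts (in debug builds) that all stored indices are below `vert_tot`;
   * the fourth slot may instead hold TRI_VERT. */
  FACE_ASSERT(face_array[0], vert_tot);
}
#endif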
static void rotate_point_v2(
    float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
{
  const float s = sinf(angle);
  const float c = cosf(angle);
  float p_new[2];

  /* Translate the point so the rotation center sits at the origin (aspect corrected). */
  r_p[0] = (p[0] - cent[0]) / asp[0];
  r_p[1] = (p[1] - cent[1]) / asp[1];

  /* Rotate the point. */
  p_new[0] = ((r_p[0] * c) - (r_p[1] * s)) * asp[0];
  p_new[1] = ((r_p[0] * s) + (r_p[1] * c)) * asp[1];

  /* Translate the point back. */
  r_p[0] = p_new[0] + cent[0];
  r_p[1] = p_new[1] + cent[1];
}
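/* Editorial example (not part of the original source): with a square aspect,
 * rotating (1, 0) a quarter turn about the origin should land on (0, 1). */
#if 0
static void rotate_point_v2_example(void)
{
  const float p[2] = {1.0f, 0.0f};
  const float cent[2] = {0.0f, 0.0f};
  const float asp[2] = {1.0f, 1.0f};
  float r[2];
  rotate_point_v2(r, p, cent, (float)M_PI_2, asp);
  /* `r` is now approximately {0.0f, 1.0f}. */
}
#endif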
BLI_INLINE unsigned int clampis_uint(const unsigned int v,
                                     const unsigned int min,
                                     const unsigned int max)
static ScanFillVert *scanfill_vert_add_v2_with_depth(
    ScanFillContext *sf_ctx, const float co_xy[2], const float co_z)
{
  const float co[3] = {co_xy[0], co_xy[1], co_z};
const unsigned int layers_tot = mr_handle->layers_tot;

for (uint i = 0; i < layers_tot; i++, layer++) {

  unsigned int bucket_index;
  for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
    unsigned int *face_index = layer->buckets_face[bucket_index];
static void maskrasterize_spline_differentiate_point_outset(
    float (*diff_feather_points)[2], float (*diff_points)[2],
    const unsigned int tot_diff_point, const float ofs, const bool do_test)
unsigned int k_prev = tot_diff_point - 2;
unsigned int k_curr = tot_diff_point - 1;
unsigned int k_next = 0;

const float *co_prev;
const float *co_curr;
const float *co_next;

const float ofs_squared = ofs * ofs;

co_prev = diff_points[k_prev];
co_curr = diff_points[k_curr];
co_next = diff_points[k_next];

for (k = 0; k < tot_diff_point; k++) {
  co_curr = diff_points[k_curr];
  co_next = diff_points[k_next];

  if ((do_test == false) ||

    diff_feather_points[k][0] = diff_points[k][0] + (d[1] * ofs);
    diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs);
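/* Editorial sketch (not part of the original source): the two assignments above
 * offset a point along the perpendicular of its normalized tangent `d`, since
 * (d[1], -d[0]) is `d` rotated a quarter turn. Stated on its own: */
#if 0
static void outset_point_example(float r[2], const float p[2], const float d[2], const float ofs)
{
  /* Assumes `d` is already normalized, as in the loop above. */
  r[0] = p[0] + (d[1] * ofs);
  r[1] = p[1] + (-d[0] * ofs);
}
#endif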
static bool layer_bucket_isect_test(const MaskRasterLayer *layer,
                                    unsigned int face_index,
                                    const unsigned int bucket_x,
                                    const unsigned int bucket_y,
                                    const float bucket_size_x,
                                    const float bucket_size_y,
                                    const float bucket_max_rad_squared)
{
  unsigned int *face = layer->face_array[face_index];
  const float xmin = layer->bounds.xmin + (bucket_size_x * (float)bucket_x);
  const float ymin = layer->bounds.ymin + (bucket_size_y * (float)bucket_y);
  const float xmax = xmin + bucket_size_x;
  const float ymax = ymin + bucket_size_y;

  const float cent[2] = {(xmin + xmax) * 0.5f, (ymin + ymax) * 0.5f};
  /* Triangle case (face[3] == TRI_VERT). */
  const float *v1 = cos[face[0]];
  const float *v2 = cos[face[1]];
  const float *v3 = cos[face[2]];

  /* Quad case. */
  const float *v1 = cos[face[0]];
  const float *v2 = cos[face[1]];
  const float *v3 = cos[face[2]];
  const float *v4 = cos[face[3]];
const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / (float)layer->buckets_x;
const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / (float)layer->buckets_y;
const float bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * (float)M_SQRT2) + FLT_EPSILON;
const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad;
unsigned int *face = &layer->face_array[0][0];

unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__);

unsigned int face_index;

for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) {
  /* Triangle case (face[3] == TRI_VERT). */
  const float *v1 = cos[face[0]];
  const float *v2 = cos[face[1]];
  const float *v3 = cos[face[2]];

  /* Quad case. */
  const float *v1 = cos[face[0]];
  const float *v2 = cos[face[1]];
  const float *v3 = cos[face[2]];
  const float *v4 = cos[face[3]];
  if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {

    CLAMP(xmin, 0.0f, 1.0f);
    CLAMP(ymin, 0.0f, 1.0f);
    CLAMP(xmax, 0.0f, 1.0f);
    CLAMP(ymax, 0.0f, 1.0f);
    unsigned int xi_min = (unsigned int)((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
    unsigned int xi_max = (unsigned int)((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
    unsigned int yi_min = (unsigned int)((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
    unsigned int yi_max = (unsigned int)((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
    for (yi = yi_min; yi <= yi_max; yi++) {
      unsigned int bucket_index = (layer->buckets_x * yi) + xi_min;
      for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) {
        if (layer_bucket_isect_test(layer, face_index, xi, yi,
                                    bucket_size_x, bucket_size_y,
                                    bucket_max_rad_squared)) {
          /* (the face is linked into this bucket's list here) */
          bucketstore_tot[bucket_index]++;
unsigned int **buckets_face = MEM_mallocN(bucket_tot * sizeof(*buckets_face), __func__);
unsigned int bucket_index;

for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
  if (bucketstore_tot[bucket_index]) {
    unsigned int *bucket = MEM_mallocN((bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int),
                                       __func__);

    buckets_face[bucket_index] = bucket;

    for (bucket_node = bucketstore[bucket_index]; bucket_node; bucket_node = bucket_node->next) {

    /* Empty bucket. */
    buckets_face[bucket_index] = NULL;
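/* Editorial sketch (not part of the original source): each non-empty bucket built
 * above holds face indices terminated by TRI_TERMINATOR_ID, so a lookup simply
 * walks the array until the terminator. Names are assumed from the surrounding
 * code: */
#if 0
static void bucket_walk_example(const MaskRasterLayer *layer, unsigned int bucket_index)
{
  const unsigned int *face_index = layer->buckets_face[bucket_index];
  if (face_index != NULL) {
    while (*face_index != TRI_TERMINATOR_ID) {
      /* ... test the face at layer->face_array[*face_index] ... */
      face_index++;
    }
  }
}
#endif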
void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mask,
                                   const int width, const int height,
                                   const bool do_aspect_correct, const bool do_mask_aa,
                                   const bool do_feather)
const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f};

const float asp_xy[2] = {

const float zvec[3] = {0.0f, 0.0f, -1.0f};

unsigned int masklay_index;

for (masklay = mask->masklayers.first, masklay_index = 0; masklay;
     masklay = masklay->next, masklay_index++) {

  unsigned int tot_splines;

  unsigned int open_spline_index = 0;

  unsigned int sf_vert_tot = 0;
  unsigned int tot_feather_quads = 0;

#ifdef USE_SCANFILL_EDGE_WORKAROUND
  unsigned int tot_boundary_used = 0;
  unsigned int tot_boundary_found = 0;

  open_spline_ranges = MEM_callocN(sizeof(*open_spline_ranges) * tot_splines, __func__);
  float(*diff_points)[2];
  unsigned int tot_diff_point;

  float(*diff_feather_points)[2];
  float(*diff_feather_points_flip)[2];
  unsigned int tot_diff_feather_points;

  const unsigned int resol = CLAMPIS(MAX2(resol_a, resol_b), 4, 512);
  diff_feather_points = BKE_mask_spline_feather_differentiated_points_with_resolution(
      spline, resol, false, &tot_diff_feather_points);

  tot_diff_feather_points = 0;
  diff_feather_points = NULL;
  if (tot_diff_point > 3) {

    if (do_aspect_correct) {
      /* Point at the X coordinates (stride 2). */
      fp = &diff_points[0][0];
      ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : NULL;

      /* Point at the Y coordinates (stride 2). */
      fp = &diff_points[0][1];
      ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : NULL;

      for (uint i = 0; i < tot_diff_point; i++, fp += 2) {
        (*fp) = (((*fp) - 0.5f) / asp) + 0.5f;
      }

      if (tot_diff_feather_points) {
        for (uint i = 0; i < tot_diff_feather_points; i++, ffp += 2) {
          (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f;
        }
      }
    if (do_mask_aa == true) {
      if (do_feather == false) {
        tot_diff_feather_points = tot_diff_point;
        diff_feather_points = MEM_mallocN(
            sizeof(*diff_feather_points) * (size_t)tot_diff_feather_points, __func__);
        maskrasterize_spline_differentiate_point_outset(
            diff_feather_points, diff_points, tot_diff_point, pixel_size, false);

      maskrasterize_spline_differentiate_point_outset(
          diff_feather_points, diff_points, tot_diff_point, pixel_size, true);

    BKE_mask_spline_feather_collapse_inner_loops(
        spline, diff_feather_points, tot_diff_feather_points);
    sf_vert_prev->tmp.u = sf_vert_tot;

    sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point;

    for (j = 1; j < tot_diff_point; j++) {
      sf_vert->tmp.u = sf_vert_tot;
      sf_vert->keyindex = sf_vert_tot + tot_diff_point;

    sf_vert = sf_vert_prev;

    for (j = 0; j < tot_diff_point; j++) {

#ifdef USE_SCANFILL_EDGE_WORKAROUND
      if (diff_feather_points) {

      sf_vert_prev = sf_vert;
      sf_vert = sf_vert->next;

    if (diff_feather_points) {
      BLI_assert(tot_diff_feather_points == tot_diff_point);

      for (j = 0; j < tot_diff_feather_points; j++) {

      tot_feather_quads += tot_diff_point;
    if (diff_feather_points) {
      diff_feather_points_flip = MEM_mallocN(sizeof(float[2]) * tot_diff_feather_points,
                                             "diff_feather_points_flip");

      for (j = 0; j < tot_diff_point; j++) {
        sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
        add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff);
      }

      BKE_mask_spline_feather_collapse_inner_loops(
          spline, diff_feather_points, tot_diff_feather_points);
      BKE_mask_spline_feather_collapse_inner_loops(
          spline, diff_feather_points_flip, tot_diff_feather_points);

      diff_feather_points_flip = NULL;

    open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot;
    open_spline_ranges[open_spline_index].vertex_total = tot_diff_point;
    for (j = 0; j < tot_diff_point; j++) {

      sf_vert->tmp.u = sf_vert_tot;

      sf_vert->tmp.u = sf_vert_tot;

      if (diff_feather_points_flip) {
        sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points_flip[j], 1.0f);

      sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);

      sf_vert->tmp.u = sf_vert_tot;

      tot_feather_quads += 2;

    tot_feather_quads -= 2;

    if (diff_feather_points_flip) {
      MEM_freeN(diff_feather_points_flip);
      diff_feather_points_flip = NULL;
    const float *fp_cent;
    const float *fp_turn;

    fp_cent = diff_points[0];
    fp_turn = diff_feather_points[0];
#define CALC_CAP_RESOL \
  clampis_uint( \
      (unsigned int)(len_v2v2(fp_cent, fp_turn) / (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \
      SPLINE_RESOL_CAP_MIN, \
      SPLINE_RESOL_CAP_MAX)
    for (k = 1; k < vertex_total_cap; k++) {
      const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI;

      sf_vert->tmp.u = sf_vert_tot;
    }

    tot_feather_quads += vertex_total_cap;

    fp_cent = diff_points[tot_diff_point - 1];
    fp_turn = diff_feather_points[tot_diff_point - 1];

    for (k = 1; k < vertex_total_cap; k++) {
      const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI;

      sf_vert->tmp.u = sf_vert_tot;
    }

    tot_feather_quads += vertex_total_cap;

#undef CALC_CAP_RESOL
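/* Editorial worked example (not part of the original source): with a feather
 * distance of 0.02 in normalized mask space and pixel_size = 1/512,
 * CALC_CAP_RESOL evaluates to 0.02 / ((1/512) * SPLINE_RESOL_CAP_PER_PIXEL)
 * = 0.02 / 0.00390625, about 5 segments, which clampis_uint() raises to
 * SPLINE_RESOL_CAP_MIN (8). Distances above roughly 0.25 would hit the
 * SPLINE_RESOL_CAP_MAX (64) ceiling instead. */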
    if (diff_feather_points) {

  unsigned int(*face_array)[4], *face;

  unsigned int sf_tri_tot;

  unsigned int face_index;
  int scanfill_flag = 0;

  bool is_isect = false;

  face_coords = MEM_mallocN(sizeof(float[3]) * sf_vert_tot, "maskrast_face_coords");

  cos = (float *)face_coords;

  sf_vert_next = sf_vert->next;

  if (UNLIKELY(is_isect = BLI_scanfill_calc_self_isect(
                   &sf_ctx, &isect_remvertbase, &isect_remedgebase))) {

    unsigned int i = sf_vert_tot;

    face_coords = MEM_reallocN(face_coords, sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect));

    cos = (float *)&face_coords[sf_vert_tot][0];

    sf_vert->tmp.u = i++;

    sf_vert_tot += sf_vert_tot_isect;
  face_array = MEM_mallocN(sizeof(*face_array) * ((size_t)sf_tri_tot + (size_t)tot_feather_quads),
                           "maskrast_face_index");

  face = (unsigned int *)face_array;

    *(face++) = sf_tri->v3->tmp.u;
    *(face++) = sf_tri->v2->tmp.u;
    *(face++) = sf_tri->v1->tmp.u;

  if (tot_feather_quads) {

      *(face++) = sf_edge->v1->tmp.u;
      *(face++) = sf_edge->v2->tmp.u;
#ifdef USE_SCANFILL_EDGE_WORKAROUND
      tot_boundary_found++;

#ifdef USE_SCANFILL_EDGE_WORKAROUND
  if (tot_boundary_found != tot_boundary_used) {
    BLI_assert(tot_boundary_found < tot_boundary_used);
  while (open_spline_index > 0) {
    const unsigned int vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset;
    unsigned int vertex_total = open_spline_ranges[open_spline_index].vertex_total;
    unsigned int vertex_total_cap_head = open_spline_ranges[open_spline_index].vertex_total_cap_head;
    unsigned int vertex_total_cap_tail = open_spline_ranges[open_spline_index].vertex_total_cap_tail;
    for (k = 0; k < vertex_total - 1; k++, j += 3) {

    if (open_spline_ranges[open_spline_index].is_cyclic) {
      *(face++) = vertex_offset + 0;
      *(face++) = vertex_offset + 1;

      *(face++) = vertex_offset + 0;
      *(face++) = vertex_offset + 2;

    unsigned int midvidx = vertex_offset;

      j = midvidx + (vertex_total * 3);

      for (k = 0; k < vertex_total_cap_head - 2; k++, j++) {
        *(face++) = midvidx + 0;
        *(face++) = midvidx + 0;

      j = vertex_offset + (vertex_total * 3);

      *(face++) = midvidx + 0;
      *(face++) = midvidx + 0;
      *(face++) = midvidx + 1;

      *(face++) = midvidx + 0;
      *(face++) = midvidx + 0;
      *(face++) = j + vertex_total_cap_head - 2;
      *(face++) = midvidx + 2;

      j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);

      midvidx = vertex_offset + (vertex_total * 3) - 3;

      for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) {
        *(face++) = midvidx;
        *(face++) = midvidx;

      j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);

      *(face++) = midvidx + 0;
      *(face++) = midvidx + 0;
      *(face++) = midvidx + 1;

      *(face++) = midvidx + 0;
      *(face++) = midvidx + 0;
      *(face++) = midvidx + 2;
      *(face++) = j + vertex_total_cap_tail - 2;
1150 "%u %u (%u %u), %u\n",
1152 sf_tri_tot + tot_feather_quads,
1155 tot_boundary_used - tot_boundary_found);
#ifdef USE_SCANFILL_EDGE_WORKAROUND
    BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) ==
               sf_tri_tot + tot_feather_quads);
#else
    BLI_assert(face_index == sf_tri_tot + tot_feather_quads);
#endif

#ifdef USE_SCANFILL_EDGE_WORKAROUND
    layer->face_tot = (sf_tri_tot + tot_feather_quads) -
                      (tot_boundary_used - tot_boundary_found);
#else
    layer->face_tot = (sf_tri_tot + tot_feather_quads);
#endif
static float maskrasterize_layer_z_depth_tri(
    const float pt[2], const float v1[3], const float v2[3], const float v3[3])
{
  float w[3];
  barycentric_weights_v2(v1, v2, v3, pt, w);
  return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]);
}
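/* Editorial note (not part of the original source): with barycentric weights
 * satisfying w[0] + w[1] + w[2] = 1 for `pt` inside the triangle, the returned
 * value is the linear interpolation of the per-vertex Z values,
 * z(pt) = w[0] * v1[2] + w[1] * v2[2] + w[2] * v3[2]. */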
static float maskrasterize_layer_z_depth_quad(
    const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
static float maskrasterize_layer_isect(
    const unsigned int *face, float (*cos)[3], const float dist_orig, const float xy[2])
  if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig)) {

    return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);

  if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig) ||
      (cos[3][2] < dist_orig)) {

    return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);

    return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]);
  unsigned int(*face_array)[4] = layer->face_array;

  float best_dist = 1.0f;

      const float test_dist = maskrasterize_layer_isect(
          face_array[*face_index], cos, best_dist, xy);
      if (test_dist < best_dist) {
        best_dist = test_dist;

      if (best_dist == 0.0f) {
  const unsigned int layers_tot = mr_handle->layers_tot;

  for (uint i = 0; i < layers_tot; i++, layer++) {

      /* Smooth-step falloff. */
      value_layer = (3.0f * value_layer * value_layer -
                     2.0f * value_layer * value_layer * value_layer);

      /* Spherical falloff. */
      value_layer = sqrtf(2.0f * value_layer - value_layer * value_layer);

      /* Square-root falloff. */
      value_layer = sqrtf(value_layer);

      /* Squared falloff. */
      value_layer = value_layer * value_layer;

      /* Inverse-square falloff. */
      value_layer = value_layer * (2.0f - value_layer);

    value_layer *= layer->alpha;

      value_layer = 1.0f - value_layer;
    switch (layer->blend) {
        value += value_layer * (1.0f - value);

        value -= value_layer * value;

        value += value_layer;

        value -= value_layer;

        value = max_ff(value, value_layer);

        value = min_ff(value, value_layer);

        value *= value_layer;

        value = (value * (1.0f - layer->alpha)) + (value_layer * layer->alpha);

        value = fabsf(value - value_layer);

        value += value_layer;

  CLAMP(value, 0.0f, 1.0f);
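/* Editorial worked example (not part of the original source): the
 * `value += value_layer * (1.0f - value)` branch above accumulates like a
 * "screen"/merge-add blend: mixing a layer value of 0.5 over an accumulated 0.5
 * gives 0.5 + 0.5 * 0.5 = 0.75, and the result never exceeds 1.0 even before the
 * final CLAMP. */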
  const float x_inv = data->x_inv;
  const float x_px_ofs = data->x_px_ofs;

    xy[0] = ((float)x * x_inv) + x_px_ofs;
void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle,
                              const unsigned int width,
                              const unsigned int height,
                              float *buffer)

  MaskRasterizeBufferData data = {
      .mr_handle = mr_handle,
      .x_px_ofs = x_inv * 0.5f,
      .y_px_ofs = y_inv * 0.5f,
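/* Editorial worked example (not part of the original source), assuming
 * x_inv = 1.0f / width as the half-pixel offset suggests: the offsets place
 * samples at pixel centers, so for width = 4, x_inv = 0.25 and pixels x = 0..3
 * are sampled at 0.125, 0.375, 0.625 and 0.875 via
 * xy[0] = ((float)x * x_inv) + x_px_ofs. */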