Blender V3.3
mask_rasterize.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2012 Blender Foundation. All rights reserved. */
3 
52 #include "CLG_log.h"
53 
54 #include "MEM_guardedalloc.h"
55 
56 #include "DNA_mask_types.h"
57 #include "DNA_scene_types.h"
58 #include "DNA_vec_types.h"
59 
60 #include "BLI_memarena.h"
61 #include "BLI_scanfill.h"
62 #include "BLI_utildefines.h"
63 
64 #include "BLI_linklist.h"
65 #include "BLI_listbase.h"
66 #include "BLI_math.h"
67 #include "BLI_rect.h"
68 #include "BLI_task.h"
69 
70 #include "BKE_mask.h"
71 
72 #include "BLI_strict_flags.h"
73 
74 /* this is a rather annoying hack, use a define to isolate it.
75  * problem is caused by scanfill removing edges on us. */
76 #define USE_SCANFILL_EDGE_WORKAROUND
77 
78 #define SPLINE_RESOL_CAP_PER_PIXEL 2
79 #define SPLINE_RESOL_CAP_MIN 8
80 #define SPLINE_RESOL_CAP_MAX 64
81 
82 /* found this gives best performance for high detail masks, values between 2 and 8 work best */
83 #define BUCKET_PIXELS_PER_CELL 4
84 
85 #define SF_EDGE_IS_BOUNDARY 0xff
86 #define SF_KEYINDEX_TEMP_ID ((unsigned int)-1)
87 
88 #define TRI_TERMINATOR_ID ((unsigned int)-1)
89 #define TRI_VERT ((unsigned int)-1)
90 
91 /* for debugging add... */
92 #ifndef NDEBUG
93 // printf("%u %u %u %u\n", _t[0], _t[1], _t[2], _t[3]);
94 # define FACE_ASSERT(face, vert_max) \
95  { \
96  unsigned int *_t = face; \
97  BLI_assert(_t[0] < vert_max); \
98  BLI_assert(_t[1] < vert_max); \
99  BLI_assert(_t[2] < vert_max); \
100  BLI_assert(_t[3] < vert_max || _t[3] == TRI_VERT); \
101  } \
102  (void)0
103 #else
104 /* do nothing */
105 # define FACE_ASSERT(face, vert_max)
106 #endif
107 
108 static CLG_LogRef LOG = {"bke.mask_rasterize"};
109 
110 static void rotate_point_v2(
111  float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
112 {
113  const float s = sinf(angle);
114  const float c = cosf(angle);
115  float p_new[2];
116 
117  /* translate point back to origin */
118  r_p[0] = (p[0] - cent[0]) / asp[0];
119  r_p[1] = (p[1] - cent[1]) / asp[1];
120 
121  /* rotate point */
122  p_new[0] = ((r_p[0] * c) - (r_p[1] * s)) * asp[0];
123  p_new[1] = ((r_p[0] * s) + (r_p[1] * c)) * asp[1];
124 
125  /* translate point back */
126  r_p[0] = p_new[0] + cent[0];
127  r_p[1] = p_new[1] + cent[1];
128 }
129 
130 BLI_INLINE unsigned int clampis_uint(const unsigned int v,
131  const unsigned int min,
132  const unsigned int max)
133 {
134  return v < min ? min : (v > max ? max : v);
135 }
136 
137 static ScanFillVert *scanfill_vert_add_v2_with_depth(ScanFillContext *sf_ctx,
138  const float co_xy[2],
139  const float co_z)
140 {
141  const float co[3] = {co_xy[0], co_xy[1], co_z};
142  return BLI_scanfill_vert_add(sf_ctx, co);
143 }
144 
145 /* --------------------------------------------------------------------- */
146 /* local structs for mask rasterizing */
147 /* --------------------------------------------------------------------- */
148 
156 /* internal use only */
157 typedef struct MaskRasterLayer {
158  /* geometry */
159  unsigned int face_tot;
160  unsigned int (*face_array)[4]; /* access coords tri/quad */
161  float (*face_coords)[3]; /* xy, z 0-1 (1.0 == filled) */
162 
163  /* 2d bounds (to quickly skip bucket lookup) */
164  rctf bounds;
165 
166  /* buckets */
167  unsigned int **buckets_face;
168  /* cache divide and subtract */
169  float buckets_xy_scalar[2]; /* (1.0 / (buckets_width + FLT_EPSILON)) * buckets_x */
170  unsigned int buckets_x;
171  unsigned int buckets_y;
172 
173  /* copied direct from #MaskLayer.--- */
174  /* blending options */
175  float alpha;
176  char blend;
177  char blend_flag;
178  char falloff;
179 
180 } MaskRasterLayer;
181 
182 typedef struct MaskRasterSplineInfo {
183  /* body of the spline */
184  unsigned int vertex_offset;
185  unsigned int vertex_total;
186 
187  /* capping for non-filled, non cyclic splines */
188  unsigned int vertex_total_cap_head;
189  unsigned int vertex_total_cap_tail;
190 
191  bool is_cyclic;
192 } MaskRasterSplineInfo;
193 
194 /**
195  * opaque local struct for mask pixel lookup, each MaskLayer needs one of these
196  */
197 struct MaskRasterHandle {
198  MaskRasterLayer *layers;
199  unsigned int layers_tot;
200 
201  /* 2d bounds (to quickly skip bucket lookup) */
202  rctf bounds;
203 };
204 
205 /* --------------------------------------------------------------------- */
206 /* alloc / free functions */
207 /* --------------------------------------------------------------------- */
208 
209 MaskRasterHandle *BKE_maskrasterize_handle_new(void)
210 {
211  MaskRasterHandle *mr_handle;
212 
213  mr_handle = MEM_callocN(sizeof(MaskRasterHandle), "MaskRasterHandle");
214 
215  return mr_handle;
216 }
217 
218 void BKE_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
219 {
220  const unsigned int layers_tot = mr_handle->layers_tot;
221  MaskRasterLayer *layer = mr_handle->layers;
222 
223  for (uint i = 0; i < layers_tot; i++, layer++) {
224 
225  if (layer->face_array) {
226  MEM_freeN(layer->face_array);
227  }
228 
229  if (layer->face_coords) {
230  MEM_freeN(layer->face_coords);
231  }
232 
233  if (layer->buckets_face) {
234  const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y;
235  unsigned int bucket_index;
236  for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
237  unsigned int *face_index = layer->buckets_face[bucket_index];
238  if (face_index) {
239  MEM_freeN(face_index);
240  }
241  }
242 
243  MEM_freeN(layer->buckets_face);
244  }
245  }
246 
247  MEM_freeN(mr_handle->layers);
248  MEM_freeN(mr_handle);
249 }
250 
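/* Note on the function below: it offsets each feather point by `ofs` along the local
 * outward normal (the perpendicular to the averaged direction of the two adjacent edges).
 * When `do_test` is set, only feather points lying closer than `ofs` to the spline are
 * pushed out. The fake anti-aliasing path uses this to guarantee at least a one pixel
 * feather where none (or a too small one) exists. */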
251 static void maskrasterize_spline_differentiate_point_outset(float (*diff_feather_points)[2],
252  float (*diff_points)[2],
253  const unsigned int tot_diff_point,
254  const float ofs,
255  const bool do_test)
256 {
257  unsigned int k_prev = tot_diff_point - 2;
258  unsigned int k_curr = tot_diff_point - 1;
259  unsigned int k_next = 0;
260 
261  unsigned int k;
262 
263  float d_prev[2];
264  float d_next[2];
265  float d[2];
266 
267  const float *co_prev;
268  const float *co_curr;
269  const float *co_next;
270 
271  const float ofs_squared = ofs * ofs;
272 
273  co_prev = diff_points[k_prev];
274  co_curr = diff_points[k_curr];
275  co_next = diff_points[k_next];
276 
277  /* precalc */
278  sub_v2_v2v2(d_prev, co_prev, co_curr);
279  normalize_v2(d_prev);
280 
281  for (k = 0; k < tot_diff_point; k++) {
282 
283  /* co_prev = diff_points[k_prev]; */ /* precalc */
284  co_curr = diff_points[k_curr];
285  co_next = diff_points[k_next];
286 
287  // sub_v2_v2v2(d_prev, co_prev, co_curr); /* precalc */
288  sub_v2_v2v2(d_next, co_curr, co_next);
289 
290  // normalize_v2(d_prev); /* precalc */
291  normalize_v2(d_next);
292 
293  if ((do_test == false) ||
294  (len_squared_v2v2(diff_feather_points[k], diff_points[k]) < ofs_squared)) {
295 
296  add_v2_v2v2(d, d_prev, d_next);
297 
298  normalize_v2(d);
299 
300  diff_feather_points[k][0] = diff_points[k][0] + (d[1] * ofs);
301  diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs);
302  }
303 
304  /* use next iter */
305  copy_v2_v2(d_prev, d_next);
306 
307  /* k_prev = k_curr; */ /* precalc */
308  k_curr = k_next;
309  k_next++;
310  }
311 }
312 
313 /* this function is not exact, sometimes it returns false positives,
314  * the main point of it is to clear out _almost_ all bucket/face non-intersections,
315  * returning true in corner cases is ok but missing an intersection is NOT.
316  *
317  * method used
318  * - check if the center of the buckets bounding box is intersecting the face
319  * - if not get the max radius to a corner of the bucket and see how close we
320  * are to any of the triangle edges.
321  */
322 static bool layer_bucket_isect_test(const MaskRasterLayer *layer,
323  unsigned int face_index,
324  const unsigned int bucket_x,
325  const unsigned int bucket_y,
326  const float bucket_size_x,
327  const float bucket_size_y,
328  const float bucket_max_rad_squared)
329 {
330  unsigned int *face = layer->face_array[face_index];
331  float(*cos)[3] = layer->face_coords;
332 
333  const float xmin = layer->bounds.xmin + (bucket_size_x * (float)bucket_x);
334  const float ymin = layer->bounds.ymin + (bucket_size_y * (float)bucket_y);
335  const float xmax = xmin + bucket_size_x;
336  const float ymax = ymin + bucket_size_y;
337 
338  const float cent[2] = {(xmin + xmax) * 0.5f, (ymin + ymax) * 0.5f};
339 
340  if (face[3] == TRI_VERT) {
341  const float *v1 = cos[face[0]];
342  const float *v2 = cos[face[1]];
343  const float *v3 = cos[face[2]];
344 
345  if (isect_point_tri_v2(cent, v1, v2, v3)) {
346  return true;
347  }
348 
349  if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
350  (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
351  (dist_squared_to_line_segment_v2(cent, v3, v1) < bucket_max_rad_squared)) {
352  return true;
353  }
354 
355  // printf("skip tri\n");
356  return false;
357  }
358 
359  const float *v1 = cos[face[0]];
360  const float *v2 = cos[face[1]];
361  const float *v3 = cos[face[2]];
362  const float *v4 = cos[face[3]];
363 
364  if (isect_point_tri_v2(cent, v1, v2, v3)) {
365  return true;
366  }
367  if (isect_point_tri_v2(cent, v1, v3, v4)) {
368  return true;
369  }
370 
371  if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
372  (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
373  (dist_squared_to_line_segment_v2(cent, v3, v4) < bucket_max_rad_squared) ||
374  (dist_squared_to_line_segment_v2(cent, v4, v1) < bucket_max_rad_squared)) {
375  return true;
376  }
377 
378  // printf("skip quad\n");
379  return false;
380 }
381 
382 static void layer_bucket_init_dummy(MaskRasterLayer *layer)
383 {
384  layer->face_tot = 0;
385  layer->face_coords = NULL;
386  layer->face_array = NULL;
387 
388  layer->buckets_x = 0;
389  layer->buckets_y = 0;
390 
391  layer->buckets_xy_scalar[0] = 0.0f;
392  layer->buckets_xy_scalar[1] = 0.0f;
393 
394  layer->buckets_face = NULL;
395 
396  BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
397 }
398 
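/* Note on the function below: it builds the per-layer bucket grid used for fast pixel
 * lookups. Each face index is linked into every bucket its clamped bounding box overlaps
 * (verified with layer_bucket_isect_test() above), and the per-bucket link-lists are then
 * flattened into TRI_TERMINATOR_ID terminated arrays stored in layer->buckets_face. */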
399 static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
400 {
401  MemArena *arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), __func__);
402 
403  const float bucket_dim_x = BLI_rctf_size_x(&layer->bounds);
404  const float bucket_dim_y = BLI_rctf_size_y(&layer->bounds);
405 
406  layer->buckets_x = (unsigned int)((bucket_dim_x / pixel_size) / (float)BUCKET_PIXELS_PER_CELL);
407  layer->buckets_y = (unsigned int)((bucket_dim_y / pixel_size) / (float)BUCKET_PIXELS_PER_CELL);
408 
409  // printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y);
410 
411  CLAMP(layer->buckets_x, 8, 512);
412  CLAMP(layer->buckets_y, 8, 512);
413 
414  layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * (float)layer->buckets_x;
415  layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * (float)layer->buckets_y;
416 
417  {
418  /* width and height of each bucket */
419  const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / (float)layer->buckets_x;
420  const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / (float)layer->buckets_y;
421  const float bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * (float)M_SQRT2) +
422  FLT_EPSILON;
423  const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad;
424 
425  unsigned int *face = &layer->face_array[0][0];
426  float(*cos)[3] = layer->face_coords;
427 
428  const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y;
429  LinkNode **bucketstore = MEM_callocN(bucket_tot * sizeof(LinkNode *), __func__);
430  unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__);
431 
432  unsigned int face_index;
433 
434  for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) {
435  float xmin;
436  float xmax;
437  float ymin;
438  float ymax;
439 
440  if (face[3] == TRI_VERT) {
441  const float *v1 = cos[face[0]];
442  const float *v2 = cos[face[1]];
443  const float *v3 = cos[face[2]];
444 
445  xmin = min_ff(v1[0], min_ff(v2[0], v3[0]));
446  xmax = max_ff(v1[0], max_ff(v2[0], v3[0]));
447  ymin = min_ff(v1[1], min_ff(v2[1], v3[1]));
448  ymax = max_ff(v1[1], max_ff(v2[1], v3[1]));
449  }
450  else {
451  const float *v1 = cos[face[0]];
452  const float *v2 = cos[face[1]];
453  const float *v3 = cos[face[2]];
454  const float *v4 = cos[face[3]];
455 
456  xmin = min_ff(v1[0], min_ff(v2[0], min_ff(v3[0], v4[0])));
457  xmax = max_ff(v1[0], max_ff(v2[0], max_ff(v3[0], v4[0])));
458  ymin = min_ff(v1[1], min_ff(v2[1], min_ff(v3[1], v4[1])));
459  ymax = max_ff(v1[1], max_ff(v2[1], max_ff(v3[1], v4[1])));
460  }
461 
462  /* not essential, but we may as well skip any faces outside the view */
463  if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {
464 
465  CLAMP(xmin, 0.0f, 1.0f);
466  CLAMP(ymin, 0.0f, 1.0f);
467  CLAMP(xmax, 0.0f, 1.0f);
468  CLAMP(ymax, 0.0f, 1.0f);
469 
470  {
471  unsigned int xi_min = (unsigned int)((xmin - layer->bounds.xmin) *
472  layer->buckets_xy_scalar[0]);
473  unsigned int xi_max = (unsigned int)((xmax - layer->bounds.xmin) *
474  layer->buckets_xy_scalar[0]);
475  unsigned int yi_min = (unsigned int)((ymin - layer->bounds.ymin) *
476  layer->buckets_xy_scalar[1]);
477  unsigned int yi_max = (unsigned int)((ymax - layer->bounds.ymin) *
478  layer->buckets_xy_scalar[1]);
479  void *face_index_void = POINTER_FROM_UINT(face_index);
480 
481  unsigned int xi, yi;
482 
483  /* this should _almost_ never happen but since it can in extreme cases,
484  * we have to clamp the values or we overrun the buffer and crash */
485  if (xi_min >= layer->buckets_x) {
486  xi_min = layer->buckets_x - 1;
487  }
488  if (xi_max >= layer->buckets_x) {
489  xi_max = layer->buckets_x - 1;
490  }
491  if (yi_min >= layer->buckets_y) {
492  yi_min = layer->buckets_y - 1;
493  }
494  if (yi_max >= layer->buckets_y) {
495  yi_max = layer->buckets_y - 1;
496  }
497 
498  for (yi = yi_min; yi <= yi_max; yi++) {
499  unsigned int bucket_index = (layer->buckets_x * yi) + xi_min;
500  for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) {
501  /* correct but do in outer loop */
502  // unsigned int bucket_index = (layer->buckets_x * yi) + xi;
503 
504  BLI_assert(xi < layer->buckets_x);
505  BLI_assert(yi < layer->buckets_y);
506  BLI_assert(bucket_index < bucket_tot);
507 
508  /* Check if the bucket intersects with the face. */
509  /* NOTE: there is a trade off here since checking box/tri intersections isn't as
510  * optimal as it could be, but checking pixels against faces they will never
511  * intersect with is likely the greater slowdown here -
512  * so check if the cell intersects the face. */
513  if (layer_bucket_isect_test(layer,
514  face_index,
515  xi,
516  yi,
517  bucket_size_x,
518  bucket_size_y,
519  bucket_max_rad_squared)) {
520  BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena);
521  bucketstore_tot[bucket_index]++;
522  }
523  }
524  }
525  }
526  }
527  }
528 
529  if (1) {
530  /* now convert linknodes into arrays for faster per pixel access */
531  unsigned int **buckets_face = MEM_mallocN(bucket_tot * sizeof(*buckets_face), __func__);
532  unsigned int bucket_index;
533 
534  for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
535  if (bucketstore_tot[bucket_index]) {
536  unsigned int *bucket = MEM_mallocN(
537  (bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int), __func__);
538  LinkNode *bucket_node;
539 
540  buckets_face[bucket_index] = bucket;
541 
542  for (bucket_node = bucketstore[bucket_index]; bucket_node;
543  bucket_node = bucket_node->next) {
544  *bucket = POINTER_AS_UINT(bucket_node->link);
545  bucket++;
546  }
547  *bucket = TRI_TERMINATOR_ID;
548  }
549  else {
550  buckets_face[bucket_index] = NULL;
551  }
552  }
553 
554  layer->buckets_face = buckets_face;
555  }
556 
557  MEM_freeN(bucketstore);
558  MEM_freeN(bucketstore_tot);
559  }
560 
561  BLI_memarena_free(arena);
562 }
563 
564 void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle,
565  struct Mask *mask,
566  const int width,
567  const int height,
568  const bool do_aspect_correct,
569  const bool do_mask_aa,
570  const bool do_feather)
571 {
572  const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f};
573  const float pixel_size = 1.0f / (float)min_ii(width, height);
574  const float asp_xy[2] = {
575  (do_aspect_correct && width > height) ? (float)height / (float)width : 1.0f,
576  (do_aspect_correct && width < height) ? (float)width / (float)height : 1.0f};
577 
578  const float zvec[3] = {0.0f, 0.0f, -1.0f};
579  MaskLayer *masklay;
580  unsigned int masklay_index;
581  MemArena *sf_arena;
582 
583  mr_handle->layers_tot = (unsigned int)BLI_listbase_count(&mask->masklayers);
584  mr_handle->layers = MEM_mallocN(sizeof(MaskRasterLayer) * mr_handle->layers_tot,
585  "MaskRasterLayer");
586  BLI_rctf_init_minmax(&mr_handle->bounds);
587 
588  sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__);
589 
590  for (masklay = mask->masklayers.first, masklay_index = 0; masklay;
591  masklay = masklay->next, masklay_index++) {
592 
593  /* we need to store vertex ranges for open splines for filling */
594  unsigned int tot_splines;
595  MaskRasterSplineInfo *open_spline_ranges;
596  unsigned int open_spline_index = 0;
597 
598  MaskSpline *spline;
599 
600  /* scanfill */
601  ScanFillContext sf_ctx;
602  ScanFillVert *sf_vert = NULL;
603  ScanFillVert *sf_vert_next = NULL;
604  ScanFillFace *sf_tri;
605 
606  unsigned int sf_vert_tot = 0;
607  unsigned int tot_feather_quads = 0;
608 
609 #ifdef USE_SCANFILL_EDGE_WORKAROUND
610  unsigned int tot_boundary_used = 0;
611  unsigned int tot_boundary_found = 0;
612 #endif
613 
614  if (masklay->visibility_flag & MASK_HIDE_RENDER) {
615  /* skip the layer */
616  mr_handle->layers_tot--;
617  masklay_index--;
618  continue;
619  }
620 
621  tot_splines = (unsigned int)BLI_listbase_count(&masklay->splines);
622  open_spline_ranges = MEM_callocN(sizeof(*open_spline_ranges) * tot_splines, __func__);
623 
624  BLI_scanfill_begin_arena(&sf_ctx, sf_arena);
625 
626  for (spline = masklay->splines.first; spline; spline = spline->next) {
627  const bool is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0;
628  const bool is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0;
629 
630  float(*diff_points)[2];
631  unsigned int tot_diff_point;
632 
633  float(*diff_feather_points)[2];
634  float(*diff_feather_points_flip)[2];
635  unsigned int tot_diff_feather_points;
636 
637  const unsigned int resol_a = BKE_mask_spline_resolution(spline, width, height) / 4;
638  const unsigned int resol_b = BKE_mask_spline_feather_resolution(spline, width, height) / 4;
639  const unsigned int resol = CLAMPIS(MAX2(resol_a, resol_b), 4, 512);
640 
641  diff_points = BKE_mask_spline_differentiate_with_resolution(spline, resol, &tot_diff_point);
642 
643  if (do_feather) {
644  diff_feather_points = BKE_mask_spline_feather_differentiated_points_with_resolution(
645  spline, resol, false, &tot_diff_feather_points);
646  BLI_assert(diff_feather_points);
647  }
648  else {
649  tot_diff_feather_points = 0;
650  diff_feather_points = NULL;
651  }
652 
653  if (tot_diff_point > 3) {
654  ScanFillVert *sf_vert_prev;
655  unsigned int j;
656 
657  sf_ctx.poly_nr++;
658 
659  if (do_aspect_correct) {
660  if (width != height) {
661  float *fp;
662  float *ffp;
663  float asp;
664 
665  if (width < height) {
666  fp = &diff_points[0][0];
667  ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : NULL;
668  asp = (float)width / (float)height;
669  }
670  else {
671  fp = &diff_points[0][1];
672  ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : NULL;
673  asp = (float)height / (float)width;
674  }
675 
676  for (uint i = 0; i < tot_diff_point; i++, fp += 2) {
677  (*fp) = (((*fp) - 0.5f) / asp) + 0.5f;
678  }
679 
680  if (tot_diff_feather_points) {
681  for (uint i = 0; i < tot_diff_feather_points; i++, ffp += 2) {
682  (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f;
683  }
684  }
685  }
686  }
687 
688  /* fake aa, using small feather */
689  if (do_mask_aa == true) {
690  if (do_feather == false) {
691  tot_diff_feather_points = tot_diff_point;
692  diff_feather_points = MEM_mallocN(
693  sizeof(*diff_feather_points) * (size_t)tot_diff_feather_points, __func__);
694  /* add single pixel feather */
695  maskrasterize_spline_differentiate_point_outset(
696  diff_feather_points, diff_points, tot_diff_point, pixel_size, false);
697  }
698  else {
699  /* ensure single pixel feather, on any zero feather areas */
700  maskrasterize_spline_differentiate_point_outset(
701  diff_feather_points, diff_points, tot_diff_point, pixel_size, true);
702  }
703  }
704 
705  if (is_fill) {
706  /* Apply intersections depending on fill settings. */
707  if (spline->flag & MASK_SPLINE_NOINTERSECT) {
708  BKE_mask_spline_feather_collapse_inner_loops(
709  spline, diff_feather_points, tot_diff_feather_points);
710  }
711 
712  sf_vert_prev = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[0], 0.0f);
713  sf_vert_prev->tmp.u = sf_vert_tot;
714 
715  /* Absolute index of feather vert. */
716  sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point;
717 
718  sf_vert_tot++;
719 
720  for (j = 1; j < tot_diff_point; j++) {
721  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
722  sf_vert->tmp.u = sf_vert_tot;
723  sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */
724  sf_vert_tot++;
725  }
726 
727  sf_vert = sf_vert_prev;
728  sf_vert_prev = sf_ctx.fillvertbase.last;
729 
730  for (j = 0; j < tot_diff_point; j++) {
731  ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert);
732 
733 #ifdef USE_SCANFILL_EDGE_WORKAROUND
734  if (diff_feather_points) {
735  sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY;
736  tot_boundary_used++;
737  }
738 #else
739  (void)sf_edge;
740 #endif
741  sf_vert_prev = sf_vert;
742  sf_vert = sf_vert->next;
743  }
744 
745  if (diff_feather_points) {
746  BLI_assert(tot_diff_feather_points == tot_diff_point);
747 
748  /* NOTE: only added for convenience, we don't in fact use these to scan-fill,
749  * only to create feather faces after scan-fill. */
750  for (j = 0; j < tot_diff_feather_points; j++) {
751  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
752  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
753  sf_vert_tot++;
754  }
755 
756  tot_feather_quads += tot_diff_point;
757  }
758  }
759  else {
760  /* unfilled spline */
761  if (diff_feather_points) {
762 
763  if (spline->flag & MASK_SPLINE_NOINTERSECT) {
764  diff_feather_points_flip = MEM_mallocN(sizeof(float[2]) * tot_diff_feather_points,
765  "diff_feather_points_flip");
766 
767  float co_diff[2];
768  for (j = 0; j < tot_diff_point; j++) {
769  sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
770  add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff);
771  }
772 
773  BKE_mask_spline_feather_collapse_inner_loops(
774  spline, diff_feather_points, tot_diff_feather_points);
775  BKE_mask_spline_feather_collapse_inner_loops(
776  spline, diff_feather_points_flip, tot_diff_feather_points);
777  }
778  else {
779  diff_feather_points_flip = NULL;
780  }
781 
782  open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot;
783  open_spline_ranges[open_spline_index].vertex_total = tot_diff_point;
784 
785  /* TODO: an alternate function so we can avoid the double vector copy! */
786  for (j = 0; j < tot_diff_point; j++) {
787 
788  /* center vert */
789  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
790  sf_vert->tmp.u = sf_vert_tot;
791  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
792  sf_vert_tot++;
793 
794  /* feather vert A */
795  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
796  sf_vert->tmp.u = sf_vert_tot;
797  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
798  sf_vert_tot++;
799 
800  /* feather vert B */
801  if (diff_feather_points_flip) {
802  sf_vert = scanfill_vert_add_v2_with_depth(
803  &sf_ctx, diff_feather_points_flip[j], 1.0f);
804  }
805  else {
806  float co_diff[2];
807  sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
808  add_v2_v2v2(co_diff, diff_points[j], co_diff);
809  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_diff, 1.0f);
810  }
811 
812  sf_vert->tmp.u = sf_vert_tot;
813  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
814  sf_vert_tot++;
815 
816  tot_feather_quads += 2;
817  }
818 
819  if (!is_cyclic) {
820  tot_feather_quads -= 2;
821  }
822 
823  if (diff_feather_points_flip) {
824  MEM_freeN(diff_feather_points_flip);
825  diff_feather_points_flip = NULL;
826  }
827 
828  /* cap ends */
829 
830  /* dummy init value */
831  open_spline_ranges[open_spline_index].vertex_total_cap_head = 0;
832  open_spline_ranges[open_spline_index].vertex_total_cap_tail = 0;
833 
834  if (!is_cyclic) {
835  const float *fp_cent;
836  const float *fp_turn;
837 
838  unsigned int k;
839 
840  fp_cent = diff_points[0];
841  fp_turn = diff_feather_points[0];
842 
843 #define CALC_CAP_RESOL \
844  clampis_uint( \
845  (unsigned int)(len_v2v2(fp_cent, fp_turn) / (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \
846  SPLINE_RESOL_CAP_MIN, \
847  SPLINE_RESOL_CAP_MAX)
848 
849  {
850  const unsigned int vertex_total_cap = CALC_CAP_RESOL;
851 
852  for (k = 1; k < vertex_total_cap; k++) {
853  const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI;
854  float co_feather[2];
855  rotate_point_v2(co_feather, fp_turn, fp_cent, angle, asp_xy);
856 
857  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
858  sf_vert->tmp.u = sf_vert_tot;
859  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
860  sf_vert_tot++;
861  }
862  tot_feather_quads += vertex_total_cap;
863 
864  open_spline_ranges[open_spline_index].vertex_total_cap_head = vertex_total_cap;
865  }
866 
867  fp_cent = diff_points[tot_diff_point - 1];
868  fp_turn = diff_feather_points[tot_diff_point - 1];
869 
870  {
871  const unsigned int vertex_total_cap = CALC_CAP_RESOL;
872 
873  for (k = 1; k < vertex_total_cap; k++) {
874  const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI;
875  float co_feather[2];
876  rotate_point_v2(co_feather, fp_turn, fp_cent, -angle, asp_xy);
877 
878  sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
879  sf_vert->tmp.u = sf_vert_tot;
880  sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
881  sf_vert_tot++;
882  }
883  tot_feather_quads += vertex_total_cap;
884 
885  open_spline_ranges[open_spline_index].vertex_total_cap_tail = vertex_total_cap;
886  }
887  }
888 
889  open_spline_ranges[open_spline_index].is_cyclic = is_cyclic;
890  open_spline_index++;
891 
892 #undef CALC_CAP_RESOL
893  /* end capping */
894  }
895  }
896  }
897 
898  if (diff_points) {
899  MEM_freeN(diff_points);
900  }
901 
902  if (diff_feather_points) {
903  MEM_freeN(diff_feather_points);
904  }
905  }
906 
907  {
908  unsigned int(*face_array)[4], *face; /* access coords */
909  float(*face_coords)[3], *cos; /* xy, z 0-1 (1.0 == filled) */
910  unsigned int sf_tri_tot;
911  rctf bounds;
912  unsigned int face_index;
913  int scanfill_flag = 0;
914 
915  bool is_isect = false;
916  ListBase isect_remvertbase = {NULL, NULL};
917  ListBase isect_remedgebase = {NULL, NULL};
918 
919  /* now we have all the splines */
920  face_coords = MEM_mallocN(sizeof(float[3]) * sf_vert_tot, "maskrast_face_coords");
921 
922  /* init bounds */
923  BLI_rctf_init_minmax(&bounds);
924 
925  /* coords */
926  cos = (float *)face_coords;
927  for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert_next) {
928  sf_vert_next = sf_vert->next;
929  copy_v3_v3(cos, sf_vert->co);
930 
931  /* remove so as not to interfere with fill (called after) */
932  if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) {
933  BLI_remlink(&sf_ctx.fillvertbase, sf_vert);
934  }
935 
936  /* bounds */
937  BLI_rctf_do_minmax_v(&bounds, cos);
938 
939  cos += 3;
940  }
941 
942  /* --- inefficient self-intersect case --- */
943  /* if self intersections are found, it's too tricky to attempt to map vertices
944  * so just realloc and add entirely new vertices - the result of the self-intersect check.
945  */
946  if ((masklay->flag & MASK_LAYERFLAG_FILL_OVERLAP) &&
947  (is_isect = BLI_scanfill_calc_self_isect(
948  &sf_ctx, &isect_remvertbase, &isect_remedgebase))) {
949  unsigned int sf_vert_tot_isect = (unsigned int)BLI_listbase_count(&sf_ctx.fillvertbase);
950  unsigned int i = sf_vert_tot;
951 
952  face_coords = MEM_reallocN(face_coords,
953  sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect));
954 
955  cos = (float *)&face_coords[sf_vert_tot][0];
956 
957  for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert->next) {
958  copy_v3_v3(cos, sf_vert->co);
959  sf_vert->tmp.u = i++;
960  cos += 3;
961  }
962 
963  sf_vert_tot += sf_vert_tot_isect;
964 
965  /* we need to calc polys after self intersect */
966  scanfill_flag |= BLI_SCANFILL_CALC_POLYS;
967  }
968  /* --- end inefficient code --- */
969 
970  /* main scan-fill */
971  if ((masklay->flag & MASK_LAYERFLAG_FILL_DISCRETE) == 0) {
972  scanfill_flag |= BLI_SCANFILL_CALC_HOLES;
973  }
974 
975  sf_tri_tot = (unsigned int)BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, zvec);
976 
977  if (is_isect) {
978  /* add removed data back, we only need edges for feather,
979  * but add verts back so they get freed along with others */
980  BLI_movelisttolist(&sf_ctx.fillvertbase, &isect_remvertbase);
981  BLI_movelisttolist(&sf_ctx.filledgebase, &isect_remedgebase);
982  }
983 
984  face_array = MEM_mallocN(sizeof(*face_array) *
985  ((size_t)sf_tri_tot + (size_t)tot_feather_quads),
986  "maskrast_face_index");
987  face_index = 0;
988 
989  /* faces */
990  face = (unsigned int *)face_array;
991  for (sf_tri = sf_ctx.fillfacebase.first; sf_tri; sf_tri = sf_tri->next) {
992  *(face++) = sf_tri->v3->tmp.u;
993  *(face++) = sf_tri->v2->tmp.u;
994  *(face++) = sf_tri->v1->tmp.u;
995  *(face++) = TRI_VERT;
996  face_index++;
997  FACE_ASSERT(face - 4, sf_vert_tot);
998  }
999 
1000  /* start of feather faces... if we have this set,
1001  * 'face_index' is kept from loop above */
1002 
1003  BLI_assert(face_index == sf_tri_tot);
1004 
1005  if (tot_feather_quads) {
1006  ScanFillEdge *sf_edge;
1007 
1008  for (sf_edge = sf_ctx.filledgebase.first; sf_edge; sf_edge = sf_edge->next) {
1009  if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
1010  *(face++) = sf_edge->v1->tmp.u;
1011  *(face++) = sf_edge->v2->tmp.u;
1012  *(face++) = sf_edge->v2->keyindex;
1013  *(face++) = sf_edge->v1->keyindex;
1014  face_index++;
1015  FACE_ASSERT(face - 4, sf_vert_tot);
1016 
1017 #ifdef USE_SCANFILL_EDGE_WORKAROUND
1018  tot_boundary_found++;
1019 #endif
1020  }
1021  }
1022  }
1023 
1024 #ifdef USE_SCANFILL_EDGE_WORKAROUND
1025  if (tot_boundary_found != tot_boundary_used) {
1026  BLI_assert(tot_boundary_found < tot_boundary_used);
1027  }
1028 #endif
1029 
1030  /* feather only splines */
1031  while (open_spline_index > 0) {
1032  const unsigned int vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset;
1033  unsigned int vertex_total = open_spline_ranges[open_spline_index].vertex_total;
1034  unsigned int vertex_total_cap_head =
1035  open_spline_ranges[open_spline_index].vertex_total_cap_head;
1036  unsigned int vertex_total_cap_tail =
1037  open_spline_ranges[open_spline_index].vertex_total_cap_tail;
1038  unsigned int k, j;
1039 
1040  j = vertex_offset;
1041 
1042  /* subtract one since we reference next vertex triple */
1043  for (k = 0; k < vertex_total - 1; k++, j += 3) {
1044 
1045  BLI_assert(j == vertex_offset + (k * 3));
1046 
1047  *(face++) = j + 3; /* next span */ /* z 1 */
1048  *(face++) = j + 0; /* z 1 */
1049  *(face++) = j + 1; /* z 0 */
1050  *(face++) = j + 4; /* next span */ /* z 0 */
1051  face_index++;
1052  FACE_ASSERT(face - 4, sf_vert_tot);
1053 
1054  *(face++) = j + 0; /* z 1 */
1055  *(face++) = j + 3; /* next span */ /* z 1 */
1056  *(face++) = j + 5; /* next span */ /* z 0 */
1057  *(face++) = j + 2; /* z 0 */
1058  face_index++;
1059  FACE_ASSERT(face - 4, sf_vert_tot);
1060  }
1061 
1062  if (open_spline_ranges[open_spline_index].is_cyclic) {
1063  *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1064  *(face++) = j + 0; /* z 1 */
1065  *(face++) = j + 1; /* z 0 */
1066  *(face++) = vertex_offset + 1; /* next span */ /* z 0 */
1067  face_index++;
1068  FACE_ASSERT(face - 4, sf_vert_tot);
1069 
1070  *(face++) = j + 0; /* z 1 */
1071  *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1072  *(face++) = vertex_offset + 2; /* next span */ /* z 0 */
1073  *(face++) = j + 2; /* z 0 */
1074  face_index++;
1075  FACE_ASSERT(face - 4, sf_vert_tot);
1076  }
1077  else {
1078  unsigned int midvidx = vertex_offset;
1079 
1080  /***************
1081  * cap end 'a' */
1082  j = midvidx + (vertex_total * 3);
1083 
1084  for (k = 0; k < vertex_total_cap_head - 2; k++, j++) {
1085  *(face++) = midvidx + 0; /* z 1 */
1086  *(face++) = midvidx + 0; /* z 1 */
1087  *(face++) = j + 0; /* z 0 */
1088  *(face++) = j + 1; /* z 0 */
1089  face_index++;
1090  FACE_ASSERT(face - 4, sf_vert_tot);
1091  }
1092 
1093  j = vertex_offset + (vertex_total * 3);
1094 
1095  /* 2 tris that join the original */
1096  *(face++) = midvidx + 0; /* z 1 */
1097  *(face++) = midvidx + 0; /* z 1 */
1098  *(face++) = midvidx + 1; /* z 0 */
1099  *(face++) = j + 0; /* z 0 */
1100  face_index++;
1101  FACE_ASSERT(face - 4, sf_vert_tot);
1102 
1103  *(face++) = midvidx + 0; /* z 1 */
1104  *(face++) = midvidx + 0; /* z 1 */
1105  *(face++) = j + vertex_total_cap_head - 2; /* z 0 */
1106  *(face++) = midvidx + 2; /* z 0 */
1107  face_index++;
1108  FACE_ASSERT(face - 4, sf_vert_tot);
1109 
1110  /***************
1111  * cap end 'b' */
1112  /* ... same as previous but v 2-3 flipped, and different initial offsets */
1113 
1114  j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1115 
1116  midvidx = vertex_offset + (vertex_total * 3) - 3;
1117 
1118  for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) {
1119  *(face++) = midvidx; /* z 1 */
1120  *(face++) = midvidx; /* z 1 */
1121  *(face++) = j + 1; /* z 0 */
1122  *(face++) = j + 0; /* z 0 */
1123  face_index++;
1124  FACE_ASSERT(face - 4, sf_vert_tot);
1125  }
1126 
1127  j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1128 
1129  /* 2 tris that join the original */
1130  *(face++) = midvidx + 0; /* z 1 */
1131  *(face++) = midvidx + 0; /* z 1 */
1132  *(face++) = j + 0; /* z 0 */
1133  *(face++) = midvidx + 1; /* z 0 */
1134  face_index++;
1135  FACE_ASSERT(face - 4, sf_vert_tot);
1136 
1137  *(face++) = midvidx + 0; /* z 1 */
1138  *(face++) = midvidx + 0; /* z 1 */
1139  *(face++) = midvidx + 2; /* z 0 */
1140  *(face++) = j + vertex_total_cap_tail - 2; /* z 0 */
1141  face_index++;
1142  FACE_ASSERT(face - 4, sf_vert_tot);
1143  }
1144  }
1145 
1146  MEM_freeN(open_spline_ranges);
1147 
1148 #if 0
1149  fprintf(stderr,
1150  "%u %u (%u %u), %u\n",
1151  face_index,
1152  sf_tri_tot + tot_feather_quads,
1153  sf_tri_tot,
1154  tot_feather_quads,
1155  tot_boundary_used - tot_boundary_found);
1156 #endif
1157 
1158 #ifdef USE_SCANFILL_EDGE_WORKAROUND
1159  BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) ==
1160  sf_tri_tot + tot_feather_quads);
1161 #else
1162  BLI_assert(face_index == sf_tri_tot + tot_feather_quads);
1163 #endif
1164  {
1165  MaskRasterLayer *layer = &mr_handle->layers[masklay_index];
1166 
1167  if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) {
1168 #ifdef USE_SCANFILL_EDGE_WORKAROUND
1169  layer->face_tot = (sf_tri_tot + tot_feather_quads) -
1170  (tot_boundary_used - tot_boundary_found);
1171 #else
1172  layer->face_tot = (sf_tri_tot + tot_feather_quads);
1173 #endif
1174  layer->face_coords = face_coords;
1175  layer->face_array = face_array;
1176  layer->bounds = bounds;
1177 
1178  layer_bucket_init(layer, pixel_size);
1179 
1180  BLI_rctf_union(&mr_handle->bounds, &bounds);
1181  }
1182  else {
1183  MEM_freeN(face_coords);
1184  MEM_freeN(face_array);
1185 
1186  layer_bucket_init_dummy(layer);
1187  }
1188 
1189  /* copy as-is */
1190  layer->alpha = masklay->alpha;
1191  layer->blend = masklay->blend;
1192  layer->blend_flag = masklay->blend_flag;
1193  layer->falloff = masklay->falloff;
1194  }
1195 
1196  // printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads);
1197  }
1198 
1199  /* add triangles */
1200  BLI_scanfill_end_arena(&sf_ctx, sf_arena);
1201  }
1202 
1203  BLI_memarena_free(sf_arena);
1204 }
1205 
1206 /* --------------------------------------------------------------------- */
1207 /* functions that run inside the sampling thread (keep fast!) */
1208 /* --------------------------------------------------------------------- */
1209 
1210 /* 2D ray test */
1211 #if 0
1212 static float maskrasterize_layer_z_depth_tri(const float pt[2],
1213  const float v1[3],
1214  const float v2[3],
1215  const float v3[3])
1216 {
1217  float w[3];
1218  barycentric_weights_v2(v1, v2, v3, pt, w);
1219  return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]);
1220 }
1221 #endif
1222 
1223 static float maskrasterize_layer_z_depth_quad(
1224  const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
1225 {
1226  float w[4];
1227  barycentric_weights_v2_quad(v1, v2, v3, v4, pt, w);
1228  // return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]) + (v4[2] * w[3]);
1229  return w[2] + w[3]; /* we can make this assumption for small speedup */
1230 }
1231 
1232 static float maskrasterize_layer_isect(const unsigned int *face,
1233  float (*cos)[3],
1234  const float dist_orig,
1235  const float xy[2])
1236 {
1237  /* we always cast from same place only need xy */
1238  if (face[3] == TRI_VERT) {
1239  /* --- tri --- */
1240 
1241 #if 0
1242  /* not essential but avoids unneeded extra lookups */
1243  if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig)) {
1244  if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1245  /* we know all tris are close for now */
1246  return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1247  }
1248  }
1249 #else
1250  /* we know all tris are close for now */
1251  if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1252  return 0.0f;
1253  }
1254 #endif
1255  }
1256  else {
1257  /* --- quad --- */
1258 
1259  /* not essential but avoids unneeded extra lookups */
1260  if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig) ||
1261  (cos[3][2] < dist_orig)) {
1262 
1263  /* needs work */
1264 #if 1
1265  /* quad check fails for bow-tie, so keep using 2 tri checks */
1266  // if (isect_point_quad_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]))
1267  if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]]) ||
1268  isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) {
1269  return maskrasterize_layer_z_depth_quad(
1270  xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]);
1271  }
1272 #elif 1
1273  /* don't use isect_point_tri_v2_cw because we could have bow-tie quads */
1274 
1275  if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1276  return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1277  }
1278  else if (isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) {
1279  return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]);
1280  }
1281 #else
1282  /* cheat - we know first 2 verts are z0.0f and second 2 are z 1.0f */
1283  /* ... worth looking into */
1284 #endif
1285  }
1286  }
1287 
1288  return 1.0f;
1289 }
1290 
1291 BLI_INLINE unsigned int layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2])
1292 {
1293  BLI_assert(BLI_rctf_isect_pt_v(&layer->bounds, xy));
1294 
1295  return ((unsigned int)((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0])) +
1296  (((unsigned int)((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1])) *
1297  layer->buckets_x);
1298 }
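/* Worked example for the index math above (illustrative values only): with bounds
 * spanning 0..1 on both axes and an 8x8 grid, buckets_xy_scalar is approximately 8.0,
 * so a sample at xy = (0.30, 0.55) maps to column (unsigned int)(0.30 * 8) = 2 and
 * row (unsigned int)(0.55 * 8) = 4, giving bucket index (4 * 8) + 2 = 34. */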
1299 
1300 static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
1301 {
1302  unsigned int index = layer_bucket_index_from_xy(layer, xy);
1303  unsigned int *face_index = layer->buckets_face[index];
1304 
1305  if (face_index) {
1306  unsigned int(*face_array)[4] = layer->face_array;
1307  float(*cos)[3] = layer->face_coords;
1308  float best_dist = 1.0f;
1309  while (*face_index != TRI_TERMINATOR_ID) {
1310  const float test_dist = maskrasterize_layer_isect(
1311  face_array[*face_index], cos, best_dist, xy);
1312  if (test_dist < best_dist) {
1313  best_dist = test_dist;
1314  /* comparing with 0.0f is OK here because triangles are always zero depth */
1315  if (best_dist == 0.0f) {
1316  /* bail early, we're as close as possible */
1317  return 0.0f;
1318  }
1319  }
1320  face_index++;
1321  }
1322  return best_dist;
1323  }
1324 
1325  return 1.0f;
1326 }
1327 
1328 float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
1329 {
1330  /* can't do this because some layers may invert */
1331  /* if (BLI_rctf_isect_pt_v(&mr_handle->bounds, xy)) */
1332 
1333  const unsigned int layers_tot = mr_handle->layers_tot;
1334  MaskRasterLayer *layer = mr_handle->layers;
1335 
1336  /* return value */
1337  float value = 0.0f;
1338 
1339  for (uint i = 0; i < layers_tot; i++, layer++) {
1340  float value_layer;
1341 
1342  /* also used as signal for unused layer (when render is disabled) */
1343  if (layer->alpha != 0.0f && BLI_rctf_isect_pt_v(&layer->bounds, xy)) {
1344  value_layer = 1.0f - layer_bucket_depth_from_xy(layer, xy);
1345 
1346  switch (layer->falloff) {
1347  case PROP_SMOOTH:
1348  /* ease - gives less hard lines for dilate/erode feather */
1349  value_layer = (3.0f * value_layer * value_layer -
1350  2.0f * value_layer * value_layer * value_layer);
1351  break;
1352  case PROP_SPHERE:
1353  value_layer = sqrtf(2.0f * value_layer - value_layer * value_layer);
1354  break;
1355  case PROP_ROOT:
1356  value_layer = sqrtf(value_layer);
1357  break;
1358  case PROP_SHARP:
1359  value_layer = value_layer * value_layer;
1360  break;
1361  case PROP_INVSQUARE:
1362  value_layer = value_layer * (2.0f - value_layer);
1363  break;
1364  case PROP_LIN:
1365  default:
1366  /* nothing */
1367  break;
1368  }
1369 
1370  if (layer->blend != MASK_BLEND_REPLACE) {
1371  value_layer *= layer->alpha;
1372  }
1373  }
1374  else {
1375  value_layer = 0.0f;
1376  }
1377 
1378  if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
1379  value_layer = 1.0f - value_layer;
1380  }
1381 
1382  switch (layer->blend) {
1383  case MASK_BLEND_MERGE_ADD:
1384  value += value_layer * (1.0f - value);
1385  break;
1386  case MASK_BLEND_MERGE_SUBTRACT:
1387  value -= value_layer * value;
1388  break;
1389  case MASK_BLEND_ADD:
1390  value += value_layer;
1391  break;
1392  case MASK_BLEND_SUBTRACT:
1393  value -= value_layer;
1394  break;
1395  case MASK_BLEND_LIGHTEN:
1396  value = max_ff(value, value_layer);
1397  break;
1398  case MASK_BLEND_DARKEN:
1399  value = min_ff(value, value_layer);
1400  break;
1401  case MASK_BLEND_MUL:
1402  value *= value_layer;
1403  break;
1404  case MASK_BLEND_REPLACE:
1405  value = (value * (1.0f - layer->alpha)) + (value_layer * layer->alpha);
1406  break;
1407  case MASK_BLEND_DIFFERENCE:
1408  value = fabsf(value - value_layer);
1409  break;
1410  default: /* same as add */
1411  CLOG_ERROR(&LOG, "unhandled blend type: %d", layer->blend);
1412  BLI_assert(0);
1413  value += value_layer;
1414  break;
1415  }
1416 
1417  /* clamp after applying each layer so we don't get
1418  * issues subtracting after accumulating over 1.0f */
1419  CLAMP(value, 0.0f, 1.0f);
1420  }
1421 
1422  return value;
1423 }
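/* Worked example for the falloff remapping above (illustrative values only): a linear
 * feather value of 0.25 becomes 3 * 0.25^2 - 2 * 0.25^3 = 0.15625 with PROP_SMOOTH,
 * sqrt(0.25) = 0.5 with PROP_ROOT and 0.25^2 = 0.0625 with PROP_SHARP, before the layer
 * alpha and blend mode are applied. */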
1424 
1425 typedef struct MaskRasterizeBufferData {
1426  MaskRasterHandle *mr_handle;
1427  float x_inv, y_inv;
1428  float x_px_ofs, y_px_ofs;
1429  uint width;
1430 
1431  float *buffer;
1432 } MaskRasterizeBufferData;
1433 
1434 static void maskrasterize_buffer_cb(void *__restrict userdata,
1435  const int y,
1436  const TaskParallelTLS *__restrict UNUSED(tls))
1437 {
1438  MaskRasterizeBufferData *data = userdata;
1439 
1440  MaskRasterHandle *mr_handle = data->mr_handle;
1441  float *buffer = data->buffer;
1442 
1443  const uint width = data->width;
1444  const float x_inv = data->x_inv;
1445  const float x_px_ofs = data->x_px_ofs;
1446 
1447  uint i = (uint)y * width;
1448  float xy[2];
1449  xy[1] = ((float)y * data->y_inv) + data->y_px_ofs;
1450  for (uint x = 0; x < width; x++, i++) {
1451  xy[0] = ((float)x * x_inv) + x_px_ofs;
1452 
1453  buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
1454  }
1455 }
1456 
1457 void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle,
1458  const unsigned int width,
1459  const unsigned int height,
1460  /* Cannot be const, because it is assigned to non-const variable.
1461  * NOLINTNEXTLINE: readability-non-const-parameter. */
1462  float *buffer)
1463 {
1464  const float x_inv = 1.0f / (float)width;
1465  const float y_inv = 1.0f / (float)height;
1466 
1467  MaskRasterizeBufferData data = {
1468  .mr_handle = mr_handle,
1469  .x_inv = x_inv,
1470  .y_inv = y_inv,
1471  .x_px_ofs = x_inv * 0.5f,
1472  .y_px_ofs = y_inv * 0.5f,
1473  .width = width,
1474  .buffer = buffer,
1475  };
1476  TaskParallelSettings settings;
1477  BLI_parallel_range_settings_defaults(&settings);
1478  settings.use_threading = ((size_t)height * width > 10000);
1479  BLI_task_parallel_range(0, (int)height, &data, maskrasterize_buffer_cb, &settings);
1480 }
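/* Minimal usage sketch of the public API in this file; `mask`, `width` and `height` are
 * assumed to be provided by the caller, so the sketch is kept inside an `#if 0` block
 * rather than compiled code. */
#if 0
  MaskRasterHandle *handle = BKE_maskrasterize_handle_new();

  /* Convert the mask layers into triangles/quads and build the bucket grids. */
  BKE_maskrasterize_handle_init(handle, mask, width, height, true, true, true);

  /* Sample a single point... */
  const float xy[2] = {0.5f, 0.5f};
  const float value = BKE_maskrasterize_handle_sample(handle, xy);

  /* ...or rasterize the whole mask into a float buffer (threaded internally). */
  float *buffer = MEM_mallocN(sizeof(float) * (size_t)width * (size_t)height, __func__);
  BKE_maskrasterize_buffer(handle, (unsigned int)width, (unsigned int)height, buffer);

  MEM_freeN(buffer);
  BKE_maskrasterize_handle_free(handle);
#endif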