Blender V3.3
index_mask.cc
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_index_mask.hh"
#include "BLI_index_mask_ops.hh"

namespace blender {

IndexMask IndexMask::slice(int64_t start, int64_t size) const
{
  return this->slice(IndexRange(start, size));
}

IndexMask IndexMask::slice(IndexRange slice) const
{
  return IndexMask(indices_.slice(slice));
}

IndexMask IndexMask::slice_and_offset(const IndexRange slice, Vector<int64_t> &r_new_indices) const
{
  const int slice_size = slice.size();
  if (slice_size == 0) {
    return {};
  }
  IndexMask sliced_mask{indices_.slice(slice)};
  if (sliced_mask.is_range()) {
    return IndexMask(slice_size);
  }
  const int64_t offset = sliced_mask.indices().first();
  if (offset == 0) {
    return sliced_mask;
  }
  r_new_indices.resize(slice_size);
  for (const int i : IndexRange(slice_size)) {
    r_new_indices[i] = sliced_mask[i] - offset;
  }
  return IndexMask(r_new_indices.as_span());
}

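/* Usage sketch for slice_and_offset(): the returned mask is shifted so that its first index is
 * zero, which is useful when the sliced elements have been copied into a smaller buffer that is
 * indexed from zero. The concrete index values below are hypothetical.
 *
 *   const int64_t indices[] = {2, 3, 4, 7};
 *   const IndexMask mask{Span<int64_t>(indices, 4)};
 *   Vector<int64_t> new_indices;
 *   const IndexMask sliced = mask.slice_and_offset(IndexRange(1, 3), new_indices);
 *   // The slice {3, 4, 7} is not a range, so its first element 3 is subtracted from every
 *   // index and `sliced` references {0, 1, 4}, stored in `new_indices`.
 */
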
IndexMask IndexMask::invert(const IndexRange full_range, Vector<int64_t> &r_new_indices) const
{
  BLI_assert(this->contained_in(full_range));
  if (full_range.size() == indices_.size()) {
    return {};
  }
  if (indices_.is_empty()) {
    return full_range;
  }
  r_new_indices.clear();

  const Vector<IndexRange> ranges = this->extract_ranges_invert(full_range, nullptr);
  for (const IndexRange &range : ranges) {
    for (const int64_t index : range) {
      r_new_indices.append(index);
    }
  }
  return r_new_indices.as_span();
}

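/* Usage sketch for invert(): the result contains exactly the indices of `full_range` that are
 * not in the mask. The concrete values below are hypothetical.
 *
 *   const int64_t indices[] = {1, 2, 5};
 *   const IndexMask mask{Span<int64_t>(indices, 3)};
 *   Vector<int64_t> inverted_storage;
 *   const IndexMask inverted = mask.invert(IndexRange(8), inverted_storage);
 *   // `inverted` references {0, 3, 4, 6, 7}.
 */
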
Vector<IndexRange> IndexMask::extract_ranges() const
{
  Vector<IndexRange> ranges;
  int64_t range_start = 0;
  while (range_start < indices_.size()) {
    int64_t current_range_end = range_start + 1;
    int64_t step_size = 1;

    while (true) {
      const int64_t possible_range_end = current_range_end + step_size;
      if (possible_range_end > indices_.size()) {
        break;
      }
      if (!this->slice(range_start, possible_range_end - range_start).is_range()) {
        break;
      }
      current_range_end = possible_range_end;
      step_size *= 2;
    }

    /* This step size was tried already, no need to try it again. */
    step_size /= 2;

    while (step_size > 0) {
      const int64_t possible_range_end = current_range_end + step_size;
      step_size /= 2;
      if (possible_range_end > indices_.size()) {
        continue;
      }
      if (!this->slice(range_start, possible_range_end - range_start).is_range()) {
        continue;
      }
      current_range_end = possible_range_end;
    }

    ranges.append(IndexRange{indices_[range_start], current_range_end - range_start});
    range_start = current_range_end;
  }
  return ranges;
}

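/* Usage sketch for extract_ranges(): consecutive indices are grouped into ranges by growing a
 * candidate range with doubling step sizes and then refining its end with halving step sizes,
 * similar to an exponential search. The concrete values below are hypothetical.
 *
 *   const int64_t indices[] = {0, 1, 2, 10, 11, 20};
 *   const IndexMask mask{Span<int64_t>(indices, 6)};
 *   const Vector<IndexRange> ranges = mask.extract_ranges();
 *   // `ranges` is {IndexRange(0, 3), IndexRange(10, 2), IndexRange(20, 1)}.
 */
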
Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range,
                                                    Vector<int64_t> *r_skip_amounts) const
{
  BLI_assert(this->contained_in(full_range));
  const Vector<IndexRange> ranges = this->extract_ranges();
  Vector<IndexRange> inverted_ranges;

  int64_t skip_amount = 0;
  int64_t next_start = full_range.start();
  for (const int64_t i : ranges.index_range()) {
    const IndexRange range = ranges[i];
    if (range.start() > next_start) {
      inverted_ranges.append({next_start, range.start() - next_start});
      if (r_skip_amounts != nullptr) {
        r_skip_amounts->append(skip_amount);
      }
    }
    next_start = range.one_after_last();
    skip_amount += range.size();
  }
  if (next_start < full_range.one_after_last()) {
    inverted_ranges.append({next_start, full_range.one_after_last() - next_start});
    if (r_skip_amounts != nullptr) {
      r_skip_amounts->append(skip_amount);
    }
  }
  return inverted_ranges;
}

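/* Usage sketch for extract_ranges_invert(): the gaps between the mask's ranges are returned, and
 * `r_skip_amounts` receives, for every inverted range, how many masked indices precede it in
 * `full_range`. The concrete values below are hypothetical.
 *
 *   const int64_t indices[] = {2, 3, 7};
 *   const IndexMask mask{Span<int64_t>(indices, 3)};
 *   Vector<int64_t> skip_amounts;
 *   const Vector<IndexRange> gaps = mask.extract_ranges_invert(IndexRange(10), &skip_amounts);
 *   // `gaps` is {IndexRange(0, 2), IndexRange(4, 3), IndexRange(8, 2)} and
 *   // `skip_amounts` is {0, 2, 3}.
 */
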
} // namespace blender

namespace blender::index_mask_ops {

namespace detail {

IndexMask find_indices_based_on_predicate__merge(
    IndexMask indices_to_check,
    threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> &sub_masks,
    Vector<int64_t> &r_indices)
{
  /* Gather vectors that have been generated by possibly multiple threads. */
  Vector<Vector<int64_t> *> all_vectors;
  int64_t result_mask_size = 0;
  for (Vector<Vector<int64_t>> &local_sub_masks : sub_masks) {
    for (Vector<int64_t> &sub_mask : local_sub_masks) {
      BLI_assert(!sub_mask.is_empty());
      all_vectors.append(&sub_mask);
      result_mask_size += sub_mask.size();
    }
  }

  if (all_vectors.is_empty()) {
    /* Special case when the predicate was false for all elements. */
    return {};
  }
  if (result_mask_size == indices_to_check.size()) {
    /* Special case when the predicate was true for all elements. */
    return indices_to_check;
  }
  if (all_vectors.size() == 1) {
    /* Special case when all indices for which the predicate is true happen to be in a single
     * vector. */
    r_indices = std::move(*all_vectors[0]);
    return r_indices.as_span();
  }

  /* Indices in separate vectors don't overlap. So it is ok to sort the vectors just by looking at
   * the first element. */
  std::sort(all_vectors.begin(),
            all_vectors.end(),
            [](const Vector<int64_t> *a, const Vector<int64_t> *b) { return (*a)[0] < (*b)[0]; });

  /* Precompute the offsets for the individual vectors, so that the indices can be copied into the
   * final vector in parallel. */
  Vector<int64_t> offsets;
  offsets.reserve(all_vectors.size() + 1);
  offsets.append(0);
  for (Vector<int64_t> *vector : all_vectors) {
    offsets.append(offsets.last() + vector->size());
  }

  r_indices.resize(result_mask_size);

  /* Fill the final index mask in parallel again. */
  threading::parallel_for(all_vectors.index_range(), 100, [&](const IndexRange all_vectors_range) {
    for (const int64_t vector_index : all_vectors_range) {
      Vector<int64_t> &vector = *all_vectors[vector_index];
      const int64_t offset = offsets[vector_index];
      threading::parallel_for(vector.index_range(), 1024, [&](const IndexRange range) {
        initialized_copy_n(vector.data() + range.start(),
                           range.size(),
                           r_indices.data() + offset + range.start());
      });
    }
  });

  return r_indices.as_span();
}

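/* Sketch of the merge step above with hypothetical per-thread vectors: the chunks handled by
 * different threads cover disjoint, increasing slices of `indices_to_check`, so sorting the
 * vectors by their first element restores the global order, and the exclusive prefix sum in
 * `offsets` gives every vector its destination position for the parallel copy.
 *
 *   per-thread vectors:  {4, 5}, {0, 2}, {9}
 *   sorted by first:     {0, 2}, {4, 5}, {9}
 *   offsets:             0, 2, 4, 5
 *   r_indices:           {0, 2, 4, 5, 9}
 */
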
} // namespace detail

IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check,
                                          const VArray<bool> &virtual_array,
                                          const int64_t parallel_grain_size,
                                          Vector<int64_t> &r_indices)
{
  if (virtual_array.is_single()) {
    return virtual_array.get_internal_single() ? indices_to_check : IndexMask(0);
  }
  if (virtual_array.is_span()) {
    const Span<bool> span = virtual_array.get_internal_span();
    return find_indices_based_on_predicate(
        indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; });
  }

  threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers;
  threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks;

  threading::parallel_for(
      indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) {
        const IndexMask sliced_mask = indices_to_check.slice(range);

        /* To avoid virtual function call overhead from accessing the virtual array,
         * materialize the necessary indices for this chunk into a reused buffer. */
        Vector<bool> &buffer = materialize_buffers.local();
        buffer.reinitialize(sliced_mask.size());
        virtual_array.materialize_compressed(sliced_mask, buffer);

        Vector<int64_t> masked_indices;
        sliced_mask.to_best_mask_type([&](auto best_mask) {
          for (const int64_t i : IndexRange(best_mask.size())) {
            if (buffer[i]) {
              masked_indices.append(best_mask[i]);
            }
          }
        });
        if (!masked_indices.is_empty()) {
          sub_masks.local().append(std::move(masked_indices));
        }
      });

  return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
}

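/* Usage sketch for find_indices_from_virtual_array(): it behaves like
 * find_indices_based_on_predicate() with `virtual_array[i]` as the predicate, but avoids the
 * per-element virtual call by materializing chunks of the array. The selection values and the
 * grain size below are hypothetical, and VArray<bool>::ForFunc is assumed to be available as a
 * way to build a non-span virtual array.
 *
 *   const VArray<bool> selection = VArray<bool>::ForFunc(
 *       100, [](const int64_t i) { return i % 3 == 0; });
 *   Vector<int64_t> index_storage;
 *   const IndexMask mask = find_indices_from_virtual_array(
 *       IndexMask(100), selection, 512, index_storage);
 *   // `mask` contains every index in [0, 100) that is a multiple of 3.
 */
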
} // namespace blender::index_mask_ops