namespace blender {

IndexMask IndexMask::slice_and_offset(const IndexRange slice, Vector<int64_t> &r_new_indices) const
{
  const int64_t slice_size = slice.size();
  if (slice_size == 0) {
    return {};
  }
  IndexMask sliced_mask{indices_.slice(slice)};
  if (sliced_mask.is_range()) {
    /* The sliced indices are consecutive, so the offset result is simply a full range. */
    return IndexMask(slice_size);
  }
  const int64_t offset = sliced_mask.indices().first();
  if (offset == 0) {
    return sliced_mask;
  }
  /* Shift all indices so that the returned mask starts at zero. */
  r_new_indices.resize(slice_size);
  for (const int64_t i : IndexRange(slice_size)) {
    r_new_indices[i] = sliced_mask[i] - offset;
  }
  return IndexMask(r_new_indices.as_span());
}
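/* Usage sketch (added illustration; `slice_and_offset_example` is a hypothetical helper, not
 * part of this file): slicing the mask {3, 4, 5, 7} by IndexRange(1, 3) selects {4, 5, 7},
 * which is then shifted so that the result starts at zero. */
static void slice_and_offset_example()
{
  const Vector<int64_t> indices = {3, 4, 5, 7};
  const IndexMask mask{indices.as_span()};
  Vector<int64_t> new_indices;
  const IndexMask sliced = mask.slice_and_offset(IndexRange(1, 3), new_indices);
  /* `sliced` now contains {0, 1, 3}, i.e. {4, 5, 7} offset by -4. */
  BLI_assert(sliced[2] == 3);
}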
IndexMask IndexMask::invert(const IndexRange full_range, Vector<int64_t> &r_new_indices) const
{
  BLI_assert(this->contained_in(full_range));
  if (full_range.size() == indices_.size()) {
    /* The mask fills the entire range, so its inversion is empty. */
    return {};
  }
  if (indices_.is_empty()) {
    return full_range;
  }
  r_new_indices.clear();

  const Vector<IndexRange> inverted_ranges = this->extract_ranges_invert(full_range, nullptr);
  for (const IndexRange &range : inverted_ranges) {
    for (const int64_t index : range) {
      r_new_indices.append(index);
    }
  }
  return r_new_indices.as_span();
}
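/* Usage sketch (added illustration, hypothetical helper): inverting {1, 2, 5} within
 * IndexRange(8) yields every index of the full range that the mask does not contain. */
static void invert_example()
{
  const Vector<int64_t> indices = {1, 2, 5};
  const IndexMask mask{indices.as_span()};
  Vector<int64_t> new_indices;
  const IndexMask inverted = mask.invert(IndexRange(8), new_indices);
  /* `inverted` now contains {0, 3, 4, 6, 7}. */
  BLI_assert(inverted.size() == 5);
}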
Vector<IndexRange> IndexMask::extract_ranges() const
{
  Vector<IndexRange> ranges;
  int64_t range_start = 0;
  while (range_start < indices_.size()) {
    int64_t current_range_end = range_start + 1;
    /* Grow the range while it stays valid, doubling the step size every iteration. */
    int64_t step_size = 1;
    while (true) {
      const int64_t possible_range_end = current_range_end + step_size;
      if (possible_range_end > indices_.size()) {
        break;
      }
      if (!this->slice(range_start, possible_range_end - range_start).is_range()) {
        break;
      }
      current_range_end = possible_range_end;
      step_size *= 2;
    }
    /* This step size was tried already, no need to try it again. */
    step_size /= 2;
    /* Shrink the step size back down to pin the exact end of the range. */
    while (step_size > 0) {
      const int64_t possible_range_end = current_range_end + step_size;
      step_size /= 2;
      if (possible_range_end > indices_.size()) {
        continue;
      }
      if (!this->slice(range_start, possible_range_end - range_start).is_range()) {
        continue;
      }
      current_range_end = possible_range_end;
    }
    ranges.append(IndexRange{indices_[range_start], current_range_end - range_start});
    range_start = current_range_end;
  }
  return ranges;
}
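/* The loop above is effectively an exponential search per range: the step size doubles while
 * the slice is still a consecutive run, then halves back down to find the exact end. Sketch
 * (added illustration, hypothetical helper): */
static void extract_ranges_example()
{
  const Vector<int64_t> indices = {0, 1, 2, 5, 6, 9};
  const IndexMask mask{indices.as_span()};
  const Vector<IndexRange> ranges = mask.extract_ranges();
  /* `ranges` now contains [0, 3), [5, 7) and [9, 10). */
  BLI_assert(ranges.size() == 3);
  BLI_assert(ranges[0].start() == 0 && ranges[0].size() == 3);
}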
Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range,
                                                    Vector<int64_t> *r_skip_amounts) const
{
  BLI_assert(this->contained_in(full_range));
  const Vector<IndexRange> ranges = this->extract_ranges();
  Vector<IndexRange> inverted_ranges;

  int64_t next_start = full_range.start();
  int64_t skip_amount = 0;
  for (const IndexRange &range : ranges) {
    if (range.start() > next_start) {
      inverted_ranges.append({next_start, range.start() - next_start});
      if (r_skip_amounts != nullptr) {
        r_skip_amounts->append(skip_amount);
      }
    }
    next_start = range.one_after_last();
    skip_amount += range.size();
  }
  if (next_start < full_range.one_after_last()) {
    inverted_ranges.append({next_start, full_range.one_after_last() - next_start});
    if (r_skip_amounts != nullptr) {
      r_skip_amounts->append(skip_amount);
    }
  }
  return inverted_ranges;
}
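/* Usage sketch (added illustration, hypothetical helper): `r_skip_amounts` records, for each
 * inverted range, how many masked indices precede it, which lets callers map between the two
 * index spaces. */
static void extract_ranges_invert_example()
{
  const Vector<int64_t> indices = {1, 2, 5};
  const IndexMask mask{indices.as_span()};
  Vector<int64_t> skip_amounts;
  const Vector<IndexRange> inverted = mask.extract_ranges_invert(IndexRange(8), &skip_amounts);
  /* `inverted` contains [0, 1), [3, 5) and [6, 8); `skip_amounts` contains {0, 2, 3}. */
  BLI_assert(skip_amounts.last() == 3);
}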
}  // namespace blender

namespace blender::index_mask_ops::detail {

IndexMask find_indices_based_on_predicate__merge(
    IndexMask indices_to_check,
    threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> &sub_masks,
    Vector<int64_t> &r_indices)
{
  /* Gather the index vectors that were filled by the individual threads. */
  Vector<Vector<int64_t> *> all_vectors;
  int64_t result_mask_size = 0;
  for (Vector<Vector<int64_t>> &local_sub_masks : sub_masks) {
    for (Vector<int64_t> &sub_mask : local_sub_masks) {
      all_vectors.append(&sub_mask);
      result_mask_size += sub_mask.size();
    }
  }

  if (all_vectors.is_empty()) {
    /* The predicate was false for every element. */
    return {};
  }
  if (result_mask_size == indices_to_check.size()) {
    /* The predicate was true for every element. */
    return indices_to_check;
  }
  if (all_vectors.size() == 1) {
    /* All matching indices are in a single vector already. */
    r_indices = std::move(*all_vectors[0]);
    return r_indices.as_span();
  }

  /* The vectors do not overlap, so sorting by the first element is sufficient. */
  std::sort(all_vectors.begin(),
            all_vectors.end(),
            [](const Vector<int64_t> *a, const Vector<int64_t> *b) { return (*a)[0] < (*b)[0]; });

  /* Precompute where every vector starts in the merged result, so that the copies below can
   * run in parallel. */
  Vector<int64_t> offsets;
  offsets.reserve(all_vectors.size() + 1);
  offsets.append(0);
  for (Vector<int64_t> *vector : all_vectors) {
    offsets.append(offsets.last() + vector->size());
  }

  r_indices.resize(result_mask_size);

  threading::parallel_for(all_vectors.index_range(), 100, [&](const IndexRange all_vectors_range) {
    for (const int64_t vector_index : all_vectors_range) {
      Vector<int64_t> &vector = *all_vectors[vector_index];
      const int64_t offset = offsets[vector_index];
      threading::parallel_for(vector.index_range(), 1024, [&](const IndexRange range) {
        initialized_copy_n(vector.data() + range.start(),
                           range.size(),
                           r_indices.data() + offset + range.start());
      });
    }
  });

  return r_indices.as_span();
}

}  // namespace blender::index_mask_ops::detail
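/* Note (added): the nested parallel_for calls above parallelize both across the gathered
 * vectors and within each individual vector, so a single very large sub-mask cannot
 * serialize the merge; the precomputed offsets make every chunk copy independent. */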
namespace blender::index_mask_ops {

IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check,
                                          const VArray<bool> &virtual_array,
                                          const int64_t parallel_grain_size,
                                          Vector<int64_t> &r_indices)
{
  if (virtual_array.is_single()) {
    return virtual_array.get_internal_single() ? indices_to_check : IndexMask(0);
  }
  if (virtual_array.is_span()) {
    const Span<bool> span = virtual_array.get_internal_span();
    return find_indices_based_on_predicate(
        indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; });
  }

  threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers;
  threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks;

  threading::parallel_for(
      indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) {
        const IndexMask sliced_mask = indices_to_check.slice(range);

        /* Materialize the necessary values into a reused buffer, to avoid a virtual function
         * call for every element. */
        Vector<bool> &buffer = materialize_buffers.local();
        buffer.reinitialize(sliced_mask.size());
        virtual_array.materialize_compressed(sliced_mask, buffer);

        Vector<int64_t> masked_indices;
        sliced_mask.to_best_mask_type([&](auto best_mask) {
          for (const int64_t i : IndexRange(best_mask.size())) {
            if (buffer[i]) {
              masked_indices.append(best_mask[i]);
            }
          }
        });
        if (!masked_indices.is_empty()) {
          sub_masks.local().append(std::move(masked_indices));
        }
      });

  return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
}

}  // namespace blender::index_mask_ops
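/* Usage sketch (added illustration; `find_indices_example` is a hypothetical helper, and the
 * construction via VArray::ForSpan assumes Blender's VArray API): select the indices at which
 * a boolean virtual array is true. */
namespace blender {
static void find_indices_example()
{
  const Array<bool> selection = {true, false, true, true, false, false, true, false};
  const VArray<bool> varray = VArray<bool>::ForSpan(selection);
  Vector<int64_t> r_indices;
  const IndexMask mask = index_mask_ops::find_indices_from_virtual_array(
      IndexMask(selection.size()), varray, 2048, r_indices);
  /* `mask` now contains {0, 2, 3, 6}. */
  BLI_assert(mask.size() == 4);
}
}  // namespace blender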