Blender V3.3
BLI_virtual_array.hh
/* SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

#include "BLI_any.hh"
#include "BLI_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_span.hh"

namespace blender {

/* Forward declarations for the generic (non-templated) virtual arrays. */
class GVArray;
class GVMutableArray;

/**
 * Is used to quickly check whether a virtual array is a span or a single value, and to access
 * the underlying data without a virtual call per element.
 */
struct CommonVArrayInfo {
  enum class Type : uint8_t {
    /* Is not one of the common special types below. */
    Any,
    Span,
    Single,
  };

  Type type = Type::Any;

  /**
   * True when the virtual array may own the memory referenced by #data, in which case #data must
   * not be used after the virtual array has been destructed.
   */
  bool may_have_ownership = true;

  /** Points to the span data or to the single value, depending on #type. */
  const void *data;

  CommonVArrayInfo() = default;
  CommonVArrayInfo(const Type _type, const bool _may_have_ownership, const void *_data)
      : type(_type), may_have_ownership(_may_have_ownership), data(_data)
  {
  }
};
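
/* Illustrative sketch, not part of the original header: how calling code typically inspects a
 * #CommonVArrayInfo to pick a fast path. The element type `float` and the function name are
 * assumptions made for the example only. */
inline float example_read_first_element(const CommonVArrayInfo &info, const float fallback_value)
{
  switch (info.type) {
    case CommonVArrayInfo::Type::Span:
      /* #data points to the first element of a contiguous array of the element type. */
      return static_cast<const float *>(info.data)[0];
    case CommonVArrayInfo::Type::Single:
      /* #data points to the one value that is returned for every index. */
      return *static_cast<const float *>(info.data);
    case CommonVArrayInfo::Type::Any:
      /* No fast path is available; the caller has to use virtual element access instead. */
      return fallback_value;
  }
  return fallback_value;
}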

/**
 * Implements the specifics of how the elements of a virtual array are accessed. It contains a
 * bunch of virtual methods that are wrapped by #VArray.
 */
template<typename T> class VArrayImpl {
 protected:
  /** Number of elements in the virtual array. */
  const int64_t size_;

 public:
  VArrayImpl(const int64_t size) : size_(size)
  {
    BLI_assert(size_ >= 0);
  }

  virtual ~VArrayImpl() = default;

  int64_t size() const
  {
    return size_;
  }

  /** Get the element at the given index. The index is expected to be in bounds. */
  virtual T get(int64_t index) const = 0;

  virtual CommonVArrayInfo common_info() const
  {
    return {};
  }

  /** Copy the elements selected by the mask into the corresponding positions in #r_span. */
  virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
  {
    T *dst = r_span.data();
    /* Optimize for a few different common cases. */
    const CommonVArrayInfo info = this->common_info();
    switch (info.type) {
      case CommonVArrayInfo::Type::Any: {
        mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
        break;
      }
      case CommonVArrayInfo::Type::Span: {
        const T *src = static_cast<const T *>(info.data);
        mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
        break;
      }
      case CommonVArrayInfo::Type::Single: {
        const T single = *static_cast<const T *>(info.data);
        mask.foreach_index([&](const int64_t i) { dst[i] = single; });
        break;
      }
    }
  }

  /** Same as #materialize but #r_span is expected to contain uninitialized memory. */
  virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
  {
    T *dst = r_span.data();
    /* Optimize for a few different common cases. */
    const CommonVArrayInfo info = this->common_info();
    switch (info.type) {
      case CommonVArrayInfo::Type::Any: {
        mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
        break;
      }
      case CommonVArrayInfo::Type::Span: {
        const T *src = static_cast<const T *>(info.data);
        mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
        break;
      }
      case CommonVArrayInfo::Type::Single: {
        const T single = *static_cast<const T *>(info.data);
        mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); });
        break;
      }
    }
  }

  /** Copy the masked elements into #r_span so that `r_span[i] = get(mask[i])`. */
  virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
  {
    BLI_assert(mask.size() == r_span.size());
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        r_span[i] = this->get(best_mask[i]);
      }
    });
  }

  /** Same as #materialize_compressed but #r_span is expected to contain uninitialized memory. */
  virtual void materialize_compressed_to_uninitialized(IndexMask mask,
                                                       MutableSpan<T> r_span) const
  {
    BLI_assert(mask.size() == r_span.size());
    T *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        new (dst + i) T(this->get(best_mask[i]));
      }
    });
  }

  /**
   * If this virtual array wraps a generic virtual array, assign it to #varray and return true.
   * Otherwise return false.
   */
  virtual bool try_assign_GVArray(GVArray &UNUSED(varray)) const
  {
    return false;
  }

  /**
   * Return true when this virtual array is known to reference the same data as #other.
   * Returning false only means that it is unknown, not that the arrays are different.
   */
  virtual bool is_same(const VArrayImpl<T> &UNUSED(other)) const
  {
    return false;
  }
};
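
/* Illustrative sketch, not part of the original header: a minimal custom read-only
 * implementation that computes its elements on the fly (here simply the index itself). Such a
 * class would normally be instantiated through #VArray::For<...>() further below rather than
 * directly. */
class ExampleIotaVArrayImpl final : public VArrayImpl<int64_t> {
 public:
  ExampleIotaVArrayImpl(const int64_t size) : VArrayImpl<int64_t>(size)
  {
  }

  int64_t get(const int64_t index) const final
  {
    /* The value is derived from the index, so no memory has to be referenced at all. */
    return index;
  }
};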

/** Similar to #VArrayImpl, but adds methods that allow modifying the referenced elements. */
template<typename T> class VMutableArrayImpl : public VArrayImpl<T> {
 public:
  using VArrayImpl<T>::VArrayImpl;

  /**
   * Assign the given value to the given index.
   */
  virtual void set(int64_t index, T value) = 0;

  /**
   * Copy all values from the given span into the virtual array.
   */
  virtual void set_all(Span<T> src)
  {
    const CommonVArrayInfo info = this->common_info();
    if (info.type == CommonVArrayInfo::Type::Span) {
      initialized_copy_n(
          src.data(), this->size_, const_cast<T *>(static_cast<const T *>(info.data)));
    }
    else {
      const int64_t size = this->size_;
      for (int64_t i = 0; i < size; i++) {
        this->set(i, src[i]);
      }
    }
  }

  /**
   * Similar to #VArrayImpl::try_assign_GVArray, but for mutable virtual arrays.
   */
  virtual bool try_assign_GVMutableArray(GVMutableArray &UNUSED(varray)) const
  {
    return false;
  }
};

/**
 * #VArrayImpl that wraps a contiguous span of memory. It has separate public and protected
 * constructors so that it can also serve as a base class for implementations that own their
 * data.
 */
template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
 protected:
  T *data_ = nullptr;

 public:
  VArrayImpl_For_Span(const MutableSpan<T> data)
      : VMutableArrayImpl<T>(data.size()), data_(data.data())
  {
  }

 protected:
  VArrayImpl_For_Span(const int64_t size) : VMutableArrayImpl<T>(size)
  {
  }

  T get(const int64_t index) const final
  {
    return data_[index];
  }

  void set(const int64_t index, T value) final
  {
    data_[index] = value;
  }

  CommonVArrayInfo common_info() const override
  {
    return CommonVArrayInfo(CommonVArrayInfo::Type::Span, true, data_);
  }

  bool is_same(const VArrayImpl<T> &other) const final
  {
    if (other.size() != this->size_) {
      return false;
    }
    const CommonVArrayInfo other_info = other.common_info();
    if (other_info.type != CommonVArrayInfo::Type::Span) {
      return false;
    }
    return data_ == static_cast<const T *>(other_info.data);
  }

  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
  {
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        r_span[i] = data_[best_mask[i]];
      }
    });
  }

  void materialize_compressed_to_uninitialized(IndexMask mask,
                                               MutableSpan<T> r_span) const override
  {
    T *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        new (dst + i) T(data_[best_mask[i]]);
      }
    });
  }
};

/**
 * A version of #VArrayImpl_For_Span that never owns the referenced span.
 */
template<typename T> class VArrayImpl_For_Span_final final : public VArrayImpl_For_Span<T> {
 public:
  using VArrayImpl_For_Span<T>::VArrayImpl_For_Span;

 private:
  CommonVArrayInfo common_info() const final
  {
    return CommonVArrayInfo(CommonVArrayInfo::Type::Span, false, this->data_);
  }
};

template<typename T>
inline constexpr bool is_trivial_extended_v<VArrayImpl_For_Span_final<T>> = true;

/**
 * A variant of #VArrayImpl_For_Span that owns the underlying data. The #Container type has to
 * provide `size()` and `data()` methods that expose a contiguous array of elements.
 */
template<typename Container, typename T = typename Container::value_type>
class VArrayImpl_For_ArrayContainer : public VArrayImpl_For_Span<T> {
 private:
  Container container_;

 public:
  VArrayImpl_For_ArrayContainer(Container container)
      : VArrayImpl_For_Span<T>((int64_t)container.size()), container_(std::move(container))
  {
    this->data_ = const_cast<T *>(container_.data());
  }
};
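
/* Illustrative sketch, not part of the original header: moving an owning container directly into
 * the implementation class. In practice this is done through #VArray::ForContainer (defined
 * further below), which keeps the moved-in container alive for as long as the virtual array
 * exists. #Array is used here only as an example of a contiguous container; the element type is
 * passed explicitly. */
inline int64_t example_wrap_owning_container()
{
  Array<int> values(3, 0);
  const VArrayImpl_For_ArrayContainer<Array<int>, int> impl(std::move(values));
  /* The container now lives inside the implementation, so the referenced data stays valid. */
  return impl.size();
}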

/**
 * A virtual array implementation that returns the same value for every index.
 */
template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
 private:
  T value_;

 public:
  VArrayImpl_For_Single(T value, const int64_t size)
      : VArrayImpl<T>(size), value_(std::move(value))
  {
  }

 protected:
  T get(const int64_t UNUSED(index)) const override
  {
    return value_;
  }

  CommonVArrayInfo common_info() const override
  {
    return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
  }

  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    UNUSED_VARS_NDEBUG(mask);
    r_span.fill(value_);
  }

  void materialize_compressed_to_uninitialized(IndexMask mask,
                                               MutableSpan<T> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    uninitialized_fill_n(r_span.data(), mask.size(), value_);
  }
};

template<typename T>
inline constexpr bool is_trivial_extended_v<VArrayImpl_For_Single<T>> = is_trivial_extended_v<T>;

/**
 * A virtual array implementation that computes each element by calling a function with the
 * index.
 */
template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public VArrayImpl<T> {
 private:
  GetFunc get_func_;

 public:
  VArrayImpl_For_Func(const int64_t size, GetFunc get_func)
      : VArrayImpl<T>(size), get_func_(std::move(get_func))
  {
  }

 private:
  T get(const int64_t index) const override
  {
    return get_func_(index);
  }

  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
  {
    T *dst = r_span.data();
    mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
  }

  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
  {
    T *dst = r_span.data();
    mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
  }

  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    T *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        dst[i] = get_func_(best_mask[i]);
      }
    });
  }

  void materialize_compressed_to_uninitialized(IndexMask mask,
                                               MutableSpan<T> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    T *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        new (dst + i) T(get_func_(best_mask[i]));
      }
    });
  }
};
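
/* Illustrative sketch, not part of the original header: wrapping a lambda in the implementation
 * class directly. Callers usually go through #VArray::ForFunc (defined further below) instead of
 * naming this class, because the lambda type is hard to spell out. */
inline int64_t example_wrap_lambda()
{
  auto square = [](const int64_t index) { return index * index; };
  const VArrayImpl_For_Func<int64_t, decltype(square)> impl(10, square);
  /* Element access goes through the public #VArrayImpl interface. */
  const VArrayImpl<int64_t> &base = impl;
  return base.get(3); /* Returns 9, computed on demand from the index. */
}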

/**
 * A virtual array implementation that exposes one value of type #ElemT derived from each element
 * in a span of #StructT, using #GetFunc for reading and (optionally) #SetFunc for writing.
 */
template<typename StructT,
         typename ElemT,
         ElemT (*GetFunc)(const StructT &),
         void (*SetFunc)(StructT &, ElemT) = nullptr>
class VArrayImpl_For_DerivedSpan : public VMutableArrayImpl<ElemT> {
 private:
  StructT *data_;

 public:
  VArrayImpl_For_DerivedSpan(const MutableSpan<StructT> data)
      : VMutableArrayImpl<ElemT>(data.size()), data_(data.data())
  {
  }

  template<typename OtherStructT,
           typename OtherElemT,
           OtherElemT (*OtherGetFunc)(const OtherStructT &),
           void (*OtherSetFunc)(OtherStructT &, OtherElemT)>
  friend class VArrayImpl_For_DerivedSpan;

 private:
  ElemT get(const int64_t index) const override
  {
    return GetFunc(data_[index]);
  }

  void set(const int64_t index, ElemT value) override
  {
    SetFunc(data_[index], std::move(value));
  }

  void materialize(IndexMask mask, MutableSpan<ElemT> r_span) const override
  {
    ElemT *dst = r_span.data();
    mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
  }

  void materialize_to_uninitialized(IndexMask mask, MutableSpan<ElemT> r_span) const override
  {
    ElemT *dst = r_span.data();
    mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
  }

  void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    ElemT *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        dst[i] = GetFunc(data_[best_mask[i]]);
      }
    });
  }

  void materialize_compressed_to_uninitialized(IndexMask mask,
                                               MutableSpan<ElemT> r_span) const override
  {
    BLI_assert(mask.size() == r_span.size());
    ElemT *dst = r_span.data();
    mask.to_best_mask_type([&](auto best_mask) {
      for (const int64_t i : IndexRange(best_mask.size())) {
        new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
      }
    });
  }

  bool is_same(const VArrayImpl<ElemT> &other) const override
  {
    if (other.size() != this->size_) {
      return false;
    }
    if (const VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc> *other_typed =
            dynamic_cast<const VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc> *>(&other)) {
      return other_typed->data_ == data_;
    }
    if (const VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc, SetFunc> *other_typed =
            dynamic_cast<const VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc, SetFunc> *>(
                &other)) {
      return other_typed->data_ == data_;
    }
    return false;
  }
};
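
/* Illustrative sketch, not part of the original header: exposing one member of a struct span as
 * a virtual array of floats. The struct and accessor functions are made up for the example; real
 * code usually passes them to #VArray::ForDerivedSpan / #VMutableArray::ForDerivedSpan further
 * below instead of naming this class directly. */
namespace derived_span_example {

struct ExampleVert {
  float weight;
  int flag;
};

inline float get_weight(const ExampleVert &vert)
{
  return vert.weight;
}
inline void set_weight(ExampleVert &vert, const float value)
{
  vert.weight = value;
}

inline float example_first_weight(const MutableSpan<ExampleVert> verts)
{
  BLI_assert(!verts.is_empty());
  VArrayImpl_For_DerivedSpan<ExampleVert, float, get_weight, set_weight> impl(verts);
  /* Element access goes through the public #VMutableArrayImpl interface. */
  VMutableArrayImpl<float> &base = impl;
  base.set(0, 0.5f);
  return base.get(0);
}

}  // namespace derived_span_example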

template<typename StructT,
         typename ElemT,
         ElemT (*GetFunc)(const StructT &),
         void (*SetFunc)(StructT &, ElemT)>
inline constexpr bool
    is_trivial_extended_v<VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc, SetFunc>> = true;

namespace detail {

/**
 * Is stored in the #Any that holds the virtual array implementation. It knows how to retrieve
 * the #VArrayImpl pointer from the type-erased storage buffer.
 */
template<typename T> struct VArrayAnyExtraInfo {
  /** Gets the virtual array implementation that is stored in the given buffer. */
  const VArrayImpl<T> *(*get_varray)(const void *buffer);

  template<typename StorageT> static constexpr VArrayAnyExtraInfo get()
  {
    /* These are the only allowed types in the #Any. */
    static_assert(
        std::is_base_of_v<VArrayImpl<T>, StorageT> ||
        is_same_any_v<StorageT, const VArrayImpl<T> *, std::shared_ptr<const VArrayImpl<T>>>);

    /* Depending on how the virtual array implementation is stored in the #Any, a different
     * #get_varray function is required. */
    if constexpr (std::is_base_of_v<VArrayImpl<T>, StorageT>) {
      return {[](const void *buffer) {
        return static_cast<const VArrayImpl<T> *>((const StorageT *)buffer);
      }};
    }
    else if constexpr (std::is_same_v<StorageT, const VArrayImpl<T> *>) {
      return {[](const void *buffer) { return *(const StorageT *)buffer; }};
    }
    else if constexpr (std::is_same_v<StorageT, std::shared_ptr<const VArrayImpl<T>>>) {
      return {[](const void *buffer) { return ((const StorageT *)buffer)->get(); }};
    }
    else {
      BLI_assert_unreachable();
      return {};
    }
  }
};

}  // namespace detail

/**
 * Common base class of #VArray and #VMutableArray. It stores the virtual array implementation
 * (and optionally owns it) and exposes the read-only part of the interface.
 */
template<typename T> class VArrayCommon {
 protected:
  /**
   * Stores the virtual array implementation. Small implementations are stored inline to avoid a
   * separate allocation.
   */
  using Storage = Any<detail::VArrayAnyExtraInfo<T>, 24, 8>;

  /**
   * Pointer to the currently used implementation. May be null when the virtual array is empty.
   */
  const VArrayImpl<T> *impl_ = nullptr;

  /** Owns the implementation (or a pointer to it) and manages its lifetime. */
  Storage storage_;

 protected:
  VArrayCommon() = default;

  /** Copy constructor. */
  VArrayCommon(const VArrayCommon &other) : storage_(other.storage_)
  {
    impl_ = this->impl_from_storage();
  }

  /** Move constructor. */
  VArrayCommon(VArrayCommon &&other) noexcept : storage_(std::move(other.storage_))
  {
    impl_ = this->impl_from_storage();
    other.storage_.reset();
    other.impl_ = nullptr;
  }

  /**
   * Wrap an existing #VArrayImpl without taking ownership of it.
   */
  VArrayCommon(const VArrayImpl<T> *impl) : impl_(impl)
  {
    storage_ = impl_;
  }

  /**
   * Wrap an existing #VArrayImpl that is owned by a #std::shared_ptr.
   */
  VArrayCommon(std::shared_ptr<const VArrayImpl<T>> impl) : impl_(impl.get())
  {
    if (impl) {
      storage_ = std::move(impl);
    }
  }

  /**
   * Replace the contained implementation with a newly constructed #ImplT.
   */
  template<typename ImplT, typename... Args> void emplace(Args &&...args)
  {
    /* Make sure we are actually constructing a #VArrayImpl. */
    static_assert(std::is_base_of_v<VArrayImpl<T>, ImplT>);
    if constexpr (std::is_copy_constructible_v<ImplT> && Storage::template is_inline_v<ImplT>) {
      /* Only inline the implementation when it is copyable and when it fits into the inline
       * buffer of the storage. */
      impl_ = &storage_.template emplace<ImplT>(std::forward<Args>(args)...);
    }
    else {
      /* If it can't be inlined, create a new #std::shared_ptr instead and store that in the
       * storage. */
      std::shared_ptr<const VArrayImpl<T>> ptr = std::make_shared<ImplT>(
          std::forward<Args>(args)...);
      impl_ = &*ptr;
      storage_ = std::move(ptr);
    }
  }

  /** Utility to implement a copy assignment operator in a subclass. */
  void copy_from(const VArrayCommon &other)
  {
    if (this == &other) {
      return;
    }
    storage_ = other.storage_;
    impl_ = this->impl_from_storage();
  }

  /** Utility to implement a move assignment operator in a subclass. */
  void move_from(VArrayCommon &&other) noexcept
  {
    if (this == &other) {
      return;
    }
    storage_ = std::move(other.storage_);
    impl_ = this->impl_from_storage();
    other.storage_.reset();
    other.impl_ = nullptr;
  }

  /** Get a pointer to the implementation that is currently stored in #storage_, or null. */
  const VArrayImpl<T> *impl_from_storage() const
  {
    if (!storage_.has_value()) {
      return nullptr;
    }
    return storage_.extra_info().get_varray(storage_.get());
  }

 public:
  /** Return false when there is no virtual array implementation currently. */
  operator bool() const
  {
    return impl_ != nullptr;
  }

  /**
   * Get the element at the given index. Note that this invokes a virtual function call per
   * element, so it should be avoided in performance-critical loops.
   */
  T operator[](const int64_t index) const
  {
    BLI_assert(*this);
    BLI_assert(index >= 0);
    BLI_assert(index < this->size());
    return impl_->get(index);
  }

  /** Same as the subscript operator. */
  T get(const int64_t index) const
  {
    return (*this)[index];
  }

  /**
   * Return the number of elements in the virtual array, or zero when there is no implementation.
   */
  int64_t size() const
  {
    if (impl_ == nullptr) {
      return 0;
    }
    return impl_->size();
  }

  /** True when the size is zero or when there is no virtual array. */
  bool is_empty() const
  {
    return this->size() == 0;
  }

  IndexRange index_range() const
  {
    return IndexRange(this->size());
  }

  CommonVArrayInfo common_info() const
  {
    BLI_assert(*this);
    return impl_->common_info();
  }

  /** Return true when the virtual array is stored as a contiguous span internally. */
  bool is_span() const
  {
    BLI_assert(*this);
    const CommonVArrayInfo info = impl_->common_info();
    return info.type == CommonVArrayInfo::Type::Span;
  }

  /**
   * Return the internally used span of the virtual array. This invokes undefined behavior when
   * the virtual array is not stored as a span internally.
   */
  Span<T> get_internal_span() const
  {
    BLI_assert(this->is_span());
    const CommonVArrayInfo info = impl_->common_info();
    return Span<T>(static_cast<const T *>(info.data), this->size());
  }

  /** Return true when the virtual array returns the same value for every index. */
  bool is_single() const
  {
    BLI_assert(*this);
    const CommonVArrayInfo info = impl_->common_info();
    return info.type == CommonVArrayInfo::Type::Single;
  }

  /**
   * Return the value that is returned for every index. This invokes undefined behavior when the
   * virtual array would not return the same value for every index.
   */
  T get_internal_single() const
  {
    BLI_assert(this->is_single());
    const CommonVArrayInfo info = impl_->common_info();
    return *static_cast<const T *>(info.data);
  }

  /**
   * Return true when this virtual array and the other one are known to reference the same data.
   */
  bool is_same(const VArrayCommon<T> &other) const
  {
    if (!*this || !other) {
      return false;
    }
    /* Check in both directions in case one does not know how to compare to the other
     * implementation. */
    if (impl_->is_same(*other.impl_)) {
      return true;
    }
    if (other.impl_->is_same(*impl_)) {
      return true;
    }
    return false;
  }

  /** Copy the entire virtual array into a span. */
  void materialize(MutableSpan<T> r_span) const
  {
    this->materialize(IndexMask(this->size()), r_span);
  }

  /** Copy the masked elements of the virtual array into the corresponding positions in a span. */
  void materialize(IndexMask mask, MutableSpan<T> r_span) const
  {
    BLI_assert(mask.min_array_size() <= this->size());
    impl_->materialize(mask, r_span);
  }

  void materialize_to_uninitialized(MutableSpan<T> r_span) const
  {
    this->materialize_to_uninitialized(IndexMask(this->size()), r_span);
  }

  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
  {
    BLI_assert(mask.min_array_size() <= this->size());
    impl_->materialize_to_uninitialized(mask, r_span);
  }

  /** Copy the masked elements of the virtual array into a span, compacted to the front. */
  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
  {
    impl_->materialize_compressed(mask, r_span);
  }

  void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
  {
    impl_->materialize_compressed_to_uninitialized(mask, r_span);
  }

  /** See #VArrayImpl::try_assign_GVArray. */
  bool try_assign_GVArray(GVArray &varray) const
  {
    return impl_->try_assign_GVArray(varray);
  }
};
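
/* Illustrative sketch, not part of the original header: gathering selected elements of any
 * virtual array into a caller-provided buffer. The function and parameter names are made up for
 * the example. */
inline void example_gather_compressed(const VArrayCommon<float> &varray,
                                      const IndexMask mask,
                                      MutableSpan<float> r_buffer)
{
  BLI_assert(r_buffer.size() == mask.size());
  if (varray.is_single()) {
    /* Cheap fast path: every index returns the same value. */
    r_buffer.fill(varray.get_internal_single());
    return;
  }
  /* Copy the selected elements to the front of the buffer (`r_buffer[i] = varray[mask[i]]`).
   * Span-backed implementations handle this without per-element virtual calls. */
  varray.materialize_compressed(mask, r_buffer);
}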

template<typename T> class VMutableArray;

/**
 * Various tags to disambiguate the constructors below.
 */
namespace varray_tag {
struct span {
};
struct single_ref {
};
struct single {
};
}  // namespace varray_tag

/**
 * A #VArray wraps a virtual array implementation and provides read-only access to its elements.
 */
template<typename T> class VArray : public VArrayCommon<T> {
  friend VMutableArray<T>;

 public:
  VArray() = default;
  VArray(const VArray &other) = default;
  VArray(VArray &&other) noexcept = default;

  VArray(const VArrayImpl<T> *impl) : VArrayCommon<T>(impl)
  {
  }

  VArray(std::shared_ptr<const VArrayImpl<T>> impl) : VArrayCommon<T>(std::move(impl))
  {
  }

  VArray(varray_tag::span /* tag */, Span<T> span)
  {
    /* Cast const away, because the virtual array implementation for const and non const spans is
     * shared. */
    MutableSpan<T> mutable_span{const_cast<T *>(span.data()), span.size()};
    this->template emplace<VArrayImpl_For_Span_final<T>>(mutable_span);
  }

  VArray(varray_tag::single /* tag */, T value, const int64_t size)
  {
    this->template emplace<VArrayImpl_For_Single<T>>(std::move(value), size);
  }

  /**
   * Construct a new virtual array for a custom #VArrayImpl.
   */
  template<typename ImplT, typename... Args> static VArray For(Args &&...args)
  {
    static_assert(std::is_base_of_v<VArrayImpl<T>, ImplT>);
    VArray varray;
    varray.template emplace<ImplT>(std::forward<Args>(args)...);
    return varray;
  }

  /**
   * Construct a new virtual array that has the same value at every index.
   */
  static VArray ForSingle(T value, const int64_t size)
  {
    return VArray(varray_tag::single{}, std::move(value), size);
  }

  /**
   * Construct a new virtual array for an existing span. This does not take ownership of the
   * underlying memory.
   */
  static VArray ForSpan(Span<T> values)
  {
    return VArray(varray_tag::span{}, values);
  }

  /**
   * Construct a new virtual array that computes each element by calling the given function with
   * the index.
   */
  template<typename GetFunc> static VArray ForFunc(const int64_t size, GetFunc get_func)
  {
    return VArray::For<VArrayImpl_For_Func<T, decltype(get_func)>>(size, std::move(get_func));
  }

  /**
   * Construct a new virtual array for an existing span of structs with an element accessor
   * function. This does not take ownership of the span.
   */
  template<typename StructT, T (*GetFunc)(const StructT &)>
  static VArray ForDerivedSpan(Span<StructT> values)
  {
    /* Cast const away, because the virtual array implementation for const and non const derived
     * spans is shared. */
    MutableSpan<StructT> span{const_cast<StructT *>(values.data()), values.size()};
    return VArray::For<VArrayImpl_For_DerivedSpan<StructT, T, GetFunc>>(span);
  }

  /**
   * Construct a new virtual array for an existing container that exposes a contiguous array of
   * elements. The container is moved into the virtual array to keep the referenced memory alive.
   */
  template<typename ContainerT> static VArray ForContainer(ContainerT container)
  {
    return VArray::For<VArrayImpl_For_ArrayContainer<ContainerT>>(std::move(container));
  }

  VArray &operator=(const VArray &other)
  {
    this->copy_from(other);
    return *this;
  }

  VArray &operator=(VArray &&other) noexcept
  {
    this->move_from(std::move(other));
    return *this;
  }
};
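
/* Illustrative sketch, not part of the original header: the typical ways a read-only virtual
 * array is created. The function and variable names are made up for the example. */
inline float example_create_varrays(const Span<float> existing_values)
{
  /* All three variables have the same type, independent of how the data is stored. */
  const VArray<float> from_span = VArray<float>::ForSpan(existing_values);
  const VArray<float> from_single = VArray<float>::ForSingle(1.0f, existing_values.size());
  const VArray<float> from_func = VArray<float>::ForFunc(
      existing_values.size(), [](const int64_t index) { return float(index) * 0.5f; });
  /* Element access goes through a virtual call; use #VArraySpan (below) in hot loops. */
  return from_span.is_empty() ? 0.0f : from_span[0] + from_single[0] + from_func[0];
}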

/**
 * Similar to #VArray, but the referenced elements can also be modified.
 */
template<typename T> class VMutableArray : public VArrayCommon<T> {
 public:
  VMutableArray() = default;
  VMutableArray(const VMutableArray &other) = default;
  VMutableArray(VMutableArray &&other) noexcept = default;

  VMutableArray(const VMutableArrayImpl<T> *impl) : VArrayCommon<T>(impl)
  {
  }

  VMutableArray(std::shared_ptr<const VMutableArrayImpl<T>> impl)
      : VArrayCommon<T>(std::move(impl))
  {
  }

  /**
   * Construct a new mutable virtual array for a custom #VMutableArrayImpl.
   */
  template<typename ImplT, typename... Args> static VMutableArray For(Args &&...args)
  {
    static_assert(std::is_base_of_v<VMutableArrayImpl<T>, ImplT>);
    VMutableArray varray;
    varray.template emplace<ImplT>(std::forward<Args>(args)...);
    return varray;
  }

  /**
   * Construct a new mutable virtual array for an existing span. This does not take ownership of
   * the span.
   */
  static VMutableArray ForSpan(MutableSpan<T> values)
  {
    return VMutableArray::For<VArrayImpl_For_Span_final<T>>(values);
  }

  /**
   * Construct a new mutable virtual array for an existing span of structs with element accessor
   * functions. This does not take ownership of the span.
   */
  template<typename StructT, T (*GetFunc)(const StructT &), void (*SetFunc)(StructT &, T)>
  static VMutableArray ForDerivedSpan(MutableSpan<StructT> values)
  {
    return VMutableArray::For<VArrayImpl_For_DerivedSpan<StructT, T, GetFunc, SetFunc>>(values);
  }

  /** Convert to a read-only #VArray by copying. */
  operator VArray<T>() const &
  {
    VArray<T> varray;
    varray.copy_from(*this);
    return varray;
  }

  /** Convert to a read-only #VArray by moving. */
  operator VArray<T>() &&noexcept
  {
    VArray<T> varray;
    varray.move_from(std::move(*this));
    return varray;
  }

  VMutableArray &operator=(const VMutableArray &other)
  {
    this->copy_from(other);
    return *this;
  }

  VMutableArray &operator=(VMutableArray &&other) noexcept
  {
    this->move_from(std::move(other));
    return *this;
  }

  /**
   * Get access to the internal span. This invokes undefined behavior when the virtual array is
   * not stored as a span internally.
   */
  MutableSpan<T> get_internal_span() const
  {
    BLI_assert(this->is_span());
    const CommonVArrayInfo info = this->get_impl()->common_info();
    return MutableSpan<T>(const_cast<T *>(static_cast<const T *>(info.data)), this->size());
  }

  /**
   * Set the value at the given index.
   */
  void set(const int64_t index, T value)
  {
    BLI_assert(index >= 0);
    BLI_assert(index < this->size());
    this->get_impl()->set(index, std::move(value));
  }

  /**
   * Copy the values from the source span to all elements in the virtual array.
   */
  void set_all(Span<T> src)
  {
    BLI_assert(src.size() == this->size());
    this->get_impl()->set_all(src);
  }

  /** See #VMutableArrayImpl::try_assign_GVMutableArray. */
  bool try_assign_GVMutableArray(GVMutableArray &varray) const
  {
    return this->get_impl()->try_assign_GVMutableArray(varray);
  }

 private:
  /** Utility to get the pointer to the wrapped #VMutableArrayImpl. */
  VMutableArrayImpl<T> *get_impl() const
  {
    /* This cast is valid by the invariant that a #VMutableArray->impl_ is always a
     * #VMutableArrayImpl. */
    return (VMutableArrayImpl<T> *)this->impl_;
  }
};
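
/* Illustrative sketch, not part of the original header: writing through a mutable virtual array.
 * The function and parameter names are made up for the example. */
inline void example_write_values(MutableSpan<int> existing_values)
{
  VMutableArray<int> varray = VMutableArray<int>::ForSpan(existing_values);
  if (!varray.is_empty()) {
    /* Writes go through the implementation, here directly into the wrapped span. */
    varray.set(0, 42);
  }
  /* Bulk assignment; the size of the source span has to match the virtual array. */
  Array<int> new_values(varray.size(), 7);
  varray.set_all(new_values);
}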

template<typename T> static constexpr bool is_VArray_v = false;
template<typename T> static constexpr bool is_VArray_v<VArray<T>> = true;

template<typename T> static constexpr bool is_VMutableArray_v = false;
template<typename T> static constexpr bool is_VMutableArray_v<VMutableArray<T>> = true;

/**
 * Makes the elements of a virtual array accessible as a #Span. If the virtual array is a span
 * already, that span is referenced directly; otherwise the values are copied into an array that
 * is owned by the #VArraySpan.
 */
template<typename T> class VArraySpan final : public Span<T> {
 private:
  VArray<T> varray_;
  Array<T> owned_data_;

 public:
  VArraySpan() = default;

  VArraySpan(VArray<T> varray) : Span<T>(), varray_(std::move(varray))
  {
    if (!varray_) {
      return;
    }
    this->size_ = varray_.size();
    const CommonVArrayInfo info = varray_.common_info();
    if (info.type == CommonVArrayInfo::Type::Span) {
      this->data_ = static_cast<const T *>(info.data);
    }
    else {
      owned_data_.~Array();
      new (&owned_data_) Array<T>(varray_.size(), NoInitialization{});
      varray_.materialize_to_uninitialized(owned_data_);
      this->data_ = owned_data_.data();
    }
  }

  VArraySpan(VArraySpan &&other)
      : varray_(std::move(other.varray_)), owned_data_(std::move(other.owned_data_))
  {
    if (!varray_) {
      return;
    }
    this->size_ = varray_.size();
    const CommonVArrayInfo info = varray_.common_info();
    if (info.type == CommonVArrayInfo::Type::Span) {
      this->data_ = static_cast<const T *>(info.data);
    }
    else {
      this->data_ = owned_data_.data();
    }
    other.data_ = nullptr;
    other.size_ = 0;
  }

  VArraySpan &operator=(VArraySpan &&other)
  {
    if (this == &other) {
      return *this;
    }
    std::destroy_at(this);
    new (this) VArraySpan(std::move(other));
    return *this;
  }
};
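
/* Illustrative sketch, not part of the original header: flattening a virtual array before a hot
 * loop so that element access does not involve virtual calls. The function name is made up for
 * the example. */
inline float example_sum(const VArray<float> &varray)
{
  /* Cheap when the virtual array already wraps a span; otherwise the values are copied once. */
  const VArraySpan<float> values(varray);
  float sum = 0.0f;
  for (const float value : values) {
    sum += value;
  }
  return sum;
}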

/**
 * Same as #VArraySpan, but the resulting span is mutable. Changes made to the span have to be
 * written back to the virtual array explicitly by calling #save().
 */
template<typename T> class MutableVArraySpan final : public MutableSpan<T> {
 private:
  VMutableArray<T> varray_;
  Array<T> owned_data_;
  bool save_has_been_called_ = false;
  bool show_not_saved_warning_ = true;

 public:
  MutableVArraySpan() = default;

  /* Create a span for any virtual array. This is cheap when the virtual array is a span itself.
   * If not, a new array has to be allocated as a wrapper for the underlying virtual array. */
  MutableVArraySpan(VMutableArray<T> varray, const bool copy_values_to_span = true)
      : MutableSpan<T>(), varray_(std::move(varray))
  {
    if (!varray_) {
      return;
    }

    this->size_ = varray_.size();
    const CommonVArrayInfo info = varray_.common_info();
    if (info.type == CommonVArrayInfo::Type::Span) {
      this->data_ = const_cast<T *>(static_cast<const T *>(info.data));
    }
    else {
      if (copy_values_to_span) {
        owned_data_.~Array();
        new (&owned_data_) Array<T>(varray_.size(), NoInitialization{});
        varray_.materialize_to_uninitialized(owned_data_);
      }
      else {
        owned_data_.reinitialize(varray_.size());
      }
      this->data_ = owned_data_.data();
    }
  }

  MutableVArraySpan(MutableVArraySpan &&other)
      : varray_(std::move(other.varray_)),
        owned_data_(std::move(other.owned_data_)),
        show_not_saved_warning_(other.show_not_saved_warning_)
  {
    if (!varray_) {
      return;
    }

    this->size_ = varray_.size();
    const CommonVArrayInfo info = varray_.common_info();
    if (info.type == CommonVArrayInfo::Type::Span) {
      this->data_ = static_cast<T *>(const_cast<void *>(info.data));
    }
    else {
      this->data_ = owned_data_.data();
    }
    other.data_ = nullptr;
    other.size_ = 0;
  }

  ~MutableVArraySpan()
  {
    if (varray_) {
      if (show_not_saved_warning_) {
        if (!save_has_been_called_) {
          std::cout << "Warning: Call `save()` to make sure that changes persist in all cases.\n";
        }
      }
    }
  }

  MutableVArraySpan &operator=(MutableVArraySpan &&other)
  {
    if (this == &other) {
      return *this;
    }
    std::destroy_at(this);
    new (this) MutableVArraySpan(std::move(other));
    return *this;
  }

  const VMutableArray<T> &varray() const
  {
    return varray_;
  }

  /* Write back all values from a temporary allocated array to the underlying virtual array. */
  void save()
  {
    save_has_been_called_ = true;
    if (this->data_ != owned_data_.data()) {
      return;
    }
    varray_.set_all(owned_data_);
  }

  void disable_not_applied_warning()
  {
    show_not_saved_warning_ = false;
  }
};
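
/* Illustrative sketch, not part of the original header: read-modify-write access through a
 * temporary span, followed by the required #save() call. The function name is made up for the
 * example. */
inline void example_scale_all(VMutableArray<float> varray)
{
  MutableVArraySpan<float> span(std::move(varray));
  for (float &value : span) {
    value *= 2.0f;
  }
  /* Write the (possibly copied) values back into the underlying virtual array. */
  span.save();
}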

/**
 * Utility that makes a single value behave like a span of arbitrary size, without allocating the
 * repeated value.
 */
template<typename T> class SingleAsSpan {
 private:
  T value_;
  int64_t size_;

 public:
  SingleAsSpan(T value, int64_t size) : value_(std::move(value)), size_(size)
  {
    BLI_assert(size_ >= 0);
  }

  SingleAsSpan(const VArray<T> &varray) : SingleAsSpan(varray.get_internal_single(), varray.size())
  {
  }

  const T &operator[](const int64_t index) const
  {
    BLI_assert(index >= 0);
    BLI_assert(index < size_);
    UNUSED_VARS_NDEBUG(index);
    return value_;
  }
};
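
/* Illustrative sketch, not part of the original header: treating a single-value virtual array as
 * if it were a span, without allocating the repeated value. The function name is made up for the
 * example. */
inline float example_sum_single(const VArray<float> &varray)
{
  BLI_assert(varray.is_single());
  const SingleAsSpan<float> values(varray);
  float sum = 0.0f;
  for (const int64_t i : IndexRange(varray.size())) {
    sum += values[i];
  }
  return sum;
}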

}  // namespace blender