TiledArray  0.7.0
range.h
Go to the documentation of this file.
1 /*
2  * This file is a part of TiledArray.
3  * Copyright (C) 2013 Virginia Tech
4  *
5  * This program is free software: you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation, either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  */
19 
20 #ifndef TILEDARRAY_RANGE_H__INCLUDED
21 #define TILEDARRAY_RANGE_H__INCLUDED
22 
24 #include <TiledArray/permutation.h>
25 #include <TiledArray/size_array.h>
26 
27 namespace TiledArray {
28 
30 
39  class Range {
40  public:
41  typedef Range Range_;
42  typedef std::size_t size_type;
43  typedef std::vector<size_type> index;
44  typedef index index_type;
47  typedef std::size_t ordinal_type;
50 
51  protected:
52 
53  size_type* data_ = nullptr;
64  unsigned int rank_ = 0u;
65 
66  private:
67 
69 
  /// Initialize range arrays from lower- and upper-bound sequences.
  ///
  /// Fills the four contiguous per-dimension arrays stored in \c data_
  /// (lower, upper, extent, stride; each of length \c rank_) and recomputes
  /// \c offset_ and \c volume_. Strides are row-major: the last dimension
  /// varies fastest (stride 1).
  ///
  /// \tparam Index A sequence type whose contiguous data is reachable via
  ///   detail::data()
  /// \param lower_bound Sequence of \c rank_ lower-bound values
  /// \param upper_bound Sequence of \c rank_ upper-bound values
  /// \pre \c data_ points to at least 4 * \c rank_ elements
  template <typename Index>
  void init_range_data(const Index& lower_bound, const Index& upper_bound) {
    // Construct temp pointers into the single 4*rank_ buffer
    size_type* MADNESS_RESTRICT const lower = data_;
    size_type* MADNESS_RESTRICT const upper = lower + rank_;
    size_type* MADNESS_RESTRICT const extent = upper + rank_;
    size_type* MADNESS_RESTRICT const stride = extent + rank_;
    const auto* MADNESS_RESTRICT const lower_data = detail::data(lower_bound);
    const auto* MADNESS_RESTRICT const upper_data = detail::data(upper_bound);

    // Set the volume seed
    volume_ = 1ul;
    offset_ = 0ul;

    // Compute range data, iterating from the fastest-varying (last)
    // dimension so that volume_ holds the stride of dimension i when
    // stride[i] is assigned.
    for(int i = int(rank_) - 1; i >= 0; --i) {
      // Check input dimensions (bounds must define a non-empty interval)
      TA_ASSERT(lower_data[i] >= 0);
      TA_ASSERT(lower_data[i] < upper_data[i]);

      // Compute data for element i of lower, upper, and extent
      const size_type lower_bound_i = lower_data[i];
      const size_type upper_bound_i = upper_data[i];
      const size_type extent_i = upper_bound_i - lower_bound_i;

      lower[i] = lower_bound_i;
      upper[i] = upper_bound_i;
      extent[i] = extent_i;
      stride[i] = volume_;
      // offset_ accumulates the ordinal of the lower-bound corner; it is
      // subtracted in ordinal() so the first element maps to ordinal 0.
      offset_ += lower_bound_i * volume_;
      volume_ *= extent_i;
    }
  }
110 
112 
  /// Initialize range arrays from a sequence of {lower, upper} bound pairs.
  ///
  /// Same layout and stride convention as the two-sequence overload: fills
  /// lower/upper/extent/stride arrays in \c data_ and recomputes \c offset_
  /// and \c volume_ (row-major strides, last dimension fastest).
  ///
  /// \tparam Index A sequence whose value_type is a pair-like {lower, upper}
  /// \param bound Sequence of \c rank_ bound pairs
  /// \pre \c data_ points to at least 4 * \c rank_ elements
  template <typename Index,
      typename std::enable_if<detail::is_pair<
          typename Index::value_type>::value>::type* = nullptr>
  void init_range_data(const Index& bound) {
    // Construct temp pointers
    size_type* MADNESS_RESTRICT const lower = data_;
    size_type* MADNESS_RESTRICT const upper = lower + rank_;
    size_type* MADNESS_RESTRICT const extent = upper + rank_;
    size_type* MADNESS_RESTRICT const stride = extent + rank_;
    const auto* MADNESS_RESTRICT const bound_data = detail::data(bound);

    // Set the volume seed
    volume_ = 1ul;
    offset_ = 0ul;

    // Compute range data, last dimension first (row-major strides)
    for(int i = int(rank_) - 1; i >= 0; --i) {
      // Compute data for element i of lower, upper, and extent
      const size_type lower_bound_i = bound_data[i].first;
      const size_type upper_bound_i = bound_data[i].second;
      const size_type extent_i = upper_bound_i - lower_bound_i;

      // Check input dimensions
      // NOTE(review): lower_bound_i is unsigned size_type here, so the
      // ">= 0ul" check is a tautology; negative inputs wrap before the test.
      TA_ASSERT(lower_bound_i >= 0ul);
      TA_ASSERT(lower_bound_i < upper_bound_i);

      lower[i] = lower_bound_i;
      upper[i] = upper_bound_i;
      extent[i] = extent_i;
      stride[i] = volume_;
      offset_ += lower_bound_i * volume_;
      volume_ *= extent_i;
    }
  }
154 
156 
  /// Initialize range arrays from a sequence of extents.
  ///
  /// Lower bounds are all zero, so \c offset_ remains 0; upper bounds equal
  /// the extents. Strides are row-major (last dimension fastest).
  ///
  /// \tparam Index A sequence with an integral value_type
  /// \param extents Sequence of \c rank_ extent values (each must be > 0)
  /// \pre \c data_ points to at least 4 * \c rank_ elements
  template <typename Index,
      typename std::enable_if<detail::is_integral_list<
          typename Index::value_type>::value>::type* = nullptr>
  void init_range_data(const Index& extents) {
    // Construct temp pointers
    size_type* MADNESS_RESTRICT const lower = data_;
    size_type* MADNESS_RESTRICT const upper = lower + rank_;
    size_type* MADNESS_RESTRICT const extent = upper + rank_;
    size_type* MADNESS_RESTRICT const stride = extent + rank_;
    const auto* MADNESS_RESTRICT const extent_data = detail::data(extents);

    // Set the offset and volume initial values; offset_ stays 0 because
    // every lower bound is 0.
    volume_ = 1ul;
    offset_ = 0ul;

    // Compute range data, last dimension first (row-major strides)
    for(int i = int(rank_) - 1; i >= 0; --i) {
      // Check bounds of the input extent
      TA_ASSERT(extent_data[i] > 0);

      // Get extent i
      const size_type extent_i = extent_data[i];

      lower[i] = 0ul;
      upper[i] = extent_i;
      extent[i] = extent_i;
      stride[i] = volume_;
      volume_ *= extent_i;
    }
  }
193 
195 
202  template <typename ... Indices,
203  typename std::enable_if<detail::is_integral_list<Indices...>::value>::type* = nullptr>
204  void init_range_data(const std::tuple<Indices...>& extents) {
205  const constexpr std::size_t rank = std::tuple_size<std::tuple<Indices...>>::value;
206  TA_ASSERT(rank_ == rank);
207 
208  // Set the offset and volume initial values
209  volume_ = 1ul;
210  offset_ = 0ul;
211 
212  // initialize by recursion
213  init_range_data_helper(extents, std::make_index_sequence<rank>{});
214  }
215 
216  template <typename ... Indices, std::size_t ... Is>
217  void init_range_data_helper(const std::tuple<Indices...>& extents, std::index_sequence<Is...>) {
218  int workers[] = {0, (init_range_data_helper_iter<Is>(extents),0)...};
219  ++workers[0];
220  }
221 
222  template <std::size_t I, typename ... Indices>
223  void init_range_data_helper_iter(const std::tuple<Indices...>& extents) {
224  // Check bounds of the input extent
225  TA_ASSERT(std::get<I>(extents) > 0ul);
226 
227  // Get extent i
228  const size_type extent_i = std::get<I>(extents);
229 
230  size_type* MADNESS_RESTRICT const lower = data_;
231  size_type* MADNESS_RESTRICT const upper = lower + rank_;
232  size_type* MADNESS_RESTRICT const extent = upper + rank_;
233  size_type* MADNESS_RESTRICT const stride = extent + rank_;
234 
235  lower[I] = 0ul;
236  upper[I] = extent_i;
237  extent[I] = extent_i;
238  stride[I] = volume_;
239  volume_ *= extent_i;
240  }
241 
243 
  /// Initialize range arrays as a permutation of another range's bounds.
  ///
  /// Writes the permuted lower/upper/extent values (element i of the source
  /// maps to element perm[i] of this range), then recomputes stride,
  /// \c offset_, and \c volume_ from the permuted data.
  ///
  /// \param perm The permutation to apply (dimension i -> perm[i])
  /// \param other_lower_bound Source lower-bound array (rank_ elements)
  /// \param other_upper_bound Source upper-bound array (rank_ elements)
  /// \pre \c data_ points to at least 4 * \c rank_ elements and \c rank_ is
  ///   already set to the source rank
  void init_range_data(const Permutation& perm,
      const size_type* MADNESS_RESTRICT const other_lower_bound,
      const size_type* MADNESS_RESTRICT const other_upper_bound)
  {
    // Create temporary pointers to this range data
    auto* MADNESS_RESTRICT const lower = data_;
    auto* MADNESS_RESTRICT const upper = lower + rank_;
    auto* MADNESS_RESTRICT const extent = upper + rank_;
    auto* MADNESS_RESTRICT const stride = extent + rank_;

    // Copy the permuted lower, upper, and extent into this range.
    for(unsigned int i = 0u; i < rank_; ++i) {
      const auto perm_i = perm[i];

      // Get the lower bound, upper bound, and extent from other for rank i.
      const auto other_lower_bound_i = other_lower_bound[i];
      const auto other_upper_bound_i = other_upper_bound[i];
      const auto other_extent_i = other_upper_bound_i - other_lower_bound_i;

      // Store the permuted lower bound, upper bound, and extent
      lower[perm_i] = other_lower_bound_i;
      upper[perm_i] = other_upper_bound_i;
      extent[perm_i] = other_extent_i;
    }

    // Recompute stride, offset, and volume from the permuted bounds
    // (last dimension first, so strides remain row-major).
    volume_ = 1ul;
    offset_ = 0ul;
    for(int i = int(rank_) - 1; i >= 0; --i) {
      const auto lower_i = lower[i];
      const auto extent_i = extent[i];
      stride[i] = volume_;
      offset_ += lower_i * volume_;
      volume_ *= extent_i;
    }
  }
287 
288  public:
289 
291 
  /// Default constructor: an empty, rank-0 range (no allocation).
  /// Members are set by their in-class initializers (data_ = nullptr,
  /// rank_ = 0; offset_/volume_ declarations are not visible here --
  /// presumably also zero-initialized in-class; verify in full header).
  Range() { }
294 
296 
305  template <typename Index,
306  typename std::enable_if<! std::is_integral<Index>::value>::type* = nullptr>
307  Range(const Index& lower_bound, const Index& upper_bound) {
308  const size_type n = detail::size(lower_bound);
309  TA_ASSERT(n == detail::size(upper_bound));
310  if(n) {
311  // Initialize array memory
312  data_ = new size_type[n << 2];
313  rank_ = n;
314  init_range_data(lower_bound, upper_bound);
315  }
316  }
317 
319 
327  template <typename Index1,
328  typename std::enable_if<std::is_integral<Index1>::value>::type* = nullptr>
329  Range(const std::initializer_list<Index1>& lower_bound,
330  const std::initializer_list<Index1>& upper_bound)
331  {
332  const size_type n = detail::size(lower_bound);
333  TA_ASSERT(n == detail::size(upper_bound));
334  if(n) {
335  // Initialize array memory
336  data_ = new size_type[n << 2];
337  rank_ = n;
338  init_range_data(lower_bound, upper_bound);
339  }
340  }
341 
343 
349  template <typename Index,
350  typename std::enable_if<! std::is_integral<Index>::value &&
351  std::is_integral<typename Index::value_type>::value>::type* = nullptr>
352  explicit Range(const Index& extent) {
353  const size_type n = detail::size(extent);
354  if(n) {
355  // Initialize array memory
356  data_ = new size_type[n << 2];
357  rank_ = n;
358  init_range_data(extent);
359  }
360  }
361 
363 
368  template <typename Index1,
369  typename std::enable_if<std::is_integral<Index1>::value>::type* = nullptr>
370  explicit Range(const std::initializer_list<Index1>& extent) {
371  const size_type n = detail::size(extent);
372  if(n) {
373  // Initialize array memory
374  data_ = new size_type[n << 2];
375  rank_ = n;
376  init_range_data(extent);
377  }
378  }
379 
381 
389  template <typename Index,
390  typename std::enable_if<! std::is_integral<Index>::value &&
392  Range(const Index& bounds) {
393  const size_type n = detail::size(bounds);
394  if(n) {
395  // Initialize array memory
396  data_ = new size_type[n << 2];
397  rank_ = n;
398  init_range_data(bounds);
399  }
400  }
401 
403 
411  template <typename Index1, typename Index2>
412  Range(const std::initializer_list<std::pair<Index1,Index2>>& bounds) {
413  const size_type n = detail::size(bounds);
414  if(n) {
415  // Initialize array memory
416  data_ = new size_type[n << 2];
417  rank_ = n;
418  init_range_data(bounds);
419  }
420  }
421 
423 
  /// Construct a range from a pack of extents, one per dimension.
  ///
  /// Delegates to the sequence-of-extents constructor via a std::array.
  /// \tparam Index Integral extent types
  /// \param extents The extent of each dimension
  template<typename... Index,
      typename std::enable_if<detail::is_integral_list<Index...>::value>::type* = nullptr>
  explicit Range(const Index... extents) :
    Range(std::array<size_t, sizeof...(Index)>{{static_cast<std::size_t>(extents)...}})
  { }

  /// Construct a range from a pack of {lower, upper} bound pairs, one per
  /// dimension.
  ///
  /// Delegates to the sequence-of-pairs constructor via a std::array.
  /// \tparam IndexPairs Pair-like {lower, upper} types
  /// \param bounds The bound pair of each dimension
  template <typename ... IndexPairs,
    typename std::enable_if<detail::is_integral_pair_list<IndexPairs...>::value>::type* = nullptr
  >
  explicit Range(const IndexPairs... bounds) :
    Range(std::array<std::pair<std::size_t,std::size_t>, sizeof...(IndexPairs)>{{static_cast<std::pair<std::size_t,std::size_t>>(bounds)...}})
  { }
445 
446 
448 
  /// Copy constructor.
  ///
  /// Deep-copies the 4 * rank_ bound/extent/stride values. When the source
  /// is empty (rank 0), members keep their default (empty) state.
  /// \param other The range to be copied
  Range(const Range_& other) {
    if(other.rank_ > 0ul) {
      data_ = new size_type[other.rank_ << 2];
      offset_ = other.offset_;
      volume_ = other.volume_;
      rank_ = other.rank_;
      // (sizeof(size_type) << 2) * rank_ bytes == the four rank_-sized arrays
      memcpy(data_, other.data_, (sizeof(size_type) << 2) * other.rank_);
    }
  }
460 
462 
  /// Move constructor.
  ///
  /// Steals the source's buffer and leaves \c other in the default (empty)
  /// state so its destructor is a no-op delete of nullptr.
  /// \param other The range to be moved from
  Range(Range_&& other) :
    data_(other.data_), offset_(other.offset_), volume_(other.volume_),
    rank_(other.rank_)
  {
    other.data_ = nullptr;
    other.offset_ = 0ul;
    other.volume_ = 0ul;
    other.rank_ = 0u;
  }
474 
476 
  /// Permuting copy constructor.
  ///
  /// Builds the range obtained by applying \c perm to \c other. A null
  /// (default) permutation degenerates to a plain deep copy.
  /// \param perm The permutation to apply (its dimension must equal the
  ///   source rank)
  /// \param other The range to permute
  Range(const Permutation& perm, const Range_& other) {
    TA_ASSERT(perm.dim() == other.rank_);

    if(other.rank_ > 0ul) {
      data_ = new size_type[other.rank_ << 2];
      rank_ = other.rank_;

      if(perm) {
        // Permute bounds and recompute stride/offset/volume.
        init_range_data(perm, other.data_, other.data_ + rank_);
      } else {
        // Simple copy will do.
        memcpy(data_, other.data_, (sizeof(size_type) << 2) * rank_);
        offset_ = other.offset_;
        volume_ = other.volume_;
      }
    }
  }
497 
  /// Destructor: releases the single buffer holding the bound/extent/stride
  /// arrays (deleting nullptr is a no-op for empty ranges).
  ~Range() { delete [] data_; }
500 
502 
506  Range_& operator=(const Range_& other) {
507  if(rank_ != other.rank_) {
508  delete [] data_;
509  data_ = (other.rank_ > 0ul ? new size_type[other.rank_ << 2] : nullptr);
510  rank_ = other.rank_;
511  }
512  memcpy(data_, other.data_, (sizeof(size_type) << 2) * rank_);
513  offset_ = other.offset_;
514  volume_ = other.volume_;
515 
516  return *this;
517  }
518 
520 
524  Range_& operator=(Range_&& other) {
525  data_ = other.data_;
526  offset_ = other.offset_;
527  volume_ = other.volume_;
528  rank_ = other.rank_;
529 
530  other.data_ = nullptr;
531  other.offset_ = 0ul;
532  other.volume_ = 0ul;
533  other.rank_ = 0u;
534 
535  return *this;
536  }
537 
539 
  /// Rank accessor
  /// \return The rank (number of dimensions) of this range
  unsigned int rank() const { return rank_; }

  /// Range lower bound data accessor
  /// \return Pointer to the first of \c rank_ lower-bound values
  const size_type* lobound_data() const { return data_; }

  /// Range lower bound element accessor
  /// \param dim The dimension index (must be < rank())
  /// \return The lower bound of dimension \c dim
  size_type lobound(size_t dim) const {
    TA_ASSERT(dim < rank_);
    return *(lobound_data() + dim);
  }

  /// Range upper bound data accessor
  /// \return Pointer to the first of \c rank_ upper-bound values
  const size_type* upbound_data() const { return data_ + rank_; }

  /// Range upper bound accessor
  /// \return A non-owning view of the upper-bound array
  size_array upbound() const {
    return size_array(upbound_data(), rank_);
  }

  /// Range upper bound element accessor
  /// \param dim The dimension index (must be < rank())
  /// \return The upper bound of dimension \c dim
  size_type upbound(size_t dim) const {
    TA_ASSERT(dim < rank_);
    return *(upbound_data() + dim);
  }

  /// Range extent data accessor
  /// \return Pointer to the first of \c rank_ extent values
  const size_type* extent_data() const { return data_ + (rank_ + rank_); }

  /// Range extent accessor
  /// \return A non-owning view of the extent array
  extent_type extent() const {
    return size_array(extent_data(), rank_);
  }

  /// Range extent element accessor
  /// \param dim The dimension index (must be < rank())
  /// \return The extent of dimension \c dim
  size_type extent(size_t dim) const {
    TA_ASSERT(dim < rank_);
    return *(extent_data() + dim);
  }

  /// Range stride data accessor
  /// \return Pointer to the first of \c rank_ stride values
  const size_type* stride_data() const { return data_ + (rank_ + rank_ + rank_); }

  /// Range stride accessor
  /// \return A non-owning view of the stride array
  size_array stride() const {
    return size_array(stride_data(), rank_);
  }

  /// Range stride element accessor
  /// \param dim The dimension index (must be < rank())
  /// \return The stride of dimension \c dim
  size_type stride(size_t dim) const {
    TA_ASSERT(dim < rank_);
    return *(stride_data() + dim);
  }

  /// Range volume accessor
  /// \return The total number of elements in the range
  ordinal_type volume() const { return volume_; }

  /// Range volume accessor (alias of volume(); Tensor Working Group name)
  /// \return The total number of elements in the range
  ordinal_type area() const { return volume_; }

  /// Range offset accessor
  /// \return The ordinal of the lower-bound corner; subtracted in ordinal()
  ///   so the first element maps to ordinal 0
  ordinal_type offset() const { return offset_; }

  /// Index iterator factory
  /// \return An iterator positioned at the lower-bound coordinate
  const_iterator begin() const { return const_iterator(data_, this); }

  /// Index iterator factory
  /// \return A past-the-end iterator (seeded with the upper-bound array)
  const_iterator end() const { return const_iterator(data_ + rank_, this); }
671 
673 
  /// Check that a coordinate index is within this range.
  ///
  /// \tparam Index A non-integral coordinate sequence type
  /// \param index The coordinate to test, one component per dimension
  /// \return \c true when every component i lies in [lobound(i), upbound(i));
  ///   always \c false for a rank-0 range
  template <typename Index,
      typename std::enable_if<! std::is_integral<Index>::value, bool>::type* = nullptr>
  bool includes(const Index& index) const {
    const size_type* MADNESS_RESTRICT const lower = data_;
    const size_type* MADNESS_RESTRICT const upper = lower + rank_;

    bool result = (rank_ > 0u);
    auto it = std::begin(index); // TODO C++14 switch to std::cbegin
    // Short-circuits as soon as one component is out of bounds.
    for(unsigned int i = 0u; result && (i < rank_); ++i, ++it) {
      const size_type index_i = *it;
      const size_type lower_i = lower[i];
      const size_type upper_i = upper[i];
      result = result && (index_i >= lower_i) && (index_i < upper_i);
    }

    return result;
  }

  /// Check that an initializer-list coordinate is within this range.
  ///
  /// \tparam Integer An integral type
  /// \param index The coordinate to test
  /// \return \c true when the coordinate is within the range
  template <typename Integer>
  bool includes(const std::initializer_list<Integer>& index) const {
    // Forward to the sequence overload above.
    return includes<std::initializer_list<Integer>>(index);
  }
712 
713 
715 
  /// Check that an ordinal index is within this range.
  ///
  /// \tparam Ordinal An integral type
  /// \param i The ordinal index to test
  /// \return \c true when \c i is in [0, volume())
  template <typename Ordinal>
  typename std::enable_if<std::is_integral<Ordinal>::value, bool>::type
  includes(Ordinal i) const {
    return include_ordinal_(i);
  }

  /// Check that a coordinate given as a pack of integers is within this
  /// range.
  // NOTE(review): the declared return type is size_type although the result
  // is logically boolean (it forwards to the bool-returning sequence
  // overload) -- confirm whether this should be bool.
  template <typename... Index>
  typename std::enable_if<(sizeof...(Index) > 1ul), size_type>::type
  includes(const Index&... index) const {
    const size_type i[sizeof...(Index)] = {static_cast<size_type>(index)...};
    return includes(i);
  }
731 
732 
733 
735 
741  Range_& operator *=(const Permutation& perm);
742 
744 
  /// Resize this range to new lower and upper bounds.
  ///
  /// Reallocates the internal buffer only when the rank changes; otherwise
  /// the existing storage is reinitialized in place.
  /// \tparam Index A sequence type
  /// \param lower_bound New lower bound of each dimension
  /// \param upper_bound New upper bound of each dimension
  /// \return A reference to this object
  /// \throw TiledArray::Exception When the two sequences differ in size
  template <typename Index>
  Range_& resize(const Index& lower_bound, const Index& upper_bound) {
    const size_type n = detail::size(lower_bound);
    TA_ASSERT(n == detail::size(upper_bound));

    // Reallocate memory for range arrays
    if(rank_ != n) {
      delete [] data_;
      data_ = (n > 0ul ? new size_type[n << 2] : nullptr);
      rank_ = n;
    }
    if(n > 0ul)
      init_range_data(lower_bound, upper_bound);
    else
      volume_ = 0ul;  // empty range has zero volume

    return *this;
  }
771 
773 
  /// Shift the lower and upper bounds of this range in place.
  ///
  /// Extents and strides are unchanged; only lower/upper bounds and the
  /// ordinal offset are updated.
  /// \tparam Index A sequence type
  /// \param bound_shift The shift to add to each dimension's bounds
  ///   (negative shifts rely on unsigned modular arithmetic -- presumably
  ///   intended, TODO confirm)
  /// \return A reference to this object
  /// \throw TiledArray::Exception When the shift size differs from rank()
  template <typename Index>
  Range_& inplace_shift(const Index& bound_shift) {
    TA_ASSERT(detail::size(bound_shift) == rank_);

    const auto* MADNESS_RESTRICT const bound_shift_data = detail::data(bound_shift);
    size_type* MADNESS_RESTRICT const lower = data_;
    size_type* MADNESS_RESTRICT const upper = data_ + rank_;
    // stride array starts at data_ + 3 * rank_ (upper is data_ + rank_)
    const size_type* MADNESS_RESTRICT const stride = upper + rank_ + rank_;

    offset_ = 0ul;
    for(unsigned i = 0u; i < rank_; ++i) {
      // Load range data
      const auto bound_shift_i = bound_shift_data[i];
      auto lower_i = lower[i];
      auto upper_i = upper[i];
      const auto stride_i = stride[i];

      // Compute new range bounds
      lower_i += bound_shift_i;
      upper_i += bound_shift_i;

      // Update range data; offset_ is rebuilt from the shifted lower bounds
      offset_ += lower_i * stride_i;
      lower[i] = lower_i;
      upper[i] = upper_i;
    }

    return *this;
  }
806 
808 
  /// Create a copy of this range with both bounds shifted.
  ///
  /// \tparam Index A sequence type
  /// \param bound_shift The shift to add to each dimension's bounds
  /// \return The shifted copy of this range
  template <typename Index>
  Range_ shift(const Index& bound_shift) {
    Range_ result(*this);
    result.inplace_shift(bound_shift);
    return result;
  }
818 
820 
828  return index;
829  }
830 
832 
  /// Calculate the ordinal index of a coordinate index.
  ///
  /// Computes the dot product of the coordinate with the stride array and
  /// subtracts offset_ so that the lower-bound corner maps to ordinal 0.
  /// \tparam Index A non-integral coordinate sequence type
  /// \param index The coordinate, one component per dimension
  /// \return The ordinal of \c index within this range
  template <typename Index,
      typename std::enable_if<! std::is_integral<Index>::value>::type* = nullptr>
  ordinal_type ordinal(const Index& index) const {

    // stride array starts at data_ + 3 * rank_
    size_type* MADNESS_RESTRICT const stride = data_ + rank_ + rank_ + rank_;

    size_type result = 0ul;
    auto index_it = std::begin(index);
    for(unsigned int i = 0u; i < rank_; ++i, ++index_it) {
      const size_type stride_i = stride[i];
      result += *(index_it) * stride_i;
    }

    return result - offset_;
  }
855 
857 
863  template <typename... Index,
864  typename std::enable_if<(sizeof...(Index) > 1ul)>::type* = nullptr>
865  size_type ordinal(const Index&... index) const {
866  const size_type temp_index[sizeof...(Index)] = { static_cast<size_type>(index)... };
867  return ordinal(temp_index);
868  }
869 
871 
878  // Check that index is contained by range.
880 
881  // Construct result coordinate index object and allocate its memory.
882  Range_::index result(rank_, 0);
883 
884  // Get pointers to the data
885  size_type * MADNESS_RESTRICT const result_data = result.data();
886  size_type const * MADNESS_RESTRICT const lower = data_;
887  size_type const * MADNESS_RESTRICT const size = data_ + rank_ + rank_;
888 
889  // Compute the coordinate index of index in range.
890  for(int i = int(rank_) - 1; i >= 0; --i) {
891  const size_type lower_i = lower[i];
892  const size_type size_i = size[i];
893 
894  // Compute result index element i
895  const size_type result_i = (index % size_i) + lower_i;
896  index /= size_i;
897 
898  // Store result
899  result_data[i] = result_i;
900  }
901 
902  return result;
903  }
904 
906 
  /// Identity conversion for coordinate indices.
  ///
  /// A coordinate index is already an index; just validate and return it.
  /// \tparam Index A non-integral coordinate sequence type
  /// \param i The coordinate index
  /// \return A reference to \c i
  /// \throw TiledArray::Exception When \c i is not within this range
  template <typename Index,
      typename std::enable_if<! std::is_integral<Index>::value>::type* = nullptr>
  const Index& idx(const Index& i) const {
    TA_ASSERT(includes(i));
    return i;
  }
917 
  /// Deserialize this range from an input archive.
  ///
  /// Reads the rank, reallocates the buffer if the rank changed, then reads
  /// the four per-dimension arrays plus offset_ and volume_.
  /// \tparam Archive A MADNESS input-archive type
  /// \param ar The archive to read from
  template <typename Archive,
      typename std::enable_if<madness::archive::is_input_archive<Archive>::value>::type* = nullptr>
  void serialize(const Archive& ar) {
    // Get rank
    unsigned int rank = 0ul;
    ar & rank;

    // Reallocate the array (4 * rank values: lower, upper, extent, stride)
    const unsigned int four_x_rank = rank << 2;
    if(rank_ != rank) {
      delete [] data_;
      data_ = (rank > 0u ? new size_type[four_x_rank] : nullptr);
      rank_ = rank;
    }

    // Get range data
    ar & madness::archive::wrap(data_, four_x_rank) & offset_ & volume_;
  }

  /// Serialize this range to an output archive.
  ///
  /// Writes rank, the four per-dimension arrays, offset_, and volume_ --
  /// mirroring the read order of the input overload above.
  /// \tparam Archive A MADNESS output-archive type
  /// \param ar The archive to write to
  template <typename Archive,
      typename std::enable_if<madness::archive::is_output_archive<Archive>::value>::type* = nullptr>
  void serialize(const Archive& ar) const {
    ar & rank_ & madness::archive::wrap(data_, rank_ << 2) & offset_ & volume_;
  }
942 
943  void swap(Range_& other) {
944  // Get temp data
945  std::swap(data_, other.data_);
946  std::swap(offset_, other.offset_);
947  std::swap(volume_, other.volume_);
948  std::swap(rank_, other.rank_);
949  }
950 
951  private:
952 
954 
  /// Ordinal containment test for signed integral types.
  ///
  /// \tparam Index A signed integral type
  /// \param i The ordinal to test
  /// \return \c true when \c i is in [0, volume_)
  template <typename Index>
  typename std::enable_if<std::is_signed<Index>::value, bool>::type
  include_ordinal_(Index i) const { return (i >= Index(0)) && (i < Index(volume_)); }

  /// Ordinal containment test for unsigned integral types.
  ///
  /// The lower-bound check is unnecessary for unsigned values.
  /// \tparam Index An unsigned integral type
  /// \param i The ordinal to test
  /// \return \c true when \c i is less than volume_
  template <typename Index>
  typename std::enable_if<! std::is_signed<Index>::value, bool>::type
  include_ordinal_(Index i) const { return i < volume_; }
971 
973 
  /// Advance a coordinate index to the next element (odometer increment).
  ///
  /// Increments the fastest-varying (last) dimension first; when a
  /// dimension overflows its upper bound it is reset to its lower bound and
  /// the next slower dimension is incremented. Incrementing the last
  /// element yields the end sentinel (the upper-bound coordinate).
  /// \param i The coordinate to increment (must be within this range)
  void increment(index& i) const {
    TA_ASSERT(includes(i));

    size_type const * MADNESS_RESTRICT const lower = data_;
    size_type const * MADNESS_RESTRICT const upper = data_ + rank_;

    for(int d = int(rank_) - 1; d >= 0; --d) {
      // increment coordinate
      ++i[d];

      // break if done
      if(i[d] < upper[d])
        return;

      // Reset current index to lower bound.
      i[d] = lower[d];
    }

    // if the current location was set to lower then it was at the end and
    // needs to be reset to equal upper.
    std::copy(upper, upper + rank_, i.begin());
  }
1000 
1002 
  /// Advance a coordinate index by \c n elements (in ordinal order).
  ///
  /// Implemented via the ordinal mapping: convert to ordinal, add n,
  /// convert back to a coordinate.
  /// \param i The coordinate to advance (must be within this range)
  /// \param n The signed number of elements to advance by
  void advance(index& i, std::ptrdiff_t n) const {
    TA_ASSERT(includes(i));
    const size_type o = ordinal(i) + n;
    TA_ASSERT(includes(o));
    i = idx(o);
  }

  /// Compute the signed distance between two coordinates of this range.
  ///
  /// \param first The starting coordinate (must be within this range)
  /// \param last The ending coordinate (must be within this range)
  /// \return ordinal(last) - ordinal(first)
  std::ptrdiff_t distance_to(const index& first, const index& last) const {
    TA_ASSERT(includes(first));
    TA_ASSERT(includes(last));
    return ordinal(last) - ordinal(first);
  }
1028 
1029  }; // class Range
1030 
/// Permute this range in place.
///
/// Copies the current lower/upper bound arrays into a temporary buffer,
/// then rebuilds this range's data as the permutation of those bounds.
/// Rank-0 and rank-1 ranges are unchanged (any permutation of them is the
/// identity).
/// \param perm The permutation to apply (its dimension must equal rank())
/// \return A reference to this range
inline Range& Range::operator *=(const Permutation& perm) {
  TA_ASSERT(perm.dim() == rank_);
  if(rank_ > 1ul) {
    // Copy the lower and upper bound data into a temporary array
    // (2 * rank_ values: lower followed by upper).
    size_type* MADNESS_RESTRICT const temp_lower = new size_type[rank_ << 1];
    const size_type* MADNESS_RESTRICT const temp_upper = temp_lower + rank_;
    std::memcpy(temp_lower, data_, (sizeof(size_type) << 1) * rank_);

    init_range_data(perm, temp_lower, temp_upper);

    // Cleanup old memory.
    delete[] temp_lower;
  }
  return *this;
}
1046 
/// Exchange the values of the given two ranges (never throws).
/// \param r0 The first range
/// \param r1 The second range
inline void swap(Range& r0, Range& r1) { // no throw
  r0.swap(r1);
}


/// Create a permuted copy of a range.
/// \param perm The permutation to apply
/// \param r The range to permute
/// \return The permutation of \c r
inline Range operator*(const Permutation& perm, const Range& r) {
  return Range(perm, r);
}
1061 
1063 
/// Range equality comparison.
///
/// Two ranges are equal when they have the same rank and identical lower
/// and upper bounds. The memcmp spans 2 * rank values -- the contiguous
/// lower and upper arrays -- which is sufficient because the extent and
/// stride arrays are derived from the bounds.
/// \param r1 The first range to compare
/// \param r2 The second range to compare
/// \return \c true when the ranges are identical
inline bool operator ==(const Range& r1, const Range& r2) {
  return (r1.rank() == r2.rank()) && !std::memcmp(r1.lobound_data(), r2.lobound_data(),
      r1.rank() * (2u * sizeof(Range::size_type)));
}
1073 
1078  inline bool operator !=(const Range& r1, const Range& r2) {
1079  return (r1.rank() != r2.rank()) || std::memcmp(r1.lobound_data(), r2.lobound_data(),
1080  r1.rank() * (2u * sizeof(Range::size_type)));
1081  }
1082 
1084 
/// Range output operator.
///
/// Prints the range as "[ lower, upper )" using the half-open interval
/// convention.
/// \param os The output stream
/// \param r The range to print
/// \return The output stream
inline std::ostream& operator<<(std::ostream& os, const Range& r) {
  os << "[ ";
  detail::print_array(os, r.lobound_data(), r.rank());
  os << ", ";
  detail::print_array(os, r.upbound_data(), r.rank());
  os << " )";
  return os;
}
1096 
1097 } // namespace TiledArray
1098 #endif // TILEDARRAY_RANGE_H__INCLUDED
std::vector< size_type > index
Coordinate index type.
Definition: range.h:43
size_type volume_
Total number of elements.
Definition: range.h:63
constexpr bool operator==(const DenseShape &a, const DenseShape &b)
Definition: dense_shape.h:178
A (hyperrectangular) interval on Z^n, the space of integer n-indices.
Definition: range.h:39
const size_type * extent_data() const
Range extent data accessor.
Definition: range.h:594
void swap(Range &r0, Range &r1)
Exchange the values of the give two ranges.
Definition: range.h:1048
size_array lobound() const
Range lower bound accessor.
Definition: range.h:555
auto data(T &t)
Container data pointer accessor.
Definition: utility.h:89
ordinal_type ordinal(const Index &index) const
calculate the ordinal index of index
Definition: range.h:840
Range_ shift(const Index &bound_shift)
Shift the lower and upper bound of this range.
Definition: range.h:813
std::size_t ordinal_type
Ordinal type, to conform Tensor Working Group spec.
Definition: range.h:47
size_type ordinal(const Index &... index) const
calculate the ordinal index of index
Definition: range.h:865
Range(const Permutation &perm, const Range_ &other)
Permuting copy constructor.
Definition: range.h:480
size_array extent_type
Range extent type, to conform Tensor Working Group spec.
Definition: range.h:46
size_type stride(size_t dim) const
Range stride element accessor.
Definition: range.h:633
void swap(Range_ &other)
Definition: range.h:943
Range_ & inplace_shift(const Index &bound_shift)
Shift the lower and upper bound of this range.
Definition: range.h:778
STL namespace.
Range(const Index &extent)
Range constructor from a sequence of extents.
Definition: range.h:352
constexpr bool operator!=(const DenseShape &a, const DenseShape &b)
Definition: dense_shape.h:179
std::array< T, N > operator*(const Permutation &, const std::array< T, N > &)
Permute a std::array.
Definition: permutation.h:484
unsigned int rank() const
Rank accessor.
Definition: range.h:542
const size_type * stride_data() const
Range stride data accessor.
Definition: range.h:618
size_t size(const DistArray< Tile, Policy > &a)
Definition: utils.h:49
const size_type * upbound_data() const
Range upper bound data accessor.
Definition: range.h:570
index_type dim() const
Domain size accessor.
Definition: permutation.h:206
Range(const std::initializer_list< Index1 > &extent)
Range constructor from an initializer list of extents.
Definition: range.h:370
ordinal_type volume() const
Range volume accessor.
Definition: range.h:642
std::enable_if<(sizeof...(Index) > 1ul), size_type >::type includes(const Index &... index) const
Definition: range.h:727
constexpr std::size_t size(T(&)[N])
Array size accessor.
Definition: utility.h:47
detail::RangeIterator< size_type, Range_ > const_iterator
Coordinate iterator.
Definition: range.h:48
bool includes(const std::initializer_list< Integer > &index) const
Check the coordinate to make sure it is within the range.
Definition: range.h:709
void print_array(std::ostream &out, const A &a, const std::size_t n)
Print the content of an array like object.
Definition: utility.h:153
std::enable_if< std::is_integral< Ordinal >::value, bool >::type includes(Ordinal i) const
Check the ordinal index to make sure it is within the range.
Definition: range.h:721
Range(const Index &lower_bound, const Index &upper_bound)
Construct range defined by upper and lower bound sequences.
Definition: range.h:307
Range_ & resize(const Index &lower_bound, const Index &upper_bound)
Resize range to a new upper and lower bound.
Definition: range.h:754
#define TA_ASSERT(a)
Definition: error.h:107
void serialize(const Archive &ar) const
Definition: range.h:939
size_type offset_
Ordinal index offset correction.
Definition: range.h:62
const_iterator end() const
Index iterator factory.
Definition: range.h:670
detail::SizeArray< const size_type > size_array
Size array type.
Definition: range.h:45
unsigned int rank_
The rank (or number of dimensions) in the range.
Definition: range.h:64
Range_ & operator=(const Range_ &other)
Copy assignment operator.
Definition: range.h:506
Range(Range_ &&other)
Move Constructor.
Definition: range.h:465
DistArray< Tile, Policy > copy(const DistArray< Tile, Policy > &a)
Definition: utils.h:58
std::ostream & operator<<(std::ostream &os, const DistArray< Tile, Policy > &a)
Add the tensor to an output stream.
Definition: dist_array.h:853
size_type extent(size_t dim) const
Range extent element accessor.
Definition: range.h:609
size_array upbound() const
Range upper bound accessor.
Definition: range.h:577
bool includes(const Index &index) const
Check the coordinate to make sure it is within the range.
Definition: range.h:682
size_type upbound(size_t dim) const
Range upper bound element accessor.
Definition: range.h:585
Range_ & operator*=(const Permutation &perm)
Permute this range.
Definition: range.h:1031
std::size_t size_type
Size type.
Definition: range.h:42
Permutation of a sequence of objects indexed by base-0 indices.
Definition: permutation.h:119
const size_type * lobound_data() const
Range lower bound data accessor.
Definition: range.h:548
Range(const Index &bounds)
Construct range defined by a sequence of {lower,upper} bound pairs.
Definition: range.h:392
const_iterator begin() const
Index iterator factory.
Definition: range.h:662
size_type * data_
Definition: range.h:53
ordinal_type ordinal(const ordinal_type index) const
calculate the ordinal index of i
Definition: range.h:826
Range_ & operator=(Range_ &&other)
Move assignment operator.
Definition: range.h:524
index index_type
Coordinate index type, to conform Tensor Working Group spec.
Definition: range.h:44
void serialize(const Archive &ar)
Definition: range.h:920
const Index & idx(const Index &i) const
calculate the index of i
Definition: range.h:913
Range(const Index... extents)
Range constructor from a pack of extents for each dimension.
Definition: range.h:430
Range(const std::initializer_list< std::pair< Index1, Index2 >> &bounds)
Construct range defined by an initializer_list of {lower,upper} bound pairs.
Definition: range.h:412
Range Range_
This object type.
Definition: range.h:41
Range(const Range_ &other)
Copy Constructor.
Definition: range.h:451
Range(const std::initializer_list< Index1 > &lower_bound, const std::initializer_list< Index1 > &upper_bound)
Construct range defined by the upper and lower bound sequences.
Definition: range.h:329
extent_type extent() const
Range extent accessor.
Definition: range.h:601
size_type lobound(size_t dim) const
Range lower bound element accessor.
Definition: range.h:561
ordinal_type offset() const
Range offset.
Definition: range.h:654
Coordinate index iterate.
ordinal_type area() const
Definition: range.h:647
~Range()
Destructor.
Definition: range.h:499
index idx(size_type index) const
calculate the coordinate index of the ordinal index, index.
Definition: range.h:877
Range()
Default constructor.
Definition: range.h:293
size_array stride() const
Range stride accessor.
Definition: range.h:625
Range(const IndexPairs... bounds)
Range constructor from a pack of {lo,up} bounds for each dimension.
Definition: range.h:442