// Eigen-unsupported 3.4.90 (git rev 67eeba6e720c5745abc77ae6c92ce0a44aa7b7ae)
// unsupported/Eigen/CXX11/src/Tensor/Tensor.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

65 template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
66 class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
67 {
68  public:
71  typedef typename Eigen::internal::nested<Self>::type Nested;
72  typedef typename internal::traits<Self>::StorageKind StorageKind;
73  typedef typename internal::traits<Self>::Index Index;
74  typedef Scalar_ Scalar;
75  typedef typename NumTraits<Scalar>::Real RealScalar;
76  typedef typename Base::CoeffReturnType CoeffReturnType;
77 
78  enum {
79  IsAligned = (EIGEN_MAX_ALIGN_BYTES>0) && !(Options_&DontAlign),
80  CoordAccess = true,
81  RawAccess = true
82  };
83 
84  static constexpr int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
85  static constexpr int Options = Options_;
86  static constexpr int NumIndices = NumIndices_;
87  typedef DSizes<Index, NumIndices_> Dimensions;
88 
89  protected:
90  TensorStorage<Scalar, Dimensions, Options> m_storage;
91 
92  template<typename CustomIndices>
93  struct isOfNormalIndex{
94  static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
95  static const bool is_int = NumTraits<CustomIndices>::IsInteger;
96  static const bool value = is_array | is_int;
97  };
98 
99  public:
100  // Metadata
101  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
102  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
103  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
104  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
105  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
106  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
107 
108  // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
109  // work, because that uses base().coeffRef() - and we don't yet
110  // implement a similar class hierarchy
111  inline Self& base() { return *this; }
112  inline const Self& base() const { return *this; }
113 
114  template<typename... IndexTypes>
115  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
116  {
117  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
118  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
119  return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
120  }
121 
122  // normal indices
123  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
124  {
125  eigen_internal_assert(checkIndexRange(indices));
126  return m_storage.data()[linearizedIndex(indices)];
127  }
128 
129  // custom indices
130  template<typename CustomIndices,
131  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
132  >
133  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
134  {
135  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
136  }
137 
138  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
139  {
140  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
141  return m_storage.data()[0];
142  }
143 
144  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
145  {
146  eigen_internal_assert(index >= 0 && index < size());
147  return m_storage.data()[index];
148  }
149 
150  template<typename... IndexTypes>
151  inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
152  {
153  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
154  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
155  return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
156  }
157 
158  // normal indices
159  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
160  {
161  eigen_internal_assert(checkIndexRange(indices));
162  return m_storage.data()[linearizedIndex(indices)];
163  }
164 
165  // custom indices
166  template<typename CustomIndices,
167  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
168  >
169  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
170  {
171  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
172  }
173 
174  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
175  {
176  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
177  return m_storage.data()[0];
178  }
179 
180  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
181  {
182  eigen_internal_assert(index >= 0 && index < size());
183  return m_storage.data()[index];
184  }
185 
186  template<typename... IndexTypes>
187  inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
188  {
189  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
190  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
191  return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
192  }
193 
194  // custom indices
195  template<typename CustomIndices,
196  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
197  >
198  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
199  {
200  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
201  }
202 
203  // normal indices
204  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
205  {
206  return coeff(indices);
207  }
208 
209  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
210  {
211  eigen_internal_assert(index >= 0 && index < size());
212  return coeff(index);
213  }
214 
215  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
216  {
217  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
218  return coeff();
219  }
220 
221  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
222  {
223  // The bracket operator is only for vectors, use the parenthesis operator instead.
224  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
225  return coeff(index);
226  }
227 
228  template<typename... IndexTypes>
229  inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
230  {
231  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
232  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
233  return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
234  }
235 
236  // normal indices
237  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
238  {
239  return coeffRef(indices);
240  }
241 
242  // custom indices
243  template<typename CustomIndices,
244  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
245  >
246  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
247  {
248  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
249  }
250 
251  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
252  {
253  eigen_assert(index >= 0 && index < size());
254  return coeffRef(index);
255  }
256 
257  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
258  {
259  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
260  return coeffRef();
261  }
262 
263  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
264  {
265  // The bracket operator is only for vectors, use the parenthesis operator instead
266  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
267  return coeffRef(index);
268  }
269 
270  EIGEN_DEVICE_FUNC
271  EIGEN_STRONG_INLINE Tensor()
272  : m_storage()
273  {
274  }
275 
276  EIGEN_DEVICE_FUNC
277  EIGEN_STRONG_INLINE Tensor(const Self& other)
278  : Base(other), m_storage(other.m_storage)
279  {
280  }
281 
282  template<typename... IndexTypes>
283  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
284  : m_storage(firstDimension, otherDimensions...)
285  {
286  // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
287  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
288  }
289 
291  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
292  : m_storage(internal::array_prod(dimensions), dimensions)
293  {
294  EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
295  }
296 
297  template<typename OtherDerived>
298  EIGEN_DEVICE_FUNC
299  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
300  {
301  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
302  Assign assign(*this, other.derived());
303  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
304  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
305  }
306 
307  template<typename OtherDerived>
308  EIGEN_DEVICE_FUNC
309  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
310  {
311  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
312  Assign assign(*this, other.derived());
313  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
314  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
315  }
316 
317  EIGEN_DEVICE_FUNC
318  EIGEN_STRONG_INLINE Tensor(Self&& other)
319  : m_storage(std::move(other.m_storage))
320  {
321  }
322  EIGEN_DEVICE_FUNC
323  EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
324  {
325  m_storage = std::move(other.m_storage);
326  return *this;
327  }
328 
329  EIGEN_DEVICE_FUNC
330  EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
331  {
332  typedef TensorAssignOp<Tensor, const Tensor> Assign;
333  Assign assign(*this, other);
334  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
335  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
336  return *this;
337  }
338  template<typename OtherDerived>
339  EIGEN_DEVICE_FUNC
340  EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
341  {
342  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
343  Assign assign(*this, other);
344  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
345  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
346  return *this;
347  }
348 
349  template<typename... IndexTypes> EIGEN_DEVICE_FUNC
350  void resize(Index firstDimension, IndexTypes... otherDimensions)
351  {
352  // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
353  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
354  resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
355  }
356 
358  EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
359  {
360  int i;
361  Index size = Index(1);
362  for (i = 0; i < NumIndices; i++) {
363  internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
364  size *= dimensions[i];
365  }
366  #ifdef EIGEN_INITIALIZE_COEFFS
367  bool size_changed = size != this->size();
368  m_storage.resize(size, dimensions);
369  if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
370  #else
371  m_storage.resize(size, dimensions);
372  #endif
373  }
374 
375  // Why this overload, DSizes is derived from array ??? //
376  EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
377  array<Index, NumIndices> dims;
378  for (int i = 0; i < NumIndices; ++i) {
379  dims[i] = dimensions[i];
380  }
381  resize(dims);
382  }
383 
384  EIGEN_DEVICE_FUNC
385  void resize()
386  {
387  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
388  // Nothing to do: rank 0 tensors have fixed size
389  }
390 
391  template <typename FirstType, typename... OtherTypes>
392  EIGEN_DEVICE_FUNC
393  void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions) {
394  array<Index, NumIndices> dims;
395  for (int i = 0; i < NumIndices; ++i) {
396  dims[i] = static_cast<Index>(dimensions[i]);
397  }
398  resize(dims);
399  }
400 
402  template<typename CustomDimension,
403  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
404  >
405  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
406  {
407  resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
408  }
409 
410 #ifndef EIGEN_EMULATE_CXX11_META_H
411  template <typename std::ptrdiff_t... Indices>
412  EIGEN_DEVICE_FUNC
413  void resize(const Sizes<Indices...>& dimensions) {
414  array<Index, NumIndices> dims;
415  for (int i = 0; i < NumIndices; ++i) {
416  dims[i] = static_cast<Index>(dimensions[i]);
417  }
418  resize(dims);
419  }
420 #else
421  template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
422  EIGEN_DEVICE_FUNC
423  void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
424  array<Index, NumIndices> dims;
425  for (int i = 0; i < NumIndices; ++i) {
426  dims[i] = static_cast<Index>(dimensions[i]);
427  }
428  resize(dims);
429  }
430 #endif
431 
432  #ifdef EIGEN_TENSOR_PLUGIN
433  #include EIGEN_TENSOR_PLUGIN
434  #endif
435 
436  protected:
437 
438  bool checkIndexRange(const array<Index, NumIndices>& indices) const
439  {
440  using internal::array_apply_and_reduce;
441  using internal::array_zip_and_reduce;
442  using internal::greater_equal_zero_op;
443  using internal::logical_and_op;
444  using internal::lesser_op;
445 
446  return
447  // check whether the indices are all >= 0
448  array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
449  // check whether the indices fit in the dimensions
450  array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
451  }
452 
453  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
454  {
455  if (Options&RowMajor) {
456  return m_storage.dimensions().IndexOfRowMajor(indices);
457  } else {
458  return m_storage.dimensions().IndexOfColMajor(indices);
459  }
460  }
461 };

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H
// --- Doxygen cross-reference residue from the documentation extraction ---
// The tensor base class.                 Definition: TensorForwardDeclarations.h:58
// The tensor class.                      Definition: Tensor.h:67
// Tensor(const array<Index, NumIndices>& dimensions)        Definition: Tensor.h:291
// void resize(CustomDimension& dimensions)                  Definition: Tensor.h:405
// void resize(const array<Index, NumIndices>& dimensions)   Definition: Tensor.h:358
// Namespace containing all symbols from the Eigen library.
// EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
// A cost model used to limit the number of threads used for evaluating tensor expression.
//                                        Definition: TensorEvaluator.h:31