Eigen-unsupported  3.4.90 (git rev 67eeba6e720c5745abc77ae6c92ce0a44aa7b7ae)
TensorContraction.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
12 
13 #include "./InternalHeaderCheck.h"
14 
15 namespace Eigen {
16 
24 namespace internal {
25 
26 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
27 struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
28 {
29  // Type promotion to handle the case where the types of the lhs and the rhs are different.
30  typedef typename gebp_traits<std::remove_const_t<typename LhsXprType::Scalar>,
31  std::remove_const_t<typename RhsXprType::Scalar>>::ResScalar Scalar;
32 
33  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
34  typename traits<RhsXprType>::StorageKind>::ret StorageKind;
35  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
36  typename traits<RhsXprType>::Index>::type Index;
37  typedef typename LhsXprType::Nested LhsNested;
38  typedef typename RhsXprType::Nested RhsNested;
39  typedef std::remove_reference_t<LhsNested> LhsNested_;
40  typedef std::remove_reference_t<RhsNested> RhsNested_;
41 
42  // From NumDims below.
43  static constexpr int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
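 // For example, contracting a rank-3 lhs with a rank-2 rhs along a single
 // index pair gives NumDimensions = 3 + 2 - 2 * 1 = 3.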
44  static constexpr int Layout = traits<LhsXprType>::Layout;
45  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
46  typename traits<LhsXprType>::PointerType,
47  typename traits<RhsXprType>::PointerType>
48  PointerType;
49 
50  enum {
51  Flags = 0
52  };
53 };
54 
55 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
56 struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense>
57 {
58  typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
59 };
60 
61 template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
62 struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >::type>
63 {
64  typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
65 };
66 
67 template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_, typename Device_>
68 struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_> > {
69  typedef Indices_ Indices;
70  typedef LeftArgType_ LeftArgType;
71  typedef RightArgType_ RightArgType;
72  typedef OutputKernelType_ OutputKernelType;
73  typedef Device_ Device;
74 
75  // From NumDims below.
76  static constexpr int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
77 };
78 
79 // Helper class to allocate and deallocate temporary memory for packed buffers.
80 template <typename LhsScalar, typename RhsScalar>
81 struct TensorContractionBlockMemAllocator {
82  typedef void* BlockMemHandle;
83 
84  template <typename Device>
85  EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm,
86  const Index bk,
87  const Index bn,
88  LhsScalar** lhs_block,
89  RhsScalar** rhs_block) {
90  eigen_assert(lhs_block);
91  eigen_assert(rhs_block);
92  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
93  char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
94  eigen_assert(block_mem);
95  *lhs_block = reinterpret_cast<LhsScalar*>(block_mem);
96  *rhs_block = reinterpret_cast<RhsScalar*>(block_mem + sz.lhs_size);
97  return block_mem;
98  }
99 
100  template <typename Device>
101  EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(
102  Device& d, const Index bm, const Index bk, const Index bn,
103  const Index num_lhs, const Index num_rhs, const Index num_slices,
104  std::vector<LhsScalar*>* lhs_blocks,
105  std::vector<RhsScalar*>* rhs_blocks) {
106  eigen_assert(num_slices > 0);
107  eigen_assert(num_lhs >= 0 && num_rhs >= 0);
108  eigen_assert(num_lhs == 0 || lhs_blocks);
109  eigen_assert(num_rhs == 0 || rhs_blocks);
110  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
111  void* block_mem = d.allocate(
112  (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
113  eigen_assert(block_mem);
114  char* mem = static_cast<char*>(block_mem);
115 
116  for (Index x = 0; x < num_slices; x++) {
117  if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
118  for (Index m = 0; m < num_lhs; m++) {
119  lhs_blocks[x][m] = reinterpret_cast<LhsScalar*>(mem);
120  mem += sz.lhs_size;
121  }
122  if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
123  for (Index n = 0; n < num_rhs; n++) {
124  rhs_blocks[x][n] = reinterpret_cast<RhsScalar*>(mem);
125  mem += sz.rhs_size;
126  }
127  }
128 
129  return block_mem;
130  }
131 
132  template <typename Device>
133  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
134  d.deallocate(handle);
135  }
136 
137  private:
138  struct BlockSizes {
139  Index lhs_size;
140  Index rhs_size;
141  };
142  EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm,
143  const Index bk,
144  const Index bn) {
145  Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
146  BlockSizes sz;
147  sz.lhs_size = divup<Index>(bm * bk * sizeof(LhsScalar), align) * align;
148  sz.rhs_size = divup<Index>(bn * bk * sizeof(RhsScalar), align) * align;
149  return sz;
150  }
151 };
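// As a worked example of the padding above: with bm = 33, bk = 31,
// LhsScalar = float and EIGEN_MAX_ALIGN_BYTES = 64, the packed lhs block takes
// divup(33 * 31 * 4, 64) * 64 = 4096 bytes (33 * 31 * 4 = 4092, rounded up to
// the alignment boundary).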
152 
153 // WARNING: In this code we assume that Lhs and Rhs tensor expressions are in
154 // ColMajor storage order. This property is guaranteed by the
155 // TensorContractionOp evaluator. TensorContractionKernel specifies how we pack
156 // blocks of Lhs and Rhs tensor expressions, and how we invoke matrix
157 // multiplication for these blocks. Default tensor contraction uses
158 // gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see
159 // GeneralBlockPanelKernel.h for details).
160 //
161 // By specializing contraction kernels we can use other low level libraries to
162 // perform matrix multiplication, and still rely on the Eigen contraction evaluator.
163 // This also includes full support in TensorContractionThreadPool, assuming that
164 // the underlying gemm does not use its own threading.
165 //
166 // - ResScalar/LhsScalar/RhsScalar - scalar type for the result of
167 // multiplication, lhs tensor and rhs tensor respectively.
168 //
169 // - StorageIndex - index type for the tensor expressions. In practice it is
170 // almost always Eigen::Index.
171 //
172 // - OutputMapper provides access to the memory of the output matrix. In
173 // practice it's always a column-major blas_data_mapper (it must be of ResScalar
174 // type).
175 //
176 // - LhsMapper/RhsMapper similarly to blas_data_mapper provide a two dimensional
177 // view into the Lhs/Rhs tensor expressions. In practice it's
178 // TensorContractionInputMapper, or some specialization of it based on the
179 // type of tensor expression (e.g. TensorImagePatchOp has an optimized input
180 // mapper).
181 template <typename ResScalar, typename LhsScalar, typename RhsScalar,
182  typename StorageIndex, typename OutputMapper, typename LhsMapper,
183  typename RhsMapper>
184 struct TensorContractionKernel {
185  // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
186  // (otherwise beta should always be equal to 1).
187  enum { HasBeta = false };
188 
189  EIGEN_DEVICE_FUNC
190  TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_,
191  StorageIndex bm_, StorageIndex bk_, StorageIndex bn_)
192  : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}
193 
194  // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
195  typedef LhsScalar* LhsBlock;
196  typedef RhsScalar* RhsBlock;
197 
198  // Packed Lhs/Rhs block memory allocator.
199  typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>
200  BlockMemAllocator;
201  typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;
202 
203  typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
204 
205  typedef internal::gemm_pack_lhs<
206  LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
207  Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
208  LhsPacker;
209 
210  typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex,
211  typename RhsMapper::SubMapper, Traits::nr,
212  ColMajor>
213  RhsPacker;
214 
215  typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex,
216  OutputMapper, Traits::mr, Traits::nr,
217  /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
218  GebpKernel;
219 
220  template <typename Device>
221  EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,
222  RhsBlock* rhs_block) {
223  return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
224  }
225 
226  template <typename Device>
227  EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(
228  Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
229  const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
230  std::vector<RhsBlock>* rhs_blocks) {
231  return BlockMemAllocator::allocateSlices(
232  d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
233  }
234 
235  template <typename Device>
236  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
237  BlockMemAllocator::deallocate(d, handle);
238  }
239 
240  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(
241  LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
242  const StorageIndex depth, const StorageIndex rows) {
243  LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
244  /*offset*/ 0);
245  }
246 
247  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(
248  RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
249  const StorageIndex depth, const StorageIndex cols) {
250  RhsPacker()(*rhsBlock, data_mapper, depth, cols);
251  }
252 
253  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(
254  const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
255  const RhsBlock& rhsBlock, const StorageIndex rows,
256  const StorageIndex depth, const StorageIndex cols,
257  const ResScalar alpha, const ResScalar beta) {
258  // Default GEBP kernel does not support beta.
259  eigen_assert(beta == ResScalar(1));
260  static const int kComputeStrideFromBlockDimensions = -1;
261  GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
262  /*strideA*/ kComputeStrideFromBlockDimensions,
263  /*strideB*/ kComputeStrideFromBlockDimensions,
264  /*offsetA*/ 0, /*offsetB*/ 0);
265  }
266 
267  private:
268  // These are the dimensions of the original tensors and the selected block sizes. The
269  // actual block sizes passed to all functions above may be smaller because of
270  // the partial blocks at the end.
271  const StorageIndex m;
272  const StorageIndex k;
273  const StorageIndex n;
274  const StorageIndex bm;
275  const StorageIndex bk;
276  const StorageIndex bn;
277 };
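// Schematically, the evaluators below drive this kernel as follows (a
// simplified sketch of the loop in evalGemmPartial further down; i2, k2, j2
// are block offsets and mc, kc, nc are block sizes):
//
//   LhsBlock blockA;
//   RhsBlock blockB;
//   BlockMemHandle mem = kernel.allocate(device, &blockA, &blockB);
//   kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), kc, mc);
//   kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), kc, nc);
//   kernel.invoke(output.getSubMapper(i2, j2), blockA, blockB, mc, kc, nc,
//                 /*alpha=*/Scalar(1), /*beta=*/Scalar(1));
//   kernel.deallocate(device, mem);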
278 
279 } // end namespace internal
280 
281 // Tensor contraction parameters that make it possible to map the 2-dimensional
282 // coordinates of the output matrix back to the output tensor dimensions.
283 struct TensorContractionParams {
284  // The TensorContraction evaluator assumes that both tensors are in ColMajor
285  // layout; if the tensors are in RowMajor layout, the evaluator swaps lhs and rhs.
286  bool swapped_arguments;
287 };
288 
289 // An output kernel allows fusing operations into the tensor contraction.
290 //
291 // Examples:
292 // 1. Elementwise Relu transformation following Conv2D.
293 // 2. AddBias to the Conv2D output channels dimension.
294 //
295 // The NoOpOutputKernel implements an output kernel that does absolutely nothing.
296 struct NoOpOutputKernel {
312  template <typename Index, typename Scalar>
313  EIGEN_ALWAYS_INLINE void operator()(
314  const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
315  const TensorContractionParams& params, Index i,
316  Index j, Index num_rows, Index num_cols) const {
317  EIGEN_UNUSED_VARIABLE(output_mapper);
318  EIGEN_UNUSED_VARIABLE(params);
319  EIGEN_UNUSED_VARIABLE(i);
320  EIGEN_UNUSED_VARIABLE(j);
321  EIGEN_UNUSED_VARIABLE(num_rows);
322  EIGEN_UNUSED_VARIABLE(num_cols);
323  }
324 };
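// As an illustration only (not part of Eigen), a custom output kernel that
// fuses an elementwise Relu into the contraction could mirror the signature of
// NoOpOutputKernel::operator() above, e.g.:
//
//   struct ReluOutputKernel {
//     template <typename Index, typename Scalar>
//     EIGEN_ALWAYS_INLINE void operator()(
//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
//         const TensorContractionParams& /*params*/, Index /*i*/, Index /*j*/,
//         Index num_rows, Index num_cols) const {
//       // Clamp every coefficient of the just-computed output block at zero.
//       for (Index col = 0; col < num_cols; ++col) {
//         for (Index row = 0; row < num_rows; ++row) {
//           Scalar& v = output_mapper(row, col);
//           v = numext::maxi(v, Scalar(0));
//         }
//       }
//     }
//   };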
325 
326 template<typename Indices, typename LhsXprType, typename RhsXprType, typename OutputKernelType = const NoOpOutputKernel>
327 class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors>
328 {
329  public:
330  typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
331  typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
332  typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
333  typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
334  typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
335  typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
336 
337  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
338  const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims,
339  const OutputKernelType& output_kernel = OutputKernelType())
340  : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims),
341  m_output_kernel(output_kernel) {}
342 
343  EIGEN_DEVICE_FUNC
344  const Indices& indices() const { return m_indices; }
345 
347  EIGEN_DEVICE_FUNC
348  const internal::remove_all_t<typename LhsXprType::Nested>&
349  lhsExpression() const { return m_lhs_xpr; }
350 
351  EIGEN_DEVICE_FUNC
352  const internal::remove_all_t<typename RhsXprType::Nested>&
353  rhsExpression() const { return m_rhs_xpr; }
354 
355  EIGEN_DEVICE_FUNC
356  const OutputKernelType& outputKernel() const { return m_output_kernel; }
357 
358  protected:
359  typename LhsXprType::Nested m_lhs_xpr;
360  typename RhsXprType::Nested m_rhs_xpr;
361  const Indices m_indices;
362  const OutputKernelType m_output_kernel;
363 };
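// Contraction expressions are normally created via TensorBase::contract(). A
// minimal usage sketch (the tensors t1 and t2 below are assumptions for this
// example):
//
//   Eigen::Tensor<float, 2> t1(3, 4), t2(4, 5);
//   t1.setRandom();
//   t2.setRandom();
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {{ Eigen::IndexPair<int>(1, 0) }};
//   Eigen::Tensor<float, 2> result = t1.contract(t2, dims);  // 3 x 5 matrix product
//
// An output kernel can be passed as a third argument to contract() to fuse an
// elementwise operation into the contraction.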
364 
365 
366 template<typename Derived>
367 struct TensorContractionEvaluatorBase : internal::no_assignment_operator
368 {
369  typedef typename internal::traits<Derived>::Indices Indices;
370  typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
371  typedef typename internal::traits<Derived>::RightArgType RightArgType;
372  typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
373  typedef typename internal::traits<Derived>::Device Device;
374 
375  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
376  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
377  typedef typename XprType::Index Index;
378  typedef typename XprType::CoeffReturnType CoeffReturnType;
379  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
380  typedef StorageMemory<Scalar, Device> Storage;
381  typedef typename Storage::Type EvaluatorPointerType;
382 
383  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
384  enum {
385  IsAligned = true,
386  PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
387  BlockAccess = false,
388  PreferBlockAccess = false,
389  CoordAccess = false, // to be implemented
390  RawAccess = true
391  };
392 
393  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
394  typedef internal::TensorBlockNotImplemented TensorBlock;
395  //===--------------------------------------------------------------------===//
396 
397  // Most of the code assumes that both input tensors are ColMajor. If the
398  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
399  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
400  // will pretend B is LHS and A is RHS.
401  typedef std::conditional_t<
402  static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
403  typedef std::conditional_t<
404  static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;
405 
406  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
407  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;
408 
409  static constexpr int LDims =
410  internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
411  static constexpr int RDims =
412  internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
413  static constexpr int ContractDims = internal::array_size<Indices>::value;
414  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
415 
416  typedef array<Index, ContractDims> contract_t;
417  typedef array<Index, LDims - ContractDims> left_nocontract_t;
418  typedef array<Index, RDims - ContractDims> right_nocontract_t;
419 
420  typedef DSizes<Index, NumDims> Dimensions;
421 
422  EIGEN_STRONG_INLINE
423  TensorContractionEvaluatorBase(const XprType& op, const Device& device)
424  : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
425  op.lhsExpression(), op.rhsExpression()), device),
426  m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
427  op.rhsExpression(), op.lhsExpression()), device),
428  m_device(device),
429  m_output_kernel(op.outputKernel()),
430  m_result(NULL) {
431  EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
432  static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
433  YOU_MADE_A_PROGRAMMING_MISTAKE);
434 
435 
436  DSizes<Index, LDims> eval_left_dims;
437  DSizes<Index, RDims> eval_right_dims;
438  array<IndexPair<Index>, ContractDims> eval_op_indices;
439  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
440  // For ColMajor, we keep using the existing dimensions
441  for (int i = 0; i < LDims; i++) {
442  eval_left_dims[i] = m_leftImpl.dimensions()[i];
443  }
444  for (int i = 0; i < RDims; i++) {
445  eval_right_dims[i] = m_rightImpl.dimensions()[i];
446  }
447  // We keep the pairs of contracting indices.
448  for (int i = 0; i < ContractDims; i++) {
449  eval_op_indices[i].first = op.indices()[i].first;
450  eval_op_indices[i].second = op.indices()[i].second;
451  }
452  } else {
453  // For RowMajor, we need to reverse the existing dimensions
454  for (int i = 0; i < LDims; i++) {
455  eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
456  }
457  for (int i = 0; i < RDims; i++) {
458  eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
459  }
460  // We need to flip all the pairs of contracting indices as well as
461  // reversing the dimensions.
462  for (int i = 0; i < ContractDims; i++) {
463  eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
464  eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
465  }
466  }
467 
468  // Check for duplicate axes and make sure the first index in eval_op_indices
469  // is increasing. Using O(n^2) sorting is OK since ContractDims is small.
470  for (int i = 0; i < ContractDims; i++) {
471  for (int j = i + 1; j < ContractDims; j++) {
472  eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
473  eval_op_indices[j].second != eval_op_indices[i].second &&
474  "contraction axes should be unique");
475  if (eval_op_indices[j].first < eval_op_indices[i].first) {
476  numext::swap(eval_op_indices[j], eval_op_indices[i]);
477  }
478  }
479  }
480 
481  array<Index, LDims> lhs_strides;
482  lhs_strides[0] = 1;
483  for (int i = 0; i < LDims-1; ++i) {
484  lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
485  }
486 
487  array<Index, RDims> rhs_strides;
488  rhs_strides[0] = 1;
489  for (int i = 0; i < RDims-1; ++i) {
490  rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
491  }
492 
493  if (m_i_strides.size() > 0) m_i_strides[0] = 1;
494  if (m_j_strides.size() > 0) m_j_strides[0] = 1;
495  if (m_k_strides.size() > 0) m_k_strides[0] = 1;
496 
497  m_i_size = 1;
498  m_j_size = 1;
499  m_k_size = 1;
500 
501  // To compute the output dimensions, we simply concatenate the non-contracting
502  // dimensions of the left and then the right tensor. Additionally, we also
503  // compute the strides corresponding to the left non-contracting
504  // dimensions and right non-contracting dimensions.
505  m_lhs_inner_dim_contiguous = true;
506  int dim_idx = 0;
507  Index nocontract_idx = 0;
508 
509  for (int i = 0; i < LDims; i++) {
510  // find if we are contracting on index i of left tensor
511  bool contracting = false;
512  for (int j = 0; j < ContractDims; j++) {
513  if (eval_op_indices[j].first == i) {
514  contracting = true;
515  break;
516  }
517  }
518  if (!contracting) {
519  // add dimension size to output dimensions
520  m_dimensions[dim_idx] = eval_left_dims[i];
521  m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
522  if (dim_idx != i) {
523  m_lhs_inner_dim_contiguous = false;
524  }
525  if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
526  m_i_strides[nocontract_idx+1] =
527  m_i_strides[nocontract_idx] * eval_left_dims[i];
528  } else {
529  m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
530  }
531  dim_idx++;
532  nocontract_idx++;
533  }
534  }
535 
536  nocontract_idx = 0;
537  for (int i = 0; i < RDims; i++) {
538  bool contracting = false;
539  // find if we are contracting on index i of right tensor
540  for (int j = 0; j < ContractDims; j++) {
541  if (eval_op_indices[j].second == i) {
542  contracting = true;
543  break;
544  }
545  }
546  if (!contracting) {
547  m_dimensions[dim_idx] = eval_right_dims[i];
548  if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
549  m_j_strides[nocontract_idx+1] =
550  m_j_strides[nocontract_idx] * eval_right_dims[i];
551  } else {
552  m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
553  }
554  m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
555  dim_idx++;
556  nocontract_idx++;
557  }
558  }
559 
560  // Now compute the strides corresponding to the contracting dimensions. We
561  // assumed above that non-contracting axes are represented in the same order
562  // in the matrix as they are in the tensor. This is not the case for
563  // contracting axes. As the contracting axes must be of the same size in
564  // each tensor, we'll only look at the first tensor here.
565  m_rhs_inner_dim_contiguous = true;
566  m_rhs_inner_dim_reordered = false;
567  for (int i = 0; i < ContractDims; i++) {
568  Index left = eval_op_indices[i].first;
569  Index right = eval_op_indices[i].second;
570 
571  Index size = eval_left_dims[left];
572  eigen_assert(size == eval_right_dims[right] &&
573  "Contraction axes must be same size");
574 
575  if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
576  m_k_strides[i+1] = m_k_strides[i] * size;
577  } else {
578  m_k_size = m_k_strides[i] * size;
579  }
580  m_left_contracting_strides[i] = lhs_strides[left];
581  m_right_contracting_strides[i] = rhs_strides[right];
582 
583  if (i > 0 && right < eval_op_indices[i-1].second) {
584  m_rhs_inner_dim_reordered = true;
585  }
586  if (right != i) {
587  m_rhs_inner_dim_contiguous = false;
588  }
589  }
590 
591  // If the layout is RowMajor, we need to reverse the m_dimensions
592  if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
593  for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
594  numext::swap(m_dimensions[i], m_dimensions[j]);
595  }
596  }
597 
598  // A set of parameters that will allow the output kernel to map the output
599  // matrix coordinates (i, j) back to the original tensor dimensions.
600  // TODO(ezhulenev): Add parameters required to infer output tensor index for
601  // more complex contractions than 2x2 on internal dimension.
602  m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
603  }
604 
605  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
606 
607  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
608  m_leftImpl.evalSubExprsIfNeeded(NULL);
609  m_rightImpl.evalSubExprsIfNeeded(NULL);
610  if (data) {
611  evalTo(data);
612  return false;
613  } else {
614  m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
615  evalTo(m_result);
616  return true;
617  }
618  }
619 
620 #ifdef EIGEN_USE_THREADS
621  template <typename EvalSubExprsCallback>
622  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
623  EvaluatorPointerType dest, EvalSubExprsCallback done) {
624  m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
625  m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
626  if (dest) {
627  evalToAsync(dest, [done]() { done(false); });
628  } else {
629  m_result = static_cast<EvaluatorPointerType>(
630  m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
631  evalToAsync(m_result, [done]() { done(true); });
632  }
633  });
634  });
635  }
636 #endif // EIGEN_USE_THREADS
637 
638 #ifndef TENSOR_CONTRACTION_DISPATCH
639 #define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
640  if (this->m_lhs_inner_dim_contiguous) { \
641  if (this->m_rhs_inner_dim_contiguous) { \
642  if (this->m_rhs_inner_dim_reordered) { \
643  METHOD<true, true, true, ALIGNMENT> ARGS; \
644  } else { \
645  METHOD<true, true, false, ALIGNMENT> ARGS; \
646  } \
647  } else { \
648  if (this->m_rhs_inner_dim_reordered) { \
649  METHOD<true, false, true, ALIGNMENT> ARGS; \
650  } else { \
651  METHOD<true, false, false, ALIGNMENT> ARGS; \
652  } \
653  } \
654  } else { \
655  if (this->m_rhs_inner_dim_contiguous) { \
656  if (this->m_rhs_inner_dim_reordered) { \
657  METHOD<false, true, true, ALIGNMENT> ARGS; \
658  } else { \
659  METHOD<false, true, false, ALIGNMENT> ARGS; \
660  } \
661  } else { \
662  if (this->m_rhs_inner_dim_reordered) { \
663  METHOD<false, false, true, ALIGNMENT> ARGS; \
664  } else { \
665  METHOD<false, false, false, ALIGNMENT> ARGS; \
666  } \
667  } \
668  }
669 #endif
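// Example of how this dispatch macro is used by the default-device evaluator
// at the end of this file:
//
//   TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));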
670 
671 #ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
672 #define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
673  if (this->m_lhs_inner_dim_contiguous) { \
674  if (this->m_rhs_inner_dim_contiguous) { \
675  if (this->m_rhs_inner_dim_reordered) { \
676  (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
677  } else { \
678  (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN; \
679  } \
680  } else { \
681  if (this->m_rhs_inner_dim_reordered) { \
682  (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN; \
683  } else { \
684  (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
685  } \
686  } \
687  } else { \
688  if (this->m_rhs_inner_dim_contiguous) { \
689  if (this->m_rhs_inner_dim_reordered) { \
690  (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
691  } else { \
692  (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN; \
693  } \
694  } else { \
695  if (this->m_rhs_inner_dim_reordered) { \
696  (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN; \
697  } else { \
698  (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
699  } \
700  } \
701  }
702 #endif
703 
704  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
705  static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
706  }
707 
708 #ifdef EIGEN_USE_THREADS
709  template <typename EvalToCallback>
710  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
711  static_cast<const Derived*>(this)
712  ->template evalProductAsync<EvalToCallback, Unaligned>(buffer,
713  std::move(done));
714  }
715 #endif // EIGEN_USE_THREADS
716 
717  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
718  bool rhs_inner_dim_reordered, int Alignment>
719  void evalProductSequential(Scalar* buffer) const {
720  if (this->m_j_size == 1) {
721  this->template evalGemv<lhs_inner_dim_contiguous,
722  rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
723  Alignment>(buffer);
724  } else {
725  this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
726  rhs_inner_dim_reordered, Alignment>(buffer);
727  }
728  }
729 
730  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
731  #if !defined(EIGEN_HIPCC)
732  EIGEN_DEVICE_FUNC
733  #endif
734  void evalGemv(Scalar* buffer) const {
735  const Index rows = m_i_size;
736  const Index cols = m_k_size;
737 
738  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
739  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
740  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
741  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
742  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
743  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
744  const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
745  const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
746  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
747  LeftEvaluator, left_nocontract_t,
748  contract_t, lhs_packet_size,
749  lhs_inner_dim_contiguous,
750  false, lhs_alignment> LhsMapper;
751 
752  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
753  RightEvaluator, right_nocontract_t,
754  contract_t, rhs_packet_size,
755  rhs_inner_dim_contiguous,
756  rhs_inner_dim_reordered, rhs_alignment> RhsMapper;
757 
758  LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
759  m_left_contracting_strides, m_k_strides);
760  RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
761  m_right_contracting_strides, m_k_strides);
762 
763  const Scalar alpha(1);
764  const Index resIncr(1);
765 
766  // zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
767  m_device.fill(buffer, buffer + rows, Scalar(0));
768 
769  internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
770  rows, cols, lhs, rhs,
771  buffer, resIncr, alpha);
772 
773  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
774  m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params,
775  static_cast<Index>(0), static_cast<Index>(0), rows,
776  static_cast<Index>(1));
777  }
778 
779  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
780  #if !defined(EIGEN_HIPCC)
781  EIGEN_DEVICE_FUNC
782  #endif
783  void evalGemm(Scalar* buffer) const {
784  // columns in left side, rows in right side
785  const Index k = this->m_k_size;
786  this->template evalGemmPartial<lhs_inner_dim_contiguous,
787  rhs_inner_dim_contiguous,
788  rhs_inner_dim_reordered,
789  Alignment, true>(buffer, 0, k, 1);
790  }
791 
792  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
793  bool rhs_inner_dim_reordered, int Alignment>
794  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(
795  Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
796  evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
797  rhs_inner_dim_reordered, Alignment,
798  /*use_output_kernel*/ false>(buffer, k_start, k_end,
799  num_threads);
800  }
801 
802  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment, bool use_output_kernel>
803  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
804  eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
805  // columns in slice on left side, rows on right side
806  const Index k_slice = k_end - k_start;
807 
808  // rows in left side
809  const Index m = this->m_i_size;
810 
811  // columns in right side
812  const Index n = this->m_j_size;
813 
814  // define data mappers for Lhs and Rhs
815  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
816  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
817 
818  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
819  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
820 
821  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
822  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
823 
824  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
825  LeftEvaluator, left_nocontract_t,
826  contract_t, lhs_packet_size,
827  lhs_inner_dim_contiguous,
828  false, Unaligned> LhsMapper;
829 
830  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
831  RightEvaluator, right_nocontract_t,
832  contract_t, rhs_packet_size,
833  rhs_inner_dim_contiguous,
834  rhs_inner_dim_reordered, Unaligned> RhsMapper;
835 
836  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
837 
838  typedef internal::TensorContractionKernel<
839  Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
840  TensorContractionKernel;
841 
842  // initialize data mappers
843  LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
844  this->m_left_contracting_strides, this->m_k_strides);
845 
846  RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
847  this->m_right_contracting_strides, this->m_k_strides);
848 
849  OutputMapper output(buffer, m);
850 
851  // Sizes of the blocks to load in cache. See the Goto paper for details.
852  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar,
853  Index, internal::ShardByCol>
854  blocking(k_slice, m, n, num_threads);
855  const Index kc = blocking.kc();
856  const Index mc = numext::mini(m, blocking.mc());
857  const Index nc = numext::mini(n, blocking.nc());
858 
859  typedef typename TensorContractionKernel::LhsBlock LhsBlock;
860  typedef typename TensorContractionKernel::RhsBlock RhsBlock;
861 
862  LhsBlock blockA;
863  RhsBlock blockB;
864 
865  TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);
866 
867  typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
868  const BlockMemHandle packed_mem =
869  kernel.allocate(this->m_device, &blockA, &blockB);
870 
871  // If the contraction kernel does not support beta, explicitly initialize the
872  // output buffer with zeroes.
873  if (!TensorContractionKernel::HasBeta) {
874  this->m_device.fill(buffer, buffer + m * n, Scalar(0));
875  }
876 
877  for(Index i2=0; i2<m; i2+=mc)
878  {
879  const Index actual_mc = numext::mini(i2+mc,m)-i2;
880  for (Index k2 = k_start; k2 < k_end; k2 += kc) {
881  // make sure we don't overshoot right edge of left matrix, then pack vertical panel
882  const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
883  kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
884 
885  // If kernel supports beta, there is no need to initialize output
886  // buffer with zeroes.
887  const Scalar alpha = Scalar(1);
888  const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start)
889  ? Scalar(0)
890  : Scalar(1);
891 
892  // series of horizontal blocks
893  for (Index j2 = 0; j2 < n; j2 += nc) {
894  // make sure we don't overshoot right edge of right matrix, then pack block
895  const Index actual_nc = numext::mini(j2 + nc, n) - j2;
896  kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc,
897  actual_nc);
898 
899  // call gebp (matrix kernel)
900  // The parameters here are copied from Eigen's GEMM implementation
901  const OutputMapper output_mapper = output.getSubMapper(i2, j2);
902  kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc,
903  actual_nc, alpha, beta);
904 
905  // We are done with this [i2, j2] output block.
906  if (use_output_kernel && k2 + kc >= k_end) {
907  m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2,
908  actual_mc, actual_nc);
909  }
910  }
911  }
912  }
913 
914  kernel.deallocate(this->m_device, packed_mem);
915  }
916 
917  EIGEN_STRONG_INLINE void cleanup() {
918  m_leftImpl.cleanup();
919  m_rightImpl.cleanup();
920 
921  if (m_result != NULL) {
922  m_device.deallocate(m_result);
923  m_result = NULL;
924  }
925  }
926 
927  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
928  return m_result[index];
929  }
930 
931  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
932  return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
933  }
934 
935  template<int LoadMode>
936  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
937  return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
938  }
939 
940  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
941 
942 protected:
943  Dimensions m_dimensions;
944 
945  contract_t m_k_strides;
946  contract_t m_left_contracting_strides;
947  contract_t m_right_contracting_strides;
948 
949  bool m_lhs_inner_dim_contiguous;
950  bool m_rhs_inner_dim_contiguous;
951  bool m_rhs_inner_dim_reordered;
952 
953  left_nocontract_t m_i_strides;
954  right_nocontract_t m_j_strides;
955  left_nocontract_t m_left_nocontract_strides;
956  right_nocontract_t m_right_nocontract_strides;
957 
958  Index m_i_size;
959  Index m_j_size;
960  Index m_k_size;
961 
962  TensorContractionParams m_tensor_contraction_params;
963 
964  TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
965  TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
966  const Device EIGEN_DEVICE_REF m_device;
967  OutputKernelType m_output_kernel;
968  EvaluatorPointerType m_result;
969 };
970 
971 
972 // evaluator for default device
973 template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
974 struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> :
975  public TensorContractionEvaluatorBase<
976  TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> > {
977  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
978  typedef TensorContractionEvaluatorBase<Self> Base;
979 
980  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
981  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
982  typedef typename XprType::Index Index;
983  typedef typename XprType::CoeffReturnType CoeffReturnType;
984  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
985 
986  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
987 
988  // Most of the code assumes that both input tensors are ColMajor. If the
989  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
990  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
991  // will pretend B is LHS and A is RHS.
992  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
993  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;
994 
995  static constexpr int LDims =
996  internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
997  static constexpr int RDims =
998  internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
999  static constexpr int ContractDims = internal::array_size<Indices>::value;
1000 
1001  typedef array<Index, ContractDims> contract_t;
1002  typedef array<Index, LDims - ContractDims> left_nocontract_t;
1003  typedef array<Index, RDims - ContractDims> right_nocontract_t;
1004 
1005  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
1006 
1007  // Could we use NumDimensions here?
1008  typedef DSizes<Index, NumDims> Dimensions;
1009 
1010  TensorEvaluator(const XprType& op, const Device& device) :
1011  Base(op, device) { }
1012 
1013  template <int Alignment>
1014  void evalProduct(Scalar* buffer) const {
1015  TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
1016  }
1017 };
1018 
1019 } // end namespace Eigen
1020 
1021 #endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H