#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename gebp_traits<std::remove_const_t<typename LhsXprType::Scalar>,
                               std::remove_const_t<typename RhsXprType::Scalar>>::ResScalar Scalar;

  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
                                        typename traits<RhsXprType>::StorageKind>::ret StorageKind;
  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
                                      typename traits<RhsXprType>::Index>::type Index;

  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;

  // The result rank: sum of the input ranks minus two per contracted index pair.
  static constexpr int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
  static constexpr int Layout = traits<LhsXprType>::Layout;

  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
                             typename traits<LhsXprType>::PointerType,
                             typename traits<RhsXprType>::PointerType>
      PointerType;
};
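// Note (added commentary, not from the original source): NumDimensions is the
// rank of the contraction result. For example, contracting a rank-3 lhs with a
// rank-2 rhs over a single index pair gives 3 + 2 - 2 * 1 = 3 output
// dimensions, i.e. every non-contracted dimension survives.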
template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense>
{
  typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
};
template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >::type>
{
  typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
};
template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_, typename Device_>
struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_> > {
  typedef Indices_ Indices;
  typedef LeftArgType_ LeftArgType;
  typedef RightArgType_ RightArgType;
  typedef OutputKernelType_ OutputKernelType;
  typedef Device_ Device;

  // Same rank computation as in the expression traits above.
  static constexpr int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
};
// Helper class to allocate and deallocate temporary memory for packed buffers.
template <typename LhsScalar, typename RhsScalar>
struct TensorContractionBlockMemAllocator {
  typedef void* BlockMemHandle;

  template <typename Device>
  EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm,
                                                   const Index bk,
                                                   const Index bn,
                                                   LhsScalar** lhs_block,
                                                   RhsScalar** rhs_block) {
    eigen_assert(lhs_block);
    eigen_assert(rhs_block);
    BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
    char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
    eigen_assert(block_mem);
    *lhs_block = reinterpret_cast<LhsScalar*>(block_mem);
    *rhs_block = reinterpret_cast<RhsScalar*>(block_mem + sz.lhs_size);
    return block_mem;
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(
      Device& d, const Index bm, const Index bk, const Index bn,
      const Index num_lhs, const Index num_rhs, const Index num_slices,
      std::vector<LhsScalar*>* lhs_blocks,
      std::vector<RhsScalar*>* rhs_blocks) {
    eigen_assert(num_slices > 0);
    eigen_assert(num_lhs >= 0 && num_rhs >= 0);
    eigen_assert(num_lhs == 0 || lhs_blocks);
    eigen_assert(num_rhs == 0 || rhs_blocks);
    BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
    void* block_mem = d.allocate(
        (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
    eigen_assert(block_mem);
    char* mem = static_cast<char*>(block_mem);

    for (Index x = 0; x < num_slices; x++) {
      if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
      for (Index m = 0; m < num_lhs; m++) {
        lhs_blocks[x][m] = reinterpret_cast<LhsScalar*>(mem);
        mem += sz.lhs_size;
      }
      if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
      for (Index n = 0; n < num_rhs; n++) {
        rhs_blocks[x][n] = reinterpret_cast<RhsScalar*>(mem);
        mem += sz.rhs_size;
      }
    }

    return block_mem;
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
    d.deallocate(handle);
  }

 private:
  struct BlockSizes {
    Index lhs_size;
    Index rhs_size;
  };

  EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm,
                                                              const Index bk,
                                                              const Index bn) {
    Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
    BlockSizes sz;
    // Round each block up to the maximum alignment so that the rhs block
    // carved out of the same allocation stays properly aligned.
    sz.lhs_size = divup<Index>(bm * bk * sizeof(LhsScalar), align) * align;
    sz.rhs_size = divup<Index>(bn * bk * sizeof(RhsScalar), align) * align;
    return sz;
  }
};
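// Worked example (illustrative, not from the original source): with
// EIGEN_MAX_ALIGN_BYTES == 64 and LhsScalar == float, a 7 x 3 lhs block needs
// 7 * 3 * sizeof(float) = 84 bytes; ComputeLhsRhsBlockSizes rounds this up to
// divup<Index>(84, 64) * 64 = 128 bytes, so the rhs block that follows it in
// the same allocation starts on a 64-byte boundary.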
// TensorContractionKernel specifies how blocks of the Lhs and Rhs tensor
// expressions are packed, and how the matrix product for those blocks is
// invoked. The default implementation relies on gemm_pack_lhs, gemm_pack_rhs
// and gebp_kernel from Eigen Core (see GeneralBlockPanelKernel.h). Both inputs
// are assumed to be in ColMajor storage order; this is guaranteed by the
// TensorContractionOp evaluator.
template <typename ResScalar, typename LhsScalar, typename RhsScalar,
          typename StorageIndex, typename OutputMapper, typename LhsMapper,
          typename RhsMapper>
struct TensorContractionKernel {
  // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
  // (otherwise beta should always be equal to 1).
  enum { HasBeta = false };

  EIGEN_DEVICE_FUNC
  TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_,
                          StorageIndex bm_, StorageIndex bk_, StorageIndex bn_)
      : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}

  // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
  typedef LhsScalar* LhsBlock;
  typedef RhsScalar* RhsBlock;

  // Packed Lhs/Rhs block memory allocator.
  typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>
      BlockMemAllocator;
  typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;

  typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;

  typedef internal::gemm_pack_lhs<
      LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
      Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
      LhsPacker;

  typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex,
                                  typename RhsMapper::SubMapper, Traits::nr,
                                  ColMajor>
      RhsPacker;

  typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex,
                                OutputMapper, Traits::mr, Traits::nr,
                                /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
      GebpKernel;

  template <typename Device>
  EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,
                                            RhsBlock* rhs_block) {
    return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(
      Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
      const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
      std::vector<RhsBlock>* rhs_blocks) {
    return BlockMemAllocator::allocateSlices(
        d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
    BlockMemAllocator::deallocate(d, handle);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(
      LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
      const StorageIndex depth, const StorageIndex rows) {
    LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
                /*offset*/ 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(
      RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
      const StorageIndex depth, const StorageIndex cols) {
    RhsPacker()(*rhsBlock, data_mapper, depth, cols);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(
      const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
      const RhsBlock& rhsBlock, const StorageIndex rows,
      const StorageIndex depth, const StorageIndex cols,
      const ResScalar alpha, const ResScalar beta) {
    // The default GEBP kernel does not support beta.
    eigen_assert(beta == ResScalar(1));
    static const int kComputeStrideFromBlockDimensions = -1;
    GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
                 /*strideA*/ kComputeStrideFromBlockDimensions,
                 /*strideB*/ kComputeStrideFromBlockDimensions,
                 /*offsetA*/ 0, /*offsetB*/ 0);
  }

 private:
  // Dimensions of the original tensors, and the selected block sizes.
  const StorageIndex m;
  const StorageIndex k;
  const StorageIndex n;
  const StorageIndex bm;
  const StorageIndex bk;
  const StorageIndex bn;
};
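// Usage sketch (illustrative, not part of the original source): a caller that
// owns an LhsMapper `lhs`, an RhsMapper `rhs`, an OutputMapper `out` and a
// device `device` drives the kernel roughly as follows for a single
// (bm x bk) * (bk x bn) block at offsets (i, k0, j):
//
//   TensorContractionKernel kernel(m, k, n, bm, bk, bn);
//   LhsBlock lhs_block; RhsBlock rhs_block;
//   BlockMemHandle handle = kernel.allocate(device, &lhs_block, &rhs_block);
//   kernel.packLhs(&lhs_block, lhs.getSubMapper(i, k0), bk, bm);
//   kernel.packRhs(&rhs_block, rhs.getSubMapper(k0, j), bk, bn);
//   kernel.invoke(out.getSubMapper(i, j), lhs_block, rhs_block, bm, bk, bn,
//                 /*alpha=*/1, /*beta=*/1);
//   kernel.deallocate(device, handle);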
}  // end namespace internal

// Tensor contraction params that make it possible to map 2-dimensional output
// matrix coordinates back to the output tensor dimensions.
struct TensorContractionParams {
  // The TensorContraction evaluator assumes that both tensors are in ColMajor
  // layout; if the tensors are RowMajor the evaluator swaps lhs with rhs.
  bool swapped_arguments;
};
// An output kernel allows extra computation to be fused into the tensor
// contraction. NoOpOutputKernel leaves the finished output blocks untouched.
struct NoOpOutputKernel {
  /**
   * The tensor contraction evaluator calls this kernel after finishing each
   * block of the output matrix. Output blocks belong to the 2-dimensional
   * output tensor.
   */
  template <typename Index, typename Scalar>
  EIGEN_ALWAYS_INLINE void operator()(
      const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
      const TensorContractionParams& params, Index i, Index j, Index num_rows,
      Index num_cols) const {
    EIGEN_UNUSED_VARIABLE(output_mapper);
    EIGEN_UNUSED_VARIABLE(params);
    EIGEN_UNUSED_VARIABLE(i);
    EIGEN_UNUSED_VARIABLE(j);
    EIGEN_UNUSED_VARIABLE(num_rows);
    EIGEN_UNUSED_VARIABLE(num_cols);
  }
};
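// Example (illustrative sketch, not part of the original source): any functor
// with the same operator() signature as NoOpOutputKernel can post-process
// finished output blocks. The struct below, which scales every coefficient of
// a block in place, is hypothetical and only shows the shape of such a kernel;
// it can be passed as the extra argument of TensorBase::contract.
//
//   struct ScaleOutputKernel {
//     template <typename Index, typename Scalar>
//     EIGEN_ALWAYS_INLINE void operator()(
//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
//         const TensorContractionParams& /*params*/, Index /*i*/, Index /*j*/,
//         Index num_rows, Index num_cols) const {
//       for (Index c = 0; c < num_cols; ++c)
//         for (Index r = 0; r < num_rows; ++r)
//           output_mapper(r, c) *= Scalar(2);
//     }
//   };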
template<typename Indices, typename LhsXprType, typename RhsXprType,
         typename OutputKernelType = const NoOpOutputKernel>
class TensorContractionOp
    : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
    typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
                                           typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
        const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims,
        const OutputKernelType& output_kernel = OutputKernelType())
        : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims),
          m_output_kernel(output_kernel) {}

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Indices& indices() const { return m_indices; }

    /** \returns the nested expressions */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const internal::remove_all_t<typename LhsXprType::Nested>&
    lhsExpression() const { return m_lhs_xpr; }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const internal::remove_all_t<typename RhsXprType::Nested>&
    rhsExpression() const { return m_rhs_xpr; }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const OutputKernelType& outputKernel() const { return m_output_kernel; }

  protected:
    typename LhsXprType::Nested m_lhs_xpr;
    typename RhsXprType::Nested m_rhs_xpr;
    const Indices m_indices;
    const OutputKernelType m_output_kernel;
};
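// Usage example (illustrative, not part of the original source): contracting
// two rank-2 tensors over one index pair is an ordinary matrix product.
//
//   Eigen::Tensor<float, 2> a(3, 4), b(4, 5);
//   a.setRandom(); b.setRandom();
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) };
//   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // c is 3 x 5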
template<typename Derived>
struct TensorContractionEvaluatorBase : internal::no_assignment_operator
{
  typedef typename internal::traits<Derived>::Indices Indices;
  typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
  typedef typename internal::traits<Derived>::RightArgType RightArgType;
  typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
  typedef typename internal::traits<Derived>::Device Device;

  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
  enum {
    IsAligned = true,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = false,
    PreferBlockAccess = false,
    CoordAccess = false,  // to be implemented
    RawAccess = true
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  // Most of the code assumes that both input tensors are ColMajor. If the
  // inputs are RowMajor, we "cheat" by swapping the LHS and RHS: to compute
  // A * B = C the code pretends that B is the LHS and A is the RHS.
  typedef std::conditional_t<
      static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
  typedef std::conditional_t<
      static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;

  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;

  static constexpr int LDims =
      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
  static constexpr int RDims =
      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
  static constexpr int ContractDims = internal::array_size<Indices>::value;
  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;

  typedef array<Index, ContractDims> contract_t;
  typedef array<Index, LDims - ContractDims> left_nocontract_t;
  typedef array<Index, RDims - ContractDims> right_nocontract_t;

  typedef DSizes<Index, NumDims> Dimensions;
  EIGEN_STRONG_INLINE
  TensorContractionEvaluatorBase(const XprType& op, const Device& device)
      : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
                          op.lhsExpression(), op.rhsExpression()), device),
        m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
                           op.rhsExpression(), op.lhsExpression()), device),
        m_device(device),
        m_output_kernel(op.outputKernel()),
        m_result(NULL) {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
                         static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
                        YOU_MADE_A_PROGRAMMING_MISTAKE);

    DSizes<Index, LDims> eval_left_dims;
    DSizes<Index, RDims> eval_right_dims;
    array<IndexPair<Index>, ContractDims> eval_op_indices;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // For ColMajor, we keep using the existing dimensions.
      for (int i = 0; i < LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[i];
      }
      for (int i = 0; i < RDims; i++) {
        eval_right_dims[i] = m_rightImpl.dimensions()[i];
      }
      // We keep the pairs of contracting indices.
      for (int i = 0; i < ContractDims; i++) {
        eval_op_indices[i].first = op.indices()[i].first;
        eval_op_indices[i].second = op.indices()[i].second;
      }
    } else {
      // For RowMajor, we reverse the existing dimensions.
      for (int i = 0; i < LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
      }
      for (int i = 0; i < RDims; i++) {
        eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
      }
      // We also need to flip the pairs of contracting indices to match the
      // reversed dimensions.
      for (int i = 0; i < ContractDims; i++) {
        eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
        eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
      }
    }

    // Check for duplicate axes and make sure the first index in eval_op_indices
    // is increasing. Using O(n^2) sorting is OK since ContractDims is small.
    for (int i = 0; i < ContractDims; i++) {
      for (int j = i + 1; j < ContractDims; j++) {
        eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
                     eval_op_indices[j].second != eval_op_indices[i].second &&
                     "contraction axes should be unique");
        if (eval_op_indices[j].first < eval_op_indices[i].first) {
          numext::swap(eval_op_indices[j], eval_op_indices[i]);
        }
      }
    }

    array<Index, LDims> lhs_strides;
    lhs_strides[0] = 1;
    for (int i = 0; i < LDims - 1; ++i) {
      lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
    }

    array<Index, RDims> rhs_strides;
    rhs_strides[0] = 1;
    for (int i = 0; i < RDims - 1; ++i) {
      rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
    }

    if (m_i_strides.size() > 0) m_i_strides[0] = 1;
    if (m_j_strides.size() > 0) m_j_strides[0] = 1;
    if (m_k_strides.size() > 0) m_k_strides[0] = 1;

    m_i_size = 1;
    m_j_size = 1;
    m_k_size = 1;

    // To compute the output dimensions, we simply concatenate the
    // non-contracting dimensions of the left tensor and then those of the
    // right tensor. We also compute the strides corresponding to these
    // non-contracting dimensions.
    m_lhs_inner_dim_contiguous = true;
    int dim_idx = 0;
    Index nocontract_idx = 0;

    for (int i = 0; i < LDims; i++) {
      // Find out whether we are contracting on index i of the left tensor.
      bool contracting = false;
      for (int j = 0; j < ContractDims; j++) {
        if (eval_op_indices[j].first == i) {
          contracting = true;
          break;
        }
      }
      if (!contracting) {
        // Add this dimension's size to the output dimensions.
        m_dimensions[dim_idx] = eval_left_dims[i];
        m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
        if (dim_idx != i) {
          m_lhs_inner_dim_contiguous = false;
        }
        if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
          m_i_strides[nocontract_idx+1] =
              m_i_strides[nocontract_idx] * eval_left_dims[i];
        } else {
          m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
        }
        dim_idx++;
        nocontract_idx++;
      }
    }

    nocontract_idx = 0;
    for (int i = 0; i < RDims; i++) {
      bool contracting = false;
      // Find out whether we are contracting on index i of the right tensor.
      for (int j = 0; j < ContractDims; j++) {
        if (eval_op_indices[j].second == i) {
          contracting = true;
          break;
        }
      }
      if (!contracting) {
        m_dimensions[dim_idx] = eval_right_dims[i];
        if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
          m_j_strides[nocontract_idx+1] =
              m_j_strides[nocontract_idx] * eval_right_dims[i];
        } else {
          m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
        }
        m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
        dim_idx++;
        nocontract_idx++;
      }
    }

    // Now compute the strides corresponding to the contracting dimensions.
    // Non-contracting axes keep their tensor order in the matrix, but the
    // contracting axes may not. Since the contracting axes must be of the same
    // size in each tensor, we only look at the first tensor here.
    m_rhs_inner_dim_contiguous = true;
    m_rhs_inner_dim_reordered = false;
    for (int i = 0; i < ContractDims; i++) {
      Index left = eval_op_indices[i].first;
      Index right = eval_op_indices[i].second;

      Index size = eval_left_dims[left];
      eigen_assert(size == eval_right_dims[right] &&
                   "Contraction axes must be same size");

      if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
        m_k_strides[i+1] = m_k_strides[i] * size;
      } else {
        m_k_size = m_k_strides[i] * size;
      }
      m_left_contracting_strides[i] = lhs_strides[left];
      m_right_contracting_strides[i] = rhs_strides[right];

      if (i > 0 && right < eval_op_indices[i-1].second) {
        m_rhs_inner_dim_reordered = true;
      }
      if (right != i) {
        m_rhs_inner_dim_contiguous = false;
      }
    }

    // If the layout is RowMajor, we need to reverse m_dimensions.
    if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
      for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
        numext::swap(m_dimensions[i], m_dimensions[j]);
      }
    }

    // Parameters that allow the output kernel to map output matrix coordinates
    // (i, j) back to the original tensor dimensions.
    m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
  }
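  // Worked example (illustrative, not from the original source): contracting a
  // ColMajor lhs of dimensions (2, 3, 4) with a rhs of dimensions (3, 5) over
  // the pair IndexPair<Index>(1, 0) contracts the two size-3 axes. The output
  // keeps the non-contracted dimensions in order, (2, 4, 5), and the setup
  // above yields m_i_size = 2 * 4 = 8, m_j_size = 5 and m_k_size = 3.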
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    m_rightImpl.evalSubExprsIfNeeded(NULL);
    if (data) {
      evalTo(data);
      return false;
    } else {
      m_result = static_cast<EvaluatorPointerType>(
          m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
      evalTo(m_result);
      return true;
    }
  }
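  // Note (added commentary, not from the original source): evalSubExprsIfNeeded
  // returns false when the caller supplied a destination buffer (the result was
  // written directly into `data`), and true when the evaluator had to allocate
  // m_result itself, in which case coeff()/packet() read from that buffer and
  // cleanup() releases it.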
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType dest, EvalSubExprsCallback done) {
    m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
      m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
        if (dest) {
          evalToAsync(dest, [done]() { done(false); });
        } else {
          m_result = static_cast<EvaluatorPointerType>(
              m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
          evalToAsync(m_result, [done]() { done(true); });
        }
      });
    });
  }
#endif  // EIGEN_USE_THREADS
#ifndef TENSOR_CONTRACTION_DISPATCH
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS)    \
  if (this->m_lhs_inner_dim_contiguous) {                       \
    if (this->m_rhs_inner_dim_contiguous) {                     \
      if (this->m_rhs_inner_dim_reordered) {                    \
        METHOD<true, true, true, ALIGNMENT> ARGS;               \
      } else {                                                  \
        METHOD<true, true, false, ALIGNMENT> ARGS;              \
      }                                                         \
    } else {                                                    \
      if (this->m_rhs_inner_dim_reordered) {                    \
        METHOD<true, false, true, ALIGNMENT> ARGS;              \
      } else {                                                  \
        METHOD<true, false, false, ALIGNMENT> ARGS;             \
      }                                                         \
    }                                                           \
  } else {                                                      \
    if (this->m_rhs_inner_dim_contiguous) {                     \
      if (this->m_rhs_inner_dim_reordered) {                    \
        METHOD<false, true, true, ALIGNMENT> ARGS;              \
      } else {                                                  \
        METHOD<false, true, false, ALIGNMENT> ARGS;             \
      }                                                         \
    } else {                                                    \
      if (this->m_rhs_inner_dim_reordered) {                    \
        METHOD<false, false, true, ALIGNMENT> ARGS;             \
      } else {                                                  \
        METHOD<false, false, false, ALIGNMENT> ARGS;            \
      }                                                         \
    }                                                           \
  }
#endif
#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
  if (this->m_lhs_inner_dim_contiguous) {                                    \
    if (this->m_rhs_inner_dim_contiguous) {                                  \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN;            \
      } else {                                                               \
        (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN;           \
      }                                                                      \
    } else {                                                                 \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN;           \
      } else {                                                               \
        (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN;          \
      }                                                                      \
    }                                                                        \
  } else {                                                                   \
    if (this->m_rhs_inner_dim_contiguous) {                                  \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN;           \
      } else {                                                               \
        (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN;          \
      }                                                                      \
    } else {                                                                 \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN;          \
      } else {                                                               \
        (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN;         \
      }                                                                      \
    }                                                                        \
  }
#endif
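// Illustrative note (not from the original source): the dispatch macros above
// turn the three runtime booleans into compile-time template arguments. For
// example, TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential,
// Unaligned, (buffer)) expands to a chain of if/else branches; when
// m_lhs_inner_dim_contiguous and m_rhs_inner_dim_contiguous are true and
// m_rhs_inner_dim_reordered is false, the branch taken is
//
//   this->template evalProductSequential<true, true, false, Unaligned>(buffer);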
  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
    static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalToCallback>
  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
    static_cast<const Derived*>(this)
        ->template evalProductAsync<EvalToCallback, Unaligned>(buffer,
                                                               std::move(done));
  }
#endif  // EIGEN_USE_THREADS
  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
            bool rhs_inner_dim_reordered, int Alignment>
  void evalProductSequential(Scalar* buffer) const {
    if (this->m_j_size == 1) {
      // A single output column: degenerate to a matrix-vector product.
      this->template evalGemv<lhs_inner_dim_contiguous,
                              rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                              Alignment>(buffer);
    } else {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
                              rhs_inner_dim_reordered, Alignment>(buffer);
    }
  }
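  // Illustrative note (not from the original source): m_j_size is the product
  // of the rhs non-contracted dimensions, so it equals 1 when every rhs
  // dimension is contracted, e.g. contracting a rank-2 lhs with a rank-1 rhs
  // over the rhs's only axis. In that case the contraction is a matrix-vector
  // product and evalGemv avoids the GEMM blocking machinery.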
  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
            bool rhs_inner_dim_reordered, int Alignment>
#if !defined(EIGEN_HIPCC)
  EIGEN_DEVICE_FUNC
#endif
  void evalGemv(Scalar* buffer) const {
    const Index rows = m_i_size;
    const Index cols = m_k_size;

    typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
    typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
    typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
    typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
    const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
    const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
    const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
    const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;

    typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
                                                   LeftEvaluator, left_nocontract_t,
                                                   contract_t, lhs_packet_size,
                                                   lhs_inner_dim_contiguous,
                                                   false, lhs_alignment> LhsMapper;

    typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
                                                   RightEvaluator, right_nocontract_t,
                                                   contract_t, rhs_packet_size,
                                                   rhs_inner_dim_contiguous,
                                                   rhs_inner_dim_reordered, rhs_alignment> RhsMapper;

    LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
                  m_left_contracting_strides, m_k_strides);
    RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
                  m_right_contracting_strides, m_k_strides);

    const Scalar alpha(1);
    const Index resIncr(1);

    // Zero out the result buffer, which must hold at least `rows` scalars.
    m_device.fill(buffer, buffer + rows, Scalar(0));

    internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
        rows, cols, lhs, rhs,
        buffer, resIncr, alpha);

    typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
    m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params,
                    static_cast<Index>(0), static_cast<Index>(0), rows,
                    static_cast<Index>(1));
  }
  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
            bool rhs_inner_dim_reordered, int Alignment>
#if !defined(EIGEN_HIPCC)
  EIGEN_DEVICE_FUNC
#endif
  void evalGemm(Scalar* buffer) const {
    // columns in the slice on the left side, rows on the right side
    const Index k = this->m_k_size;
    this->template evalGemmPartial<lhs_inner_dim_contiguous,
                                   rhs_inner_dim_contiguous,
                                   rhs_inner_dim_reordered,
                                   Alignment, /*use_output_kernel*/ true>(buffer, 0, k, 1);
  }
  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
            bool rhs_inner_dim_reordered, int Alignment>
  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(
      Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
    evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
                    rhs_inner_dim_reordered, Alignment,
                    /*use_output_kernel*/ false>(buffer, k_start, k_end,
                                                 num_threads);
  }
  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
            bool rhs_inner_dim_reordered, int Alignment, bool use_output_kernel>
  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start,
                                         Index k_end, int num_threads) const {
    eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
    // columns in the slice on the left side, rows on the right side
    const Index k_slice = k_end - k_start;

    // rows on the left side
    const Index m = this->m_i_size;

    // columns on the right side
    const Index n = this->m_j_size;

    // define data mappers for Lhs and Rhs
    typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
    typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;

    typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
    typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;

    const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
    const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;

    typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
                                                   LeftEvaluator, left_nocontract_t,
                                                   contract_t, lhs_packet_size,
                                                   lhs_inner_dim_contiguous,
                                                   false, Unaligned> LhsMapper;

    typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
                                                   RightEvaluator, right_nocontract_t,
                                                   contract_t, rhs_packet_size,
                                                   rhs_inner_dim_contiguous,
                                                   rhs_inner_dim_reordered, Unaligned> RhsMapper;

    typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;

    typedef internal::TensorContractionKernel<
        Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
        TensorContractionKernel;

    // initialize data mappers
    LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
                  this->m_left_contracting_strides, this->m_k_strides);

    RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
                  this->m_right_contracting_strides, this->m_k_strides);

    OutputMapper output(buffer, m);

    // Sizes of the blocks to load in cache. See the Goto paper for details.
    internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar,
                                        Index, internal::ShardByCol>
        blocking(k_slice, m, n, num_threads);
    const Index kc = blocking.kc();
    const Index mc = numext::mini(m, blocking.mc());
    const Index nc = numext::mini(n, blocking.nc());

    typedef typename TensorContractionKernel::LhsBlock LhsBlock;
    typedef typename TensorContractionKernel::RhsBlock RhsBlock;

    LhsBlock blockA;
    RhsBlock blockB;

    TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);

    typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
    const BlockMemHandle packed_mem =
        kernel.allocate(this->m_device, &blockA, &blockB);

    // If the contraction kernel does not support beta, explicitly initialize
    // the output buffer with zeroes.
    if (!TensorContractionKernel::HasBeta) {
      this->m_device.fill(buffer, buffer + m * n, Scalar(0));
    }

    for (Index i2 = 0; i2 < m; i2 += mc) {
      const Index actual_mc = numext::mini(i2 + mc, m) - i2;
      for (Index k2 = k_start; k2 < k_end; k2 += kc) {
        // Make sure we don't overshoot the right edge of the left matrix, then
        // pack a vertical panel.
        const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
        kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        // If the kernel supports beta, there is no need to initialize the
        // output buffer with zeroes beforehand.
        const Scalar alpha = Scalar(1);
        const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start)
                                ? Scalar(0)
                                : Scalar(1);

        // series of horizontal blocks
        for (Index j2 = 0; j2 < n; j2 += nc) {
          // Make sure we don't overshoot the right edge of the right matrix,
          // then pack the block.
          const Index actual_nc = numext::mini(j2 + nc, n) - j2;
          kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc,
                         actual_nc);

          // Call the gebp (matrix) kernel.
          const OutputMapper output_mapper = output.getSubMapper(i2, j2);
          kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc,
                        actual_nc, alpha, beta);

          // We are done with this [i2, j2] output block.
          if (use_output_kernel && k2 + kc >= k_end) {
            m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2,
                            actual_mc, actual_nc);
          }
        }
      }
    }

    kernel.deallocate(this->m_device, packed_mem);
  }
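  // Illustrative note (not from the original source): for m = 1024, n = 1024,
  // k = 256 with block sizes mc = 256, kc = 128 and nc = 512, the loops above
  // pack 4 * 2 = 8 lhs panels, pack 4 * 2 * 2 = 16 rhs blocks, and invoke the
  // GEBP kernel 16 times; the output kernel fires once per (i2, j2) block, i.e.
  // 8 times, on the final k2 iteration.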
  EIGEN_STRONG_INLINE void cleanup() {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();

    if (m_result != NULL) {
      m_device.deallocate(m_result);
      m_result = NULL;
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_result[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
 protected:
  Dimensions m_dimensions;

  contract_t m_k_strides;
  contract_t m_left_contracting_strides;
  contract_t m_right_contracting_strides;

  bool m_lhs_inner_dim_contiguous;
  bool m_rhs_inner_dim_contiguous;
  bool m_rhs_inner_dim_reordered;

  left_nocontract_t m_i_strides;
  right_nocontract_t m_j_strides;
  left_nocontract_t m_left_nocontract_strides;
  right_nocontract_t m_right_nocontract_strides;

  Index m_i_size;
  Index m_j_size;
  Index m_k_size;

  TensorContractionParams m_tensor_contraction_params;

  TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
  TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
  const Device EIGEN_DEVICE_REF m_device;
  OutputKernelType m_output_kernel;
  EvaluatorPointerType m_result;
};
// evaluator for the default device
template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> :
    public TensorContractionEvaluatorBase<
        TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> > {
  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
  typedef TensorContractionEvaluatorBase<Self> Base;

  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;

  // Most of the code assumes that both input tensors are ColMajor. If the
  // inputs are RowMajor, we "cheat" by swapping the LHS and RHS.
  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;

  static constexpr int LDims =
      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
  static constexpr int RDims =
      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
  static constexpr int ContractDims = internal::array_size<Indices>::value;

  typedef array<Index, ContractDims> contract_t;
  typedef array<Index, LDims - ContractDims> left_nocontract_t;
  typedef array<Index, RDims - ContractDims> right_nocontract_t;

  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;

  typedef DSizes<Index, NumDims> Dimensions;

  TensorEvaluator(const XprType& op, const Device& device) :
      Base(op, device) { }

  template <int Alignment>
  void evalProduct(Scalar* buffer) const {
    TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential,
                                Alignment, (buffer));
  }
};
}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H