Eigen-unsupported  3.4.90 (git rev 67eeba6e720c5745abc77ae6c92ce0a44aa7b7ae)
TensorPadding.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

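/** \class TensorPadding
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor padding class.
  * At the moment only padding with a constant value is supported.
  *
  */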
namespace internal {
template<typename PaddingDimensions, typename XprType>
struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<typename PaddingDimensions, typename XprType>
struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
};

template<typename PaddingDimensions, typename XprType>
struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1, typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type>
{
  typedef TensorPaddingOp<PaddingDimensions, XprType> type;
};

}  // end namespace internal



template<typename PaddingDimensions, typename XprType>
class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value)
      : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC
  const PaddingDimensions& padding() const { return m_padding_dims; }
  EIGEN_DEVICE_FUNC
  Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC
  const internal::remove_all_t<typename XprType::Nested>&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
  const PaddingDimensions m_padding_dims;
  const Scalar m_padding_value;
};
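
// A minimal usage sketch (illustrative only, not part of the original header).
// TensorPaddingOp is normally constructed through TensorBase::pad(), which
// pads with zero by default or with an explicit padding value. The tensor
// shapes and variable names below are hypothetical.
//
//   Eigen::Tensor<float, 2> input(3, 4);
//   input.setRandom();
//
//   // One (before, after) padding pair per dimension.
//   Eigen::array<std::pair<int, int>, 2> paddings;
//   paddings[0] = std::make_pair(1, 2);  // 1 before, 2 after along dim 0
//   paddings[1] = std::make_pair(0, 3);  // 3 after along dim 1
//
//   Eigen::Tensor<float, 2> zero_padded = input.pad(paddings);        // pad with 0
//   Eigen::Tensor<float, 2> ones_padded = input.pad(paddings, 1.0f);  // pad with 1
//   // zero_padded has dimensions (1+3+2) x (0+4+3) = 6 x 7.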


// Eval as rvalue
template<typename PaddingDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device>
{
  typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<PaddingDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = true,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess = true,
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_device(device)
  {
    // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
    // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
    // of 1 element first and then pad.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
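
    // Illustrative sketch (hypothetical names): a rank-0 tensor `scalar` has
    // to be reshaped to a rank-1 tensor of one element before it can be
    // padded, e.g.
    //   scalar.reshape(Eigen::array<Index, 1>{{1}}).pad(paddings);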

    // Compute dimensions
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] += m_padding[i].first + m_padding[i].second;
    }
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
      }
      m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1];
    } else {
      m_inputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
        m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1];
      }
      m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (isPaddingAtIndexForDim(index, 0)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[0].first);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i+1];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i+1];
      }
      if (isPaddingAtIndexForDim(index, NumDims-1)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[NumDims-1].first);
    }
    return m_impl.coeff(inputIndex);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return packetColMajor(index);
    }
    return packetRowMajor(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims; ++i)
        updateCostPerDimension(cost, i, i == 0);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i >= 0; --i)
        updateCostPerDimension(cost, i, i == NumDims - 1);
    }
    return cost;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size),
        m_impl.getResourceRequirements());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    // If one of the dimensions is zero, return empty block view.
    if (desc.size() == 0) {
      return TensorBlock(internal::TensorBlockKind::kView, NULL,
                         desc.dimensions());
    }

    static const bool IsColMajor = Layout == static_cast<int>(ColMajor);
    const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1;

    Index offset = desc.offset();

    // Compute offsets in the output tensor corresponding to the desc.offset().
    DSizes<Index, NumDims> output_offsets;
    for (int i = NumDims - 1; i > 0; --i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      const int stride_dim = IsColMajor ? dim : dim + 1;
      output_offsets[dim] = offset / m_outputStrides[stride_dim];
      offset -= output_offsets[dim] * m_outputStrides[stride_dim];
    }
    output_offsets[inner_dim_idx] = offset;

    // Offsets in the input corresponding to output offsets.
    DSizes<Index, NumDims> input_offsets = output_offsets;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offsets[dim] = input_offsets[dim] - m_padding[dim].first;
    }

    // Compute the offset in the input buffer. At this point it might be
    // illegal and point outside of the input buffer, because we don't check
    // for negative offsets; it will be corrected in the block iteration loop
    // below.
    Index input_offset = 0;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offset += input_offsets[dim] * m_inputStrides[dim];
    }

    // Destination buffer and scratch buffer are both indexed from 0 and have
    // the same dimensions as the requested block (for the destination buffer
    // this property is guaranteed by `desc.destination()`).
    Index output_offset = 0;
    const DSizes<Index, NumDims> output_strides =
        internal::strides<Layout>(desc.dimensions());

    // NOTE(ezhulenev): We initialize block iteration state for `NumDims - 1`
    // dimensions, skipping the innermost dimension. In theory it should be
    // possible to squeeze matching innermost dimensions, however in practice
    // that did not show any improvements in benchmarks. Also, in practice the
    // first outer dimension usually has padding, which prevents squeezing.

    // Initialize output block iterator state. Dimensions in this array are
    // always in innermost -> outermost order (col major layout).
    array<BlockIteratorState, NumDims - 1> it;
    for (int i = 0; i < NumDims - 1; ++i) {
      const int dim = IsColMajor ? i + 1 : NumDims - i - 2;
      it[i].count = 0;
      it[i].size = desc.dimension(dim);

      it[i].input_stride = m_inputStrides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      it[i].output_stride = output_strides[dim];
      it[i].output_span = it[i].output_stride * (it[i].size - 1);
    }

    const Index input_inner_dim_size =
        static_cast<Index>(m_impl.dimensions()[inner_dim_idx]);

    // Total output size.
    const Index output_size = desc.size();

    // We will fill inner dimension of this size in the output. It might be
    // larger than the inner dimension in the input, so we might have to pad
    // before/after we copy values from the input inner dimension.
    const Index output_inner_dim_size = desc.dimension(inner_dim_idx);

    // How many values to fill with padding BEFORE reading from the input inner
    // dimension.
    const Index output_inner_pad_before_size =
        input_offsets[inner_dim_idx] < 0
            ? numext::mini(numext::abs(input_offsets[inner_dim_idx]),
                           output_inner_dim_size)
            : 0;

    // How many values we can actually copy from the input inner dimension.
    const Index output_inner_copy_size = numext::mini(
        // Want to copy from input.
        (output_inner_dim_size - output_inner_pad_before_size),
        // Can copy from input.
        numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] +
                                             output_inner_pad_before_size),
                     Index(0)));

    eigen_assert(output_inner_copy_size >= 0);

    // How many values to fill with padding AFTER reading from the input inner
    // dimension.
    const Index output_inner_pad_after_size =
        (output_inner_dim_size - output_inner_copy_size -
         output_inner_pad_before_size);

    // Sanity check, sum of all sizes must be equal to the output size.
    eigen_assert(output_inner_dim_size ==
                 (output_inner_pad_before_size + output_inner_copy_size +
                  output_inner_pad_after_size));

    // Keep track of current coordinates and padding in the output.
    DSizes<Index, NumDims> output_coord = output_offsets;
    DSizes<Index, NumDims> output_padded;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
    }

    typedef internal::StridedLinearBufferCopy<ScalarNoConst, Index> LinCopy;

    // Prepare storage for the materialized padding result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);

    // TODO(ezhulenev): Squeeze multiple non-padded inner dimensions into a
    // single logical inner dimension.

    // When possible we squeeze writes over the innermost dimension (only if it
    // is non-padded) together with the first padded dimension. This reduces
    // the number of calls to LinCopy and makes better use of vector
    // instructions.
    const bool squeeze_writes =
        NumDims > 1 &&
        // inner dimension is not padded
        (input_inner_dim_size == m_dimensions[inner_dim_idx]) &&
        // and equal to the block inner dimension
        (input_inner_dim_size == output_inner_dim_size);

    const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1;

    // Maximum coordinate on a squeeze dimension that we can write to.
    const Index squeeze_max_coord =
        squeeze_writes ? numext::mini(
                             // max non-padded element in the input
                             static_cast<Index>(m_dimensions[squeeze_dim] -
                                                m_padding[squeeze_dim].second),
                             // max element in the output buffer
                             static_cast<Index>(output_offsets[squeeze_dim] +
                                                desc.dimension(squeeze_dim)))
                       : static_cast<Index>(0);

    // Iterate copying data from `m_impl.data()` to the output buffer.
    for (Index size = 0; size < output_size;) {
      // Detect if we are in the padded region (exclude innermost dimension).
      bool is_padded = false;
      for (int j = 1; j < NumDims; ++j) {
        const int dim = IsColMajor ? j : NumDims - j - 1;
        is_padded = output_padded[dim];
        if (is_padded) break;
      }

      if (is_padded) {
        // Fill single innermost dimension with padding value.
        size += output_inner_dim_size;

        LinCopy::template Run<LinCopy::Kind::FillLinear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(0, 0, &m_paddingValue),
            output_inner_dim_size);


      } else if (squeeze_writes) {
        // Squeeze multiple reads from innermost dimensions.
        const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim];
        size += output_inner_dim_size * squeeze_num;

        // Copy `squeeze_num` inner dimensions from input to output.
        LinCopy::template Run<LinCopy::Kind::Linear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(input_offset, 1, m_impl.data()),
            output_inner_dim_size * squeeze_num);

        // Update the iteration state for only `squeeze_num - 1` of the
        // processed inner dimensions; the state update at the end of the loop
        // handles the last processed inner dimension.
        it[0].count += (squeeze_num - 1);
        input_offset += it[0].input_stride * (squeeze_num - 1);
        output_offset += it[0].output_stride * (squeeze_num - 1);
        output_coord[squeeze_dim] += (squeeze_num - 1);

      } else {
        // Single read from innermost dimension.
        size += output_inner_dim_size;

        { // Fill with padding before copying from input inner dimension.
          const Index out = output_offset;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue),
              output_inner_pad_before_size);
        }

        { // Copy data from input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size;
          const Index in = input_offset + output_inner_pad_before_size;

          eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL);

          LinCopy::template Run<LinCopy::Kind::Linear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(in, 1, m_impl.data()),
              output_inner_copy_size);
        }

        { // Fill with padding after copying from input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size +
                            output_inner_copy_size;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue),
              output_inner_pad_after_size);
        }
      }

      for (int j = 0; j < NumDims - 1; ++j) {
        const int dim = IsColMajor ? j + 1 : NumDims - j - 2;

        if (++it[j].count < it[j].size) {
          input_offset += it[j].input_stride;
          output_offset += it[j].output_stride;
          output_coord[dim] += 1;
          output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
          break;
        }
        it[j].count = 0;
        input_offset -= it[j].input_span;
        output_offset -= it[j].output_span;
        output_coord[dim] -= it[j].size - 1;
        output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

  private:
  struct BlockIteratorState {
    BlockIteratorState()
        : count(0),
          size(0),
          input_stride(0),
          input_span(0),
          output_stride(0),
          output_span(0) {}

    Index count;
    Index size;
    Index input_stride;
    Index input_span;
    Index output_stride;
    Index output_span;
  };

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(
      Index index, int dim_index) const {
    return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index < m_padding[dim_index].first) ||
           (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index >= m_dimensions[dim_index] - m_padding[dim_index].second);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(
      int dim_index) const {
    return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(
      int dim_index) const {
    return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
  }


  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
    const double in = static_cast<double>(m_impl.dimensions()[i]);
    const double out = in + m_padding[i].first + m_padding[i].second;
    if (out == 0)
      return;
    const double reduction = in / out;
    cost *= reduction;
    if (first) {
      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
                                 reduction * (1 * TensorOpCost::AddCost<Index>()));
    } else {
      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
                                 2 * TensorOpCost::MulCost<Index>() +
                                 reduction * (2 * TensorOpCost::MulCost<Index>() +
                                              1 * TensorOpCost::DivCost<Index>()));
    }
  }

  protected:

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = NumDims - 1; i > 0; --i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
      const Index lastPaddedRight = m_outputStrides[i+1];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[0].first;
    const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
    const Index lastPaddedRight = m_outputStrides[1];

    if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[0].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
  {
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < NumDims - 1; ++i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
      const Index lastPaddedRight = m_outputStrides[i];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      }
      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i+1];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i+1];
      }
      else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[NumDims-1].first;
    const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
    const Index lastPaddedRight = m_outputStrides[NumDims-1];

    if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    }
    else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[NumDims-1].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;
  array<Index, NumDims+1> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  PaddingDimensions m_padding;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
};




} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H