#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template<typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
{
  typedef const TensorGeneratorOp<Generator, XprType>& type;
};
template<typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
{
  typedef TensorGeneratorOp<Generator, XprType> type;
};

}  // end namespace internal
/** \class TensorGeneratorOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor generator class.
  */
template<typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
        : m_xpr(expr), m_generator(generator) {}

    EIGEN_DEVICE_FUNC
    const Generator& generator() const { return m_generator; }

    EIGEN_DEVICE_FUNC
    const internal::remove_all_t<typename XprType::Nested>&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const Generator m_generator;
};
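// Usage sketch (illustrative only, not part of this header): a generator is
// any functor callable with the array of per-dimension coordinates of a
// coefficient; the expression is normally obtained through
// TensorBase::generate(). The functor name below is hypothetical.
//
//   struct CoordinateSum {
//     float operator()(const Eigen::array<Eigen::Index, 2>& coords) const {
//       return static_cast<float>(coords[0] + coords[1]);
//     }
//   };
//
//   Eigen::Tensor<float, 2> input(7, 11);
//   Eigen::Tensor<float, 2> result = input.generate(CoordinateSum());
//   // result(i, j) == i + j for every coefficient.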
// Eval as rvalue
template<typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
{
  typedef TensorGeneratorOp<Generator, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  static constexpr int NumDims = internal::array_size<Dimensions>::value;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned         = false,
    PacketAccess      = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess       = true,
    PreferBlockAccess = true,
    CoordAccess       = false,
    RawAccess         = false
  };

  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_generator(op.generator())
  {
    TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
    m_dimensions = argImpl.dimensions();

    // Precompute strides (and their fast integer divisors) used to map a
    // linear coefficient index back to per-dimension coordinates.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
  }
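  // coeff() maps the flat coefficient index to per-dimension coordinates and
  // forwards them to the user-supplied generator functor.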
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    array<Index, NumDims> coords;
    extract_coordinates(index, coords);
    return m_generator(coords);
  }
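  // packet() has no native vectorized path: it generates packetSize scalars
  // one by one into an aligned buffer and loads them as a packet.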
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    eigen_assert(index + packetSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
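  // Block evaluation sizes destination blocks to roughly fit the device's
  // first-level cache; the generator itself is assumed to have a uniform,
  // unknown per-coefficient cost.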
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.firstLevelCacheSize();
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(
        target_size);
  }
  struct BlockIteratorState {
    Index stride;
    Index span;
    Index size;
    Index count;
  };
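  // block() materializes a whole destination block at once: it walks the block
  // in inner-to-outer (column-major) iteration order, keeps the current
  // multi-dimensional coordinates up to date incrementally, and calls the
  // generator for every coefficient, stepping the inner dimension in chunks of
  // packet_size where possible.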
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    static const bool is_col_major =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);

    // Compute spatial coordinates for the first block element.
    array<Index, NumDims> coords;
    extract_coordinates(desc.offset(), coords);
    array<Index, NumDims> initial_coords = coords;

    // Offset in the output block buffer.
    Index offset = 0;

    // Initialize output block iterator state. Dimensions in this array are
    // always in inner_most -> outer_most order (col major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = is_col_major ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
      it[i].span = it[i].stride * (it[i].size - 1);
      it[i].count = 0;
    }
    eigen_assert(it[0].stride == 1);

    // Prepare storage for the materialized generator result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);

    CoeffReturnType* block_buffer = block_storage.data();

    static const int packet_size = PacketType<CoeffReturnType, Device>::size;

    static const int inner_dim = is_col_major ? 0 : NumDims - 1;
    const Index inner_dim_size = it[0].size;
    const Index inner_dim_vectorized = inner_dim_size - packet_size;

    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      Index i = 0;
      // Generate data for the vectorized part of the inner-most dimension.
      for (; i <= inner_dim_vectorized; i += packet_size) {
        for (Index j = 0; j < packet_size; ++j) {
          array<Index, NumDims> j_coords = coords;  // Break loop dependence.
          j_coords[inner_dim] += j;
          *(block_buffer + offset + i + j) = m_generator(j_coords);
        }
        coords[inner_dim] += packet_size;
      }

      // Finalize the non-vectorized remainder of the inner-most dimension.
      for (; i < inner_dim_size; ++i) {
        *(block_buffer + offset + i) = m_generator(coords);
        coords[inner_dim]++;
      }
      coords[inner_dim] = initial_coords[inner_dim];

      // For a 1d tensor we need to generate only one inner-most dimension.
      if (NumDims == 1) break;

      // Update offset and coordinates for the next inner-most run.
      for (i = 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          offset += it[i].stride;
          coords[is_col_major ? i : NumDims - 1 - i]++;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        coords[is_col_major ? i : NumDims - 1 - i] =
            initial_coords[is_col_major ? i : NumDims - 1 - i];
        offset -= it[i].span;
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool) const {
    // Placeholder cost: the generator's own cost is not modeled.
    return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
                                  TensorOpCost::MulCost<Scalar>());
  }

  // There is no backing buffer; coefficients are always computed on demand.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {}
#endif

 protected:
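  // Converts a flat index into per-dimension coordinates by repeatedly
  // dividing by the precomputed strides (TensorIntDivisor avoids an integer
  // division per dimension). For example, for a 2x3 column-major tensor the
  // strides are {1, 2}, so index 4 maps to coordinates (0, 2).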
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[NumDims - 1] = index;
    }
  }

  const Device EIGEN_DEVICE_REF m_device;
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fast_strides;
  Generator m_generator;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H