Eigen  3.4.90 (git rev 67eeba6e720c5745abc77ae6c92ce0a44aa7b7ae)
SparseAssign.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
  // TODO use the evaluator mechanism
  other.evalTo(derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  // By default, sparse evaluation does not alias, so we can safely bypass the generic call_assignment routine.
  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
            ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}

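// Illustrative usage sketch (not part of the library): the operator= overloads above
// back plain sparse assignments such as the ones below. The names A, B, and D are
// hypothetical.
//
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(3,3), B(3,3);
//   Eigen::Matrix3d D = Eigen::Matrix3d::Identity();
//   A = B;               // sparse = sparse
//   A = B.transpose();   // sparse = sparse expression
//   A = D.sparseView();  // sparse = sparse view of a dense matrix
//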
namespace internal {

template<>
struct storage_kind_to_evaluator_kind<Sparse> {
  typedef IteratorBased Kind;
};

template<>
struct storage_kind_to_shape<Sparse> {
  typedef SparseShape Shape;
};

struct Sparse2Sparse {};
struct Sparse2Dense {};

template<> struct AssignmentKind<SparseShape, SparseShape>           { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<DenseShape,  SparseShape>           { typedef Sparse2Dense  Kind; };
template<> struct AssignmentKind<DenseShape,  SparseTriangularShape> { typedef Sparse2Dense  Kind; };


template<typename DstXprType, typename SrcXprType>
void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
  typedef typename DstXprType::Scalar Scalar;
  typedef internal::evaluator<DstXprType> DstEvaluatorType;
  typedef internal::evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
  const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
  if ((!transpose) && src.isRValue())
  {
    // eval without temporary
    dst.resize(src.rows(), src.cols());
    dst.setZero();
    dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      dst.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        dst.insertBackByOuterInner(j,it.index()) = v;
      }
    }
    dst.finalize();
  }
  else
  {
    // eval through a temporary
    eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
                   (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
                 "the transpose operation is supposed to be handled in SparseMatrix::operator=");

    enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };


    DstXprType temp(src.rows(), src.cols());

    temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      temp.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
      }
    }
    temp.finalize();

    dst = temp.markAsRValue();
  }
}

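// Illustrative sketch (not part of the library): assign_sparse_to_sparse() fills the
// destination outer vector by outer vector via startVec()/insertBackByOuterInner().
// When the fast path's preconditions (matching storage order and an r-value source) do
// not hold, the nonzeros are gathered into a temporary that is then moved into dst with
// markAsRValue(). A hypothetical expression that exercises this routine:
//
//   Eigen::SparseMatrix<double> A(100,100), B(100,100), C(100,100);
//   // ... fill B and C ...
//   A = B.cwiseProduct(C);   // sparse = sparse expression, copied one outer vector at a time
//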
// Generic Sparse to Sparse assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    assign_sparse_to_sparse(dst.derived(), src.derived());
  }
};

// Generic Sparse to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Weak>
{
  static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
  {
    if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
      dst.setZero();

    internal::evaluator<SrcXprType> srcEval(src);
    resize_if_allowed(dst, src, func);
    internal::evaluator<DstXprType> dstEval(dst);

    const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
    for (Index j=0; j<outerEvaluationSize; ++j)
      for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
        func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
  }
};

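// Illustrative sketch (not part of the library): the Sparse2Dense specialization above
// zeroes the destination only for a plain '=' and then scatters the source's nonzeros,
// so compound assignments such as 'dense += sparse' touch only the stored coefficients.
//
//   Eigen::SparseMatrix<double> S(4,4);
//   S.insert(0,0) = 1.0;
//   S.insert(2,3) = 2.0;
//   Eigen::MatrixXd D = Eigen::MatrixXd::Ones(4,4);
//   D += S;                   // add_assign_op: only (0,0) and (2,3) are updated
//   Eigen::MatrixXd D2(4,4);
//   D2 = S;                   // assign_op: D2 is zeroed, then filled from S's nonzeros
//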
// Specialization for dense ?= dense +/- sparse and dense ?= sparse +/- dense
template<typename DstXprType, typename Func1, typename Func2>
struct assignment_from_dense_op_sparse
{
  template<typename SrcXprType, typename InitialFunc>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
    #endif

    call_assignment_no_alias(dst, src.lhs(), Func1());
    call_assignment_no_alias(dst, src.rhs(), Func2());
  }

  // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  std::enable_if_t<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
    #endif

    // Apply the dense matrix first, then the sparse one.
    call_assignment_no_alias(dst, src.rhs(), Func1());
    call_assignment_no_alias(dst, src.lhs(), Func2());
  }

  // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  std::enable_if_t<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
    #endif

    // Apply the dense matrix first, then the sparse one.
    call_assignment_no_alias(dst, -src.rhs(), Func1());
    call_assignment_no_alias(dst, src.lhs(), add_assign_op<typename DstXprType::Scalar,typename Lhs::Scalar>());
  }
};

#define EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(ASSIGN_OP,BINOP,ASSIGN_OP2) \
  template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
  struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
                    Sparse2Dense, \
                    std::enable_if_t< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
                                   || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>> \
    : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
  {}

EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op,    scalar_sum_op,add_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_sum_op,add_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_sum_op,sub_assign_op);

EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op,    scalar_difference_op,sub_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_difference_op,sub_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_difference_op,add_assign_op);


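// Illustrative sketch (not part of the library): the macro instantiations above cover
// every ASSIGN_OP/BINOP combination, so a mixed expression such as 'dense = sparse + dense'
// is split into a dense-to-dense assignment followed by a sparse update, rather than being
// evaluated coefficient by coefficient.
//
//   Eigen::SparseMatrix<double> S(4,4);
//   Eigen::MatrixXd D = Eigen::MatrixXd::Random(4,4), R(4,4);
//   R = S + D;    // effectively: R = D;  R += S;
//   R = D - S;    // effectively: R = D;  R -= S;
//   R += S + D;   // effectively: R += S; R += D;
//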
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
  typedef Solve<DecType,RhsType> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    src.dec()._solve_impl(src.rhs(), dst);
  }
};

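// Illustrative sketch (not part of the library): the Solve specialization above runs
// when a decomposition is solved against a sparse right-hand side, producing a sparse
// result. A hedged sketch, assuming Eigen::SparseLU and hypothetical matrices A, B, X:
//
//   Eigen::SparseMatrix<double> A(100,100), B(100,4), X;
//   // ... fill and compress A and B ...
//   Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);
//   X = lu.solve(B);   // "dst = dec.solve(rhs)" with a sparse dst
//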
struct Diagonal2Sparse {};

template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef typename DstXprType::Scalar Scalar;

  template<int Options, typename AssignFunc>
  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
  { dst.assignDiagonal(src.diagonal(), func); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() = src.diagonal(); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() += src.diagonal(); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() -= src.diagonal(); }
};
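
// Illustrative sketch (not part of the library): the Diagonal2Sparse specialization above
// lets diagonal expressions be assigned to, added to, or subtracted from a sparse matrix
// without going through a dense temporary. Names d and S below are hypothetical.
//
//   Eigen::VectorXd d = Eigen::VectorXd::Constant(5, 2.0);
//   Eigen::SparseMatrix<double> S(5,5);
//   S = d.asDiagonal();    // assign_op path: inserts the diagonal entries
//   S += d.asDiagonal();   // add_assign_op path: accumulates into the diagonal
//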
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEASSIGN_H