Reference documentation for deal.II version 9.1.0-pre
trilinos_sparse_matrix.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_trilinos_sparse_matrix_h
17 # define dealii_trilinos_sparse_matrix_h
18 
19 
20 # include <deal.II/base/config.h>
21 
22 # ifdef DEAL_II_WITH_TRILINOS
23 
24 # include <deal.II/base/index_set.h>
25 # include <deal.II/base/subscriptor.h>
26 
27 # include <deal.II/lac/exceptions.h>
28 # include <deal.II/lac/full_matrix.h>
29 # include <deal.II/lac/trilinos_epetra_vector.h>
30 # include <deal.II/lac/trilinos_vector.h>
31 # include <deal.II/lac/vector_memory.h>
32 # include <deal.II/lac/vector_operation.h>
33 
34 # include <Epetra_Comm.h>
35 # include <Epetra_CrsGraph.h>
36 # include <Epetra_Export.h>
37 # include <Epetra_FECrsMatrix.h>
38 # include <Epetra_Map.h>
39 # include <Epetra_MultiVector.h>
40 # include <Epetra_Operator.h>
41 
42 # include <cmath>
43 # include <memory>
44 # include <type_traits>
45 # include <vector>
46 # ifdef DEAL_II_WITH_MPI
47 # include <Epetra_MpiComm.h>
48 # include <mpi.h>
49 # else
50 # include <Epetra_SerialComm.h>
51 # endif
52 
53 DEAL_II_NAMESPACE_OPEN
54 
55 // forward declarations
56 template <typename MatrixType>
57 class BlockMatrixBase;
58 
59 template <typename number>
60 class SparseMatrix;
61 class SparsityPattern;
63 
64 
65 
66 namespace TrilinosWrappers
67 {
68  // forward declarations
69  class SparseMatrix;
70  class SparsityPattern;
71 
 75  namespace SparseMatrixIterators
 76  {
77  // forward declaration
78  template <bool Constness>
79  class Iterator;
80 
85 
 89  DeclException3(ExcAccessToNonlocalRow,
 90  std::size_t,
91  std::size_t,
92  std::size_t,
93  << "You tried to access row " << arg1
94  << " of a distributed sparsity pattern, "
95  << " but only rows " << arg2 << " through " << arg3
96  << " are stored locally and can be accessed.");
97 
 108  class AccessorBase
 109  {
110  public:
 115  using size_type = dealii::types::global_dof_index;
 119  AccessorBase(SparseMatrix *matrix,
 120  const size_type row,
121  const size_type index);
122 
126  size_type
127  row() const;
128 
132  size_type
133  index() const;
134 
138  size_type
139  column() const;
140 
141  protected:
 147  mutable SparseMatrix *matrix;
 153  size_type a_row;
 158  size_type a_index;
164  void
 165  visit_present_row();
 166 
179  std::shared_ptr<std::vector<size_type>> colnum_cache;
180 
184  std::shared_ptr<std::vector<TrilinosScalar>> value_cache;
185  };
186 
 197  template <bool Constness>
198  class Accessor : public AccessorBase
199  {
203  TrilinosScalar
204  value() const;
205 
209  TrilinosScalar &
210  value();
211  };
212 
216  template <>
217  class Accessor<true> : public AccessorBase
218  {
219  public:
224  using MatrixType = const SparseMatrix;
225 
 231  Accessor(MatrixType *matrix, const size_type row, const size_type index);
236  template <bool Other>
237  Accessor(const Accessor<Other> &a);
238 
242  TrilinosScalar
243  value() const;
244 
245  private:
249  template <bool>
250  friend class Iterator;
251  };
252 
256  template <>
257  class Accessor<false> : public AccessorBase
258  {
259  class Reference
260  {
261  public:
265  Reference(const Accessor<false> &accessor);
266 
270  operator TrilinosScalar() const;
271 
275  const Reference &
276  operator=(const TrilinosScalar n) const;
277 
281  const Reference &
282  operator+=(const TrilinosScalar n) const;
283 
287  const Reference &
288  operator-=(const TrilinosScalar n) const;
289 
293  const Reference &
294  operator*=(const TrilinosScalar n) const;
295 
299  const Reference &
300  operator/=(const TrilinosScalar n) const;
301 
302  private:
307  Accessor &accessor;
308  };
309 
310  public:
 316  using MatrixType = SparseMatrix;
 322  Accessor(MatrixType *matrix, const size_type row, const size_type index);
326  Reference
327  value() const;
328 
329  private:
333  template <bool>
334  friend class Iterator;
338  friend class Reference;
339  };
340 
355  template <bool Constness>
356  class Iterator
357  {
358  public:
 363  using size_type = dealii::types::global_dof_index;
 369  using MatrixType = typename Accessor<Constness>::MatrixType;
 375  Iterator(MatrixType *matrix, const size_type row, const size_type index);
379  template <bool Other>
380  Iterator(const Iterator<Other> &other);
381 
 385  Iterator<Constness> &
 386  operator++();
387 
 391  Iterator<Constness>
 392  operator++(int);
393 
397  const Accessor<Constness> &operator*() const;
398 
402  const Accessor<Constness> *operator->() const;
403 
408  bool
409  operator==(const Iterator<Constness> &) const;
410 
414  bool
415  operator!=(const Iterator<Constness> &) const;
416 
422  bool
423  operator<(const Iterator<Constness> &) const;
424 
428  bool
429  operator>(const Iterator<Constness> &) const;
430 
434  DeclException2(ExcInvalidIndexWithinRow,
435  size_type,
436  size_type,
437  << "Attempt to access element " << arg2 << " of row "
438  << arg1 << " which doesn't have that many elements.");
439 
440  private:
 445  Accessor<Constness> accessor;
446  template <bool Other>
447  friend class Iterator;
448  };
449 
450  } // namespace SparseMatrixIterators
451 
452 
514  class SparseMatrix : public Subscriptor
515  {
516  public:
 521  using size_type = dealii::types::global_dof_index;
 525  DeclException1(ExcAccessToNonlocalRow,
 526  std::size_t,
527  << "You tried to access row " << arg1
528  << " of a non-contiguous locally owned row set."
529  << " The row " << arg1
530  << " is not stored locally and can't be accessed.");
531 
539  struct Traits
540  {
545  static const bool zero_addition_can_be_elided = true;
546  };
547 
 552  using iterator = SparseMatrixIterators::Iterator<false>;
 557  using const_iterator = SparseMatrixIterators::Iterator<true>;
561  using value_type = TrilinosScalar;
562 
570  SparseMatrix();
571 
579  SparseMatrix(const size_type m,
580  const size_type n,
581  const unsigned int n_max_entries_per_row);
582 
590  SparseMatrix(const size_type m,
591  const size_type n,
592  const std::vector<unsigned int> &n_entries_per_row);
593 
597  SparseMatrix(const SparsityPattern &InputSparsityPattern);
598 
603  SparseMatrix(SparseMatrix &&other) noexcept;
604 
608  SparseMatrix(const SparseMatrix &) = delete;
609 
613  SparseMatrix &
614  operator=(const SparseMatrix &) = delete;
615 
619  virtual ~SparseMatrix() override = default;
620 
636  template <typename SparsityPatternType>
637  void
638  reinit(const SparsityPatternType &sparsity_pattern);
639 
652  void
653  reinit(const SparsityPattern &sparsity_pattern);
654 
663  void
664  reinit(const SparseMatrix &sparse_matrix);
665 
686  template <typename number>
687  void
688  reinit(const ::SparseMatrix<number> &dealii_sparse_matrix,
689  const double drop_tolerance = 1e-13,
690  const bool copy_values = true,
691  const ::SparsityPattern * use_this_sparsity = nullptr);
692 
698  void
699  reinit(const Epetra_CrsMatrix &input_matrix, const bool copy_values = true);
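 // Illustrative usage sketch (editor's example, not part of the deal.II
 // header): a serial setup typically builds a TrilinosWrappers::SparsityPattern
 // first and then hands it to reinit(). The names `sp` and `A` are placeholders.
 //
 //   TrilinosWrappers::SparsityPattern sp(n_rows, n_cols,
 //                                        /*n_entries_per_row=*/5);
 //   // ... call sp.add(i, j) for each nonzero entry ...
 //   sp.compress();
 //
 //   TrilinosWrappers::SparseMatrix A;
 //   A.reinit(sp);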
701 
719  DEAL_II_DEPRECATED
720  SparseMatrix(const Epetra_Map &parallel_partitioning,
721  const size_type n_max_entries_per_row = 0);
722 
732  DEAL_II_DEPRECATED
733  SparseMatrix(const Epetra_Map & parallel_partitioning,
734  const std::vector<unsigned int> &n_entries_per_row);
735 
754  DEAL_II_DEPRECATED
755  SparseMatrix(const Epetra_Map &row_parallel_partitioning,
756  const Epetra_Map &col_parallel_partitioning,
757  const size_type n_max_entries_per_row = 0);
758 
775  DEAL_II_DEPRECATED
776  SparseMatrix(const Epetra_Map & row_parallel_partitioning,
777  const Epetra_Map & col_parallel_partitioning,
778  const std::vector<unsigned int> &n_entries_per_row);
779 
806  template <typename SparsityPatternType>
807  DEAL_II_DEPRECATED void
808  reinit(const Epetra_Map & parallel_partitioning,
809  const SparsityPatternType &sparsity_pattern,
810  const bool exchange_data = false);
811 
826  template <typename SparsityPatternType>
827  DEAL_II_DEPRECATED void
828  reinit(const Epetra_Map & row_parallel_partitioning,
829  const Epetra_Map & col_parallel_partitioning,
830  const SparsityPatternType &sparsity_pattern,
831  const bool exchange_data = false);
832 
851  template <typename number>
852  DEAL_II_DEPRECATED void
853  reinit(const Epetra_Map & parallel_partitioning,
854  const ::SparseMatrix<number> &dealii_sparse_matrix,
855  const double drop_tolerance = 1e-13,
856  const bool copy_values = true,
857  const ::SparsityPattern * use_this_sparsity = nullptr);
858 
874  template <typename number>
875  DEAL_II_DEPRECATED void
876  reinit(const Epetra_Map & row_parallel_partitioning,
877  const Epetra_Map & col_parallel_partitioning,
878  const ::SparseMatrix<number> &dealii_sparse_matrix,
879  const double drop_tolerance = 1e-13,
880  const bool copy_values = true,
881  const ::SparsityPattern * use_this_sparsity = nullptr);
883 
899  SparseMatrix(const IndexSet & parallel_partitioning,
900  const MPI_Comm & communicator = MPI_COMM_WORLD,
901  const unsigned int n_max_entries_per_row = 0);
902 
910  SparseMatrix(const IndexSet & parallel_partitioning,
911  const MPI_Comm & communicator,
912  const std::vector<unsigned int> &n_entries_per_row);
913 
928  SparseMatrix(const IndexSet &row_parallel_partitioning,
929  const IndexSet &col_parallel_partitioning,
930  const MPI_Comm &communicator = MPI_COMM_WORLD,
931  const size_type n_max_entries_per_row = 0);
932 
947  SparseMatrix(const IndexSet & row_parallel_partitioning,
948  const IndexSet & col_parallel_partitioning,
949  const MPI_Comm & communicator,
950  const std::vector<unsigned int> &n_entries_per_row);
951 
972  template <typename SparsityPatternType>
973  void
974  reinit(const IndexSet & parallel_partitioning,
975  const SparsityPatternType &sparsity_pattern,
976  const MPI_Comm & communicator = MPI_COMM_WORLD,
977  const bool exchange_data = false);
978 
991  template <typename SparsityPatternType>
992  void
993  reinit(const IndexSet & row_parallel_partitioning,
994  const IndexSet & col_parallel_partitioning,
995  const SparsityPatternType &sparsity_pattern,
996  const MPI_Comm & communicator = MPI_COMM_WORLD,
997  const bool exchange_data = false);
998 
1015  template <typename number>
1016  void
1017  reinit(const IndexSet & parallel_partitioning,
1018  const ::SparseMatrix<number> &dealii_sparse_matrix,
1019  const MPI_Comm & communicator = MPI_COMM_WORLD,
1020  const double drop_tolerance = 1e-13,
1021  const bool copy_values = true,
1022  const ::SparsityPattern * use_this_sparsity = nullptr);
1023 
1037  template <typename number>
1038  void
1039  reinit(const IndexSet & row_parallel_partitioning,
1040  const IndexSet & col_parallel_partitioning,
1041  const ::SparseMatrix<number> &dealii_sparse_matrix,
1042  const MPI_Comm & communicator = MPI_COMM_WORLD,
1043  const double drop_tolerance = 1e-13,
1044  const bool copy_values = true,
1045  const ::SparsityPattern * use_this_sparsity = nullptr);
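 // Illustrative usage sketch (editor's example, not part of the header): a
 // common parallel initialization builds a DynamicSparsityPattern and passes
 // it together with the locally owned IndexSet. `dof_handler` is a placeholder
 // for an already set up dealii::DoFHandler.
 //
 //   const IndexSet owned_dofs = dof_handler.locally_owned_dofs();
 //   DynamicSparsityPattern dsp(owned_dofs.size());
 //   DoFTools::make_sparsity_pattern(dof_handler, dsp);
 //
 //   TrilinosWrappers::SparseMatrix A;
 //   A.reinit(owned_dofs, owned_dofs, dsp, MPI_COMM_WORLD,
 //            /*exchange_data=*/true);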
1047 
1051 
1055  size_type
1056  m() const;
1057 
1061  size_type
1062  n() const;
1063 
1072  unsigned int
1073  local_size() const;
1074 
1083  std::pair<size_type, size_type>
1084  local_range() const;
1085 
1090  bool
1091  in_local_range(const size_type index) const;
1092 
1097  size_type
1098  n_nonzero_elements() const;
1099 
1103  unsigned int
1104  row_length(const size_type row) const;
1105 
1112  bool
1113  is_compressed() const;
1114 
1120  size_type
1121  memory_consumption() const;
1122 
1126  MPI_Comm
1127  get_mpi_communicator() const;
1128 
1130 
1134 
1144  SparseMatrix &
1145  operator=(const double d);
1146 
1154  void
1155  clear();
1156 
1184  void
1185  compress(::VectorOperation::values operation);
1186 
1208  void
1209  set(const size_type i, const size_type j, const TrilinosScalar value);
1210 
1243  void
1244  set(const std::vector<size_type> & indices,
1245  const FullMatrix<TrilinosScalar> &full_matrix,
1246  const bool elide_zero_values = false);
1247 
1253  void
1254  set(const std::vector<size_type> & row_indices,
1255  const std::vector<size_type> & col_indices,
1256  const FullMatrix<TrilinosScalar> &full_matrix,
1257  const bool elide_zero_values = false);
1258 
1286  void
1287  set(const size_type row,
1288  const std::vector<size_type> & col_indices,
1289  const std::vector<TrilinosScalar> &values,
1290  const bool elide_zero_values = false);
1291 
1319  void
1320  set(const size_type row,
1321  const size_type n_cols,
1322  const size_type * col_indices,
1323  const TrilinosScalar *values,
1324  const bool elide_zero_values = false);
1325 
1335  void
1336  add(const size_type i, const size_type j, const TrilinosScalar value);
1337 
1356  void
1357  add(const std::vector<size_type> & indices,
1358  const FullMatrix<TrilinosScalar> &full_matrix,
1359  const bool elide_zero_values = true);
1360 
1366  void
1367  add(const std::vector<size_type> & row_indices,
1368  const std::vector<size_type> & col_indices,
1369  const FullMatrix<TrilinosScalar> &full_matrix,
1370  const bool elide_zero_values = true);
1371 
1385  void
1386  add(const size_type row,
1387  const std::vector<size_type> & col_indices,
1388  const std::vector<TrilinosScalar> &values,
1389  const bool elide_zero_values = true);
1390 
1404  void
1405  add(const size_type row,
1406  const size_type n_cols,
1407  const size_type * col_indices,
1408  const TrilinosScalar *values,
1409  const bool elide_zero_values = true,
1410  const bool col_indices_are_sorted = false);
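 // Illustrative usage sketch (editor's example, not part of the header):
 // cell-wise assembly usually accumulates local contributions with add() and
 // finishes with a single compress() call. `cell_matrix` and
 // `local_dof_indices` are placeholder names for the usual local objects.
 //
 //   for (const auto &cell : dof_handler.active_cell_iterators())
 //     {
 //       // ... fill cell_matrix and local_dof_indices for this cell ...
 //       A.add(local_dof_indices, cell_matrix);
 //     }
 //   A.compress(VectorOperation::add);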
1411 
1415  SparseMatrix &
1416  operator*=(const TrilinosScalar factor);
1417 
1421  SparseMatrix &
1422  operator/=(const TrilinosScalar factor);
1423 
1427  void
1428  copy_from(const SparseMatrix &source);
1429 
1437  void
1438  add(const TrilinosScalar factor, const SparseMatrix &matrix);
1439 
1466  void
1467  clear_row(const size_type row, const TrilinosScalar new_diag_value = 0);
1468 
1489  void
1490  clear_rows(const std::vector<size_type> &rows,
1491  const TrilinosScalar new_diag_value = 0);
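 // Illustrative usage sketch (editor's example, not part of the header):
 // rows belonging to constrained degrees of freedom can be emptied while
 // keeping a value on the diagonal. `constrained_rows` is a placeholder.
 //
 //   std::vector<types::global_dof_index> constrained_rows; // fill as needed
 //   A.clear_rows(constrained_rows, /*new_diag_value=*/1.0);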
1492 
1502  void
1503  transpose();
1504 
1506 
1510 
1519  TrilinosScalar
1520  operator()(const size_type i, const size_type j) const;
1521 
1538  TrilinosScalar
1539  el(const size_type i, const size_type j) const;
1540 
1547  TrilinosScalar
1548  diag_element(const size_type i) const;
1549 
1551 
1555 
1577  template <typename VectorType>
1578  void
1579  vmult(VectorType &dst, const VectorType &src) const;
1580 
1603  template <typename VectorType>
1604  void
1605  Tvmult(VectorType &dst, const VectorType &src) const;
1606 
1629  template <typename VectorType>
1630  void
1631  vmult_add(VectorType &dst, const VectorType &src) const;
1632 
1655  template <typename VectorType>
1656  void
1657  Tvmult_add(VectorType &dst, const VectorType &src) const;
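 // Illustrative usage sketch (editor's example, not part of the header):
 // matrix-vector products use vectors partitioned according to the matrix's
 // domain and range index sets.
 //
 //   TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(),
 //                                   MPI_COMM_WORLD);
 //   TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(),
 //                                   MPI_COMM_WORLD);
 //   A.vmult(y, x);   // y = A x
 //   A.Tvmult(x, y);  // x = A^T y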
1658 
1680  TrilinosScalar
1681  matrix_norm_square(const MPI::Vector &v) const;
1682 
1702  TrilinosScalar
1703  matrix_scalar_product(const MPI::Vector &u, const MPI::Vector &v) const;
1704 
1721  TrilinosScalar
1722  residual(MPI::Vector & dst,
1723  const MPI::Vector &x,
1724  const MPI::Vector &b) const;
1725 
1740  void
1741  mmult(SparseMatrix & C,
1742  const SparseMatrix &B,
1743  const MPI::Vector & V = MPI::Vector()) const;
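 // Illustrative usage sketch (editor's example, not part of the header):
 // the result matrix is rebuilt by the operation, so it does not need a
 // sparsity pattern beforehand.
 //
 //   TrilinosWrappers::SparseMatrix C;
 //   A.mmult(C, B);   // C = A * B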
1744 
1745 
1762  void
1763  Tmmult(SparseMatrix & C,
1764  const SparseMatrix &B,
1765  const MPI::Vector & V = MPI::Vector()) const;
1766 
1768 
1772 
1780  TrilinosScalar
1781  l1_norm() const;
1782 
1791  TrilinosScalar
1792  linfty_norm() const;
1793 
1798  TrilinosScalar
1799  frobenius_norm() const;
1800 
1802 
1806 
1811  const Epetra_CrsMatrix &
1812  trilinos_matrix() const;
1813 
1818  const Epetra_CrsGraph &
1819  trilinos_sparsity_pattern() const;
1820 
1828  DEAL_II_DEPRECATED
1829  const Epetra_Map &
1830  domain_partitioner() const;
1831 
1840  DEAL_II_DEPRECATED
1841  const Epetra_Map &
1842  range_partitioner() const;
1843 
1851  DEAL_II_DEPRECATED
1852  const Epetra_Map &
1853  row_partitioner() const;
1854 
1864  DEAL_II_DEPRECATED
1865  const Epetra_Map &
1866  col_partitioner() const;
1868 
1873 
1878  IndexSet
1879  locally_owned_domain_indices() const;
1880 
1886  IndexSet
1887  locally_owned_range_indices() const;
1888 
1890 
1895 
 1914  const_iterator
 1915  begin() const;
1916 
1920  iterator
1921  begin();
1922 
 1927  const_iterator
 1928  end() const;
1929 
1933  iterator
1934  end();
1935 
 1964  const_iterator
 1965  begin(const size_type r) const;
1966 
1970  iterator
1971  begin(const size_type r);
1972 
 1982  const_iterator
 1983  end(const size_type r) const;
1984 
1988  iterator
1989  end(const size_type r);
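 // Illustrative usage sketch (editor's example, not part of the header):
 // iterate over the stored entries of a locally owned row `row` of the
 // matrix `A`.
 //
 //   for (auto it = A.begin(row); it != A.end(row); ++it)
 //     std::cout << "A(" << it->row() << ',' << it->column()
 //               << ") = " << it->value() << std::endl;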
1990 
1992 
1996 
2002  void
2003  write_ascii();
2004 
2012  void
2013  print(std::ostream &out,
2014  const bool write_extended_trilinos_info = false) const;
2015 
2017 
 2025  DeclException1(ExcTrilinosError,
 2026  int,
2027  << "An error with error number " << arg1
2028  << " occurred while calling a Trilinos function");
2029 
2033  DeclException2(ExcInvalidIndex,
2034  size_type,
2035  size_type,
2036  << "The entry with index <" << arg1 << ',' << arg2
2037  << "> does not exist.");
2038 
2042  DeclExceptionMsg(ExcSourceEqualsDestination,
2043  "You are attempting an operation on two matrices that "
2044  "are the same object, but the operation requires that the "
2045  "two objects are in fact different.");
2046 
2050  DeclException0(ExcMatrixNotCompressed);
2051 
2055  DeclException4(ExcAccessToNonLocalElement,
2056  size_type,
2057  size_type,
2058  size_type,
2059  size_type,
2060  << "You tried to access element (" << arg1 << "/" << arg2
2061  << ")"
2062  << " of a distributed matrix, but only rows " << arg3
2063  << " through " << arg4
2064  << " are stored locally and can be accessed.");
2065 
2069  DeclException2(ExcAccessToNonPresentElement,
2070  size_type,
2071  size_type,
2072  << "You tried to access element (" << arg1 << "/" << arg2
2073  << ")"
2074  << " of a sparse matrix, but it appears to not"
2075  << " exist in the Trilinos sparsity pattern.");
2077 
2078 
2079 
2080  protected:
2091  void
2092  prepare_add();
2093 
2099  void
2100  prepare_set();
2101 
2102 
2103 
2104  private:
2109  std::unique_ptr<Epetra_Map> column_space_map;
2110 
2116  std::unique_ptr<Epetra_FECrsMatrix> matrix;
2117 
2123  std::unique_ptr<Epetra_CrsMatrix> nonlocal_matrix;
2124 
2128  std::unique_ptr<Epetra_Export> nonlocal_matrix_exporter;
2129 
2141  Epetra_CombineMode last_action;
2142 
2147  bool compressed;
2148 
2153  };
2154 
2155 
2156 
2157  // forwards declarations
2158  class SolverBase;
2159  class PreconditionBase;
2160 
2161  namespace internal
2162  {
2163  inline void
2164  check_vector_map_equality(const Epetra_CrsMatrix & mtrx,
2165  const Epetra_MultiVector &src,
2166  const Epetra_MultiVector &dst,
2167  const bool transpose)
2168  {
2169  if (transpose == false)
2170  {
2171  Assert(src.Map().SameAs(mtrx.DomainMap()) == true,
2172  ExcMessage(
2173  "Column map of matrix does not fit with vector map!"));
2174  Assert(dst.Map().SameAs(mtrx.RangeMap()) == true,
2175  ExcMessage("Row map of matrix does not fit with vector map!"));
2176  }
2177  else
2178  {
2179  Assert(src.Map().SameAs(mtrx.RangeMap()) == true,
2180  ExcMessage(
2181  "Column map of matrix does not fit with vector map!"));
2182  Assert(dst.Map().SameAs(mtrx.DomainMap()) == true,
2183  ExcMessage("Row map of matrix does not fit with vector map!"));
2184  }
2185  (void)mtrx; // removes -Wunused-variable in optimized mode
2186  (void)src;
2187  (void)dst;
2188  }
2189 
2190  inline void
2191  check_vector_map_equality(const Epetra_Operator & op,
2192  const Epetra_MultiVector &src,
2193  const Epetra_MultiVector &dst,
2194  const bool transpose)
2195  {
2196  if (transpose == false)
2197  {
2198  Assert(src.Map().SameAs(op.OperatorDomainMap()) == true,
2199  ExcMessage(
2200  "Column map of operator does not fit with vector map!"));
2201  Assert(dst.Map().SameAs(op.OperatorRangeMap()) == true,
2202  ExcMessage(
2203  "Row map of operator does not fit with vector map!"));
2204  }
2205  else
2206  {
2207  Assert(src.Map().SameAs(op.OperatorRangeMap()) == true,
2208  ExcMessage(
2209  "Column map of operator does not fit with vector map!"));
2210  Assert(dst.Map().SameAs(op.OperatorDomainMap()) == true,
2211  ExcMessage(
2212  "Row map of operator does not fit with vector map!"));
2213  }
2214  (void)op; // removes -Wunused-variable in optimized mode
2215  (void)src;
2216  (void)dst;
2217  }
2218 
2219  namespace LinearOperatorImplementation
2220  {
2241  class TrilinosPayload : public Epetra_Operator
2242  {
2243  public:
2247  using VectorType = Epetra_MultiVector;
2248 
 2253  using Range = VectorType;
 2258  using Domain = VectorType;
2263 
2271  TrilinosPayload();
2272 
2276  TrilinosPayload(const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2277  const TrilinosWrappers::SparseMatrix &matrix);
2278 
 2282  TrilinosPayload(
 2283  const TrilinosWrappers::SparseMatrix & matrix_exemplar,
2284  const TrilinosWrappers::PreconditionBase &preconditioner);
2285 
 2289  TrilinosPayload(
 2290  const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2291  const TrilinosWrappers::PreconditionBase &preconditioner);
2292 
2296  TrilinosPayload(const TrilinosPayload &payload);
2297 
2305  TrilinosPayload(const TrilinosPayload &first_op,
2306  const TrilinosPayload &second_op);
2307 
2311  virtual ~TrilinosPayload() override = default;
2312 
 2316  TrilinosPayload
 2317  identity_payload() const;
2318 
 2322  TrilinosPayload
 2323  null_payload() const;
2324 
 2328  TrilinosPayload
 2329  transpose_payload() const;
2330 
2347  template <typename Solver, typename Preconditioner>
2348  typename std::enable_if<
2349  std::is_base_of<TrilinosWrappers::SolverBase, Solver>::value &&
2350  std::is_base_of<TrilinosWrappers::PreconditionBase,
2351  Preconditioner>::value,
2352  TrilinosPayload>::type
2353  inverse_payload(Solver &, const Preconditioner &) const;
2354 
2372  template <typename Solver, typename Preconditioner>
2373  typename std::enable_if<
2374  !(std::is_base_of<TrilinosWrappers::SolverBase, Solver>::value &&
2375  std::is_base_of<TrilinosWrappers::PreconditionBase,
2376  Preconditioner>::value),
2377  TrilinosPayload>::type
2378  inverse_payload(Solver &, const Preconditioner &) const;
2379 
2381 
2386 
2392  IndexSet
2393  locally_owned_domain_indices() const;
2394 
2400  IndexSet
2401  locally_owned_range_indices() const;
2402 
2406  MPI_Comm
2407  get_mpi_communicator() const;
2408 
2415  void
2416  transpose();
2417 
2425  std::function<void(VectorType &, const VectorType &)> vmult;
2426 
2434  std::function<void(VectorType &, const VectorType &)> Tvmult;
2435 
2444  std::function<void(VectorType &, const VectorType &)> inv_vmult;
2445 
2454  std::function<void(VectorType &, const VectorType &)> inv_Tvmult;
2455 
2457 
2462 
2469  virtual bool
2470  UseTranspose() const override;
2471 
2487  virtual int
2488  SetUseTranspose(bool UseTranspose) override;
2489 
2501  virtual int
2502  Apply(const VectorType &X, VectorType &Y) const override;
2503 
2522  virtual int
2523  ApplyInverse(const VectorType &Y, VectorType &X) const override;
2525 
2530 
2537  virtual const char *
2538  Label() const override;
2539 
2547  virtual const Epetra_Comm &
2548  Comm() const override;
2549 
2557  virtual const Epetra_Map &
2558  OperatorDomainMap() const override;
2559 
2568  virtual const Epetra_Map &
2569  OperatorRangeMap() const override;
2571 
2572  private:
 2578  bool use_transpose;
2583 # ifdef DEAL_II_WITH_MPI
2584  Epetra_MpiComm communicator;
2585 # else
2586  Epetra_SerialComm communicator;
2587 # endif
2588 
2593  Epetra_Map domain_map;
2594 
2599  Epetra_Map range_map;
2600 
2609  virtual bool
2610  HasNormInf() const override;
2611 
2619  virtual double
2620  NormInf() const override;
2621  };
2622 
 2627  TrilinosPayload
 2628  operator+(const TrilinosPayload &first_op,
2629  const TrilinosPayload &second_op);
2630 
2635  TrilinosPayload operator*(const TrilinosPayload &first_op,
2636  const TrilinosPayload &second_op);
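 // Illustrative usage sketch (editor's example, not part of the header):
 // these payload operators are what allow Trilinos matrices to be composed
 // through the LinearOperator framework; the helper below is assumed to come
 // from <deal.II/lac/trilinos_linear_operator.h>.
 //
 //   using VectorType = TrilinosWrappers::MPI::Vector;
 //   const auto op_A = TrilinosWrappers::linear_operator<VectorType>(A);
 //   const auto op_B = TrilinosWrappers::linear_operator<VectorType>(B);
 //   const auto op_S = op_A + op_B;  // uses TrilinosPayload operator+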
2637 
2638  } // namespace LinearOperatorImplementation
2639  } /* namespace internal */
2640 
2641 
2642 
2643  // ----------------------- inline and template functions --------------------
2644 
2645 # ifndef DOXYGEN
2646 
2647  namespace SparseMatrixIterators
2648  {
2649  inline AccessorBase::AccessorBase(SparseMatrix *matrix,
2650  size_type row,
2651  size_type index)
2652  : matrix(matrix)
2653  , a_row(row)
2654  , a_index(index)
2655  {
2656  visit_present_row();
2657  }
2658 
2659 
2660  inline AccessorBase::size_type
2661  AccessorBase::row() const
2662  {
2663  Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
2664  return a_row;
2665  }
2666 
2667 
2668  inline AccessorBase::size_type
2669  AccessorBase::column() const
2670  {
2671  Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
2672  return (*colnum_cache)[a_index];
2673  }
2674 
2675 
2676  inline AccessorBase::size_type
2677  AccessorBase::index() const
2678  {
2679  Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
2680  return a_index;
2681  }
2682 
2683 
2684  inline Accessor<true>::Accessor(MatrixType * matrix,
2685  const size_type row,
2686  const size_type index)
2687  : AccessorBase(const_cast<SparseMatrix *>(matrix), row, index)
2688  {}
2689 
2690 
2691  template <bool Other>
2692  inline Accessor<true>::Accessor(const Accessor<Other> &other)
2693  : AccessorBase(other)
2694  {}
2695 
2696 
2697  inline TrilinosScalar
2698  Accessor<true>::value() const
2699  {
2700  Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
2701  return (*value_cache)[a_index];
2702  }
2703 
2704 
2705  inline Accessor<false>::Reference::Reference(const Accessor<false> &acc)
2706  : accessor(const_cast<Accessor<false> &>(acc))
2707  {}
2708 
2709 
2710  inline Accessor<false>::Reference::operator TrilinosScalar() const
2711  {
2712  return (*accessor.value_cache)[accessor.a_index];
2713  }
2714 
2715  inline const Accessor<false>::Reference &
2716  Accessor<false>::Reference::operator=(const TrilinosScalar n) const
2717  {
2718  (*accessor.value_cache)[accessor.a_index] = n;
2719  accessor.matrix->set(accessor.row(),
2720  accessor.column(),
2721  static_cast<TrilinosScalar>(*this));
2722  return *this;
2723  }
2724 
2725 
2726  inline const Accessor<false>::Reference &
2727  Accessor<false>::Reference::operator+=(const TrilinosScalar n) const
2728  {
2729  (*accessor.value_cache)[accessor.a_index] += n;
2730  accessor.matrix->set(accessor.row(),
2731  accessor.column(),
2732  static_cast<TrilinosScalar>(*this));
2733  return *this;
2734  }
2735 
2736 
2737  inline const Accessor<false>::Reference &
2738  Accessor<false>::Reference::operator-=(const TrilinosScalar n) const
2739  {
2740  (*accessor.value_cache)[accessor.a_index] -= n;
2741  accessor.matrix->set(accessor.row(),
2742  accessor.column(),
2743  static_cast<TrilinosScalar>(*this));
2744  return *this;
2745  }
2746 
2747 
2748  inline const Accessor<false>::Reference &
2749  Accessor<false>::Reference::operator*=(const TrilinosScalar n) const
2750  {
2751  (*accessor.value_cache)[accessor.a_index] *= n;
2752  accessor.matrix->set(accessor.row(),
2753  accessor.column(),
2754  static_cast<TrilinosScalar>(*this));
2755  return *this;
2756  }
2757 
2758 
2759  inline const Accessor<false>::Reference &
2760  Accessor<false>::Reference::operator/=(const TrilinosScalar n) const
2761  {
2762  (*accessor.value_cache)[accessor.a_index] /= n;
2763  accessor.matrix->set(accessor.row(),
2764  accessor.column(),
2765  static_cast<TrilinosScalar>(*this));
2766  return *this;
2767  }
2768 
2769 
2770  inline Accessor<false>::Accessor(MatrixType * matrix,
2771  const size_type row,
2772  const size_type index)
2773  : AccessorBase(matrix, row, index)
2774  {}
2775 
2776 
2777  inline Accessor<false>::Reference
2778  Accessor<false>::value() const
2779  {
2780  Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
2781  return Reference(*this);
2782  }
2783 
2784 
2785 
2786  template <bool Constness>
2787  inline Iterator<Constness>::Iterator(MatrixType * matrix,
2788  const size_type row,
2789  const size_type index)
2790  : accessor(matrix, row, index)
2791  {}
2792 
2793 
2794  template <bool Constness>
2795  template <bool Other>
2796  inline Iterator<Constness>::Iterator(const Iterator<Other> &other)
2797  : accessor(other.accessor)
2798  {}
2799 
2800 
2801  template <bool Constness>
2802  inline Iterator<Constness> &
2803  Iterator<Constness>::operator++()
2804  {
2805  Assert(accessor.a_row < accessor.matrix->m(), ExcIteratorPastEnd());
2806 
2807  ++accessor.a_index;
2808 
2809  // If at end of line: do one
2810  // step, then cycle until we
2811  // find a row with a nonzero
2812  // number of entries.
2813  if (accessor.a_index >= accessor.colnum_cache->size())
2814  {
2815  accessor.a_index = 0;
2816  ++accessor.a_row;
2817 
2818  while ((accessor.a_row < accessor.matrix->m()) &&
2819  ((accessor.matrix->in_local_range(accessor.a_row) == false) ||
2820  (accessor.matrix->row_length(accessor.a_row) == 0)))
2821  ++accessor.a_row;
2822 
2823  accessor.visit_present_row();
2824  }
2825  return *this;
2826  }
2827 
2828 
2829  template <bool Constness>
2830  inline Iterator<Constness>
2831  Iterator<Constness>::operator++(int)
2832  {
2833  const Iterator<Constness> old_state = *this;
2834  ++(*this);
2835  return old_state;
2836  }
2837 
2838 
2839 
2840  template <bool Constness>
2841  inline const Accessor<Constness> &Iterator<Constness>::operator*() const
2842  {
2843  return accessor;
2844  }
2845 
2846 
2847 
2848  template <bool Constness>
2849  inline const Accessor<Constness> *Iterator<Constness>::operator->() const
2850  {
2851  return &accessor;
2852  }
2853 
2854 
2855 
2856  template <bool Constness>
2857  inline bool
2858  Iterator<Constness>::operator==(const Iterator<Constness> &other) const
2859  {
2860  return (accessor.a_row == other.accessor.a_row &&
2861  accessor.a_index == other.accessor.a_index);
2862  }
2863 
2864 
2865 
2866  template <bool Constness>
2867  inline bool
2868  Iterator<Constness>::operator!=(const Iterator<Constness> &other) const
2869  {
2870  return !(*this == other);
2871  }
2872 
2873 
2874 
2875  template <bool Constness>
2876  inline bool
2877  Iterator<Constness>::operator<(const Iterator<Constness> &other) const
2878  {
2879  return (accessor.row() < other.accessor.row() ||
2880  (accessor.row() == other.accessor.row() &&
2881  accessor.index() < other.accessor.index()));
2882  }
2883 
2884 
2885  template <bool Constness>
2886  inline bool
2887  Iterator<Constness>::operator>(const Iterator<Constness> &other) const
2888  {
2889  return (other < *this);
2890  }
2891 
2892  } // namespace SparseMatrixIterators
2893 
2894 
2895 
 2896  inline SparseMatrix::const_iterator
 2897  SparseMatrix::begin() const
2898  {
2899  return begin(0);
2900  }
2901 
2902 
2903 
 2904  inline SparseMatrix::const_iterator
 2905  SparseMatrix::end() const
2906  {
2907  return const_iterator(this, m(), 0);
2908  }
2909 
2910 
2911 
 2912  inline SparseMatrix::const_iterator
 2913  SparseMatrix::begin(const size_type r) const
2914  {
2915  Assert(r < m(), ExcIndexRange(r, 0, m()));
2916  if (in_local_range(r) && (row_length(r) > 0))
2917  return const_iterator(this, r, 0);
2918  else
2919  return end(r);
2920  }
2921 
2922 
2923 
 2924  inline SparseMatrix::const_iterator
 2925  SparseMatrix::end(const size_type r) const
2926  {
2927  Assert(r < m(), ExcIndexRange(r, 0, m()));
2928 
2929  // place the iterator on the first entry
2930  // past this line, or at the end of the
2931  // matrix
2932  for (size_type i = r + 1; i < m(); ++i)
2933  if (in_local_range(i) && (row_length(i) > 0))
2934  return const_iterator(this, i, 0);
2935 
2936  // if there is no such line, then take the
2937  // end iterator of the matrix
2938  return end();
2939  }
2940 
2941 
2942 
2943  inline SparseMatrix::iterator
 2944  SparseMatrix::begin()
 2945  {
2946  return begin(0);
2947  }
2948 
2949 
2950 
2951  inline SparseMatrix::iterator
 2952  SparseMatrix::end()
 2953  {
2954  return iterator(this, m(), 0);
2955  }
2956 
2957 
2958 
2959  inline SparseMatrix::iterator
2960  SparseMatrix::begin(const size_type r)
2961  {
2962  Assert(r < m(), ExcIndexRange(r, 0, m()));
2963  if (in_local_range(r) && (row_length(r) > 0))
2964  return iterator(this, r, 0);
2965  else
2966  return end(r);
2967  }
2968 
2969 
2970 
2971  inline SparseMatrix::iterator
2972  SparseMatrix::end(const size_type r)
2973  {
2974  Assert(r < m(), ExcIndexRange(r, 0, m()));
2975 
2976  // place the iterator on the first entry
2977  // past this line, or at the end of the
2978  // matrix
2979  for (size_type i = r + 1; i < m(); ++i)
2980  if (in_local_range(i) && (row_length(i) > 0))
2981  return iterator(this, i, 0);
2982 
2983  // if there is no such line, then take the
2984  // end iterator of the matrix
2985  return end();
2986  }
2987 
2988 
2989 
2990  inline bool
2991  SparseMatrix::in_local_range(const size_type index) const
2992  {
2993  TrilinosWrappers::types::int_type begin, end;
2994 # ifndef DEAL_II_WITH_64BIT_INDICES
2995  begin = matrix->RowMap().MinMyGID();
2996  end = matrix->RowMap().MaxMyGID() + 1;
2997 # else
2998  begin = matrix->RowMap().MinMyGID64();
2999  end = matrix->RowMap().MaxMyGID64() + 1;
3000 # endif
3001 
3002  return ((index >= static_cast<size_type>(begin)) &&
3003  (index < static_cast<size_type>(end)));
3004  }
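 // Illustrative usage sketch (editor's example, not part of the header):
 // direct element access is only safe for rows in the local range.
 //
 //   if (A.in_local_range(i))
 //     {
 //       const TrilinosScalar d = A.diag_element(i);
 //       // ... use d ...
 //     }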
3005 
3006 
3007 
3008  inline bool
 3009  SparseMatrix::is_compressed() const
 3010  {
3011  return compressed;
3012  }
3013 
3014 
3015 
3016  // Inline the set() and add() functions, since they will be called
3017  // frequently, and the compiler can optimize away some unnecessary loops
3018  // when the sizes are given at compile time.
3019  inline void
3020  SparseMatrix::set(const size_type i,
3021  const size_type j,
3022  const TrilinosScalar value)
3023  {
3024  AssertIsFinite(value);
3025 
3026  set(i, 1, &j, &value, false);
3027  }
3028 
3029 
3030 
3031  inline void
3032  SparseMatrix::set(const std::vector<size_type> & indices,
3033  const FullMatrix<TrilinosScalar> &values,
3034  const bool elide_zero_values)
3035  {
3036  Assert(indices.size() == values.m(),
3037  ExcDimensionMismatch(indices.size(), values.m()));
3038  Assert(values.m() == values.n(), ExcNotQuadratic());
3039 
3040  for (size_type i = 0; i < indices.size(); ++i)
3041  set(indices[i],
3042  indices.size(),
3043  indices.data(),
3044  &values(i, 0),
3045  elide_zero_values);
3046  }
3047 
3048 
3049 
3050  inline void
3051  SparseMatrix::add(const size_type i,
3052  const size_type j,
3053  const TrilinosScalar value)
3054  {
3055  AssertIsFinite(value);
3056 
3057  if (value == 0)
3058  {
3059  // we have to check after Insert/Add in any case to be consistent
3060  // with the MPI communication model, but we can save some
3061  // work if the addend is zero. However, these actions are done in case
3062  // we pass on to the other function.
3063 
3064  // TODO: fix this (do not run compress here, but fail)
3065  if (last_action == Insert)
3066  {
3067  int ierr;
3068  ierr = matrix->GlobalAssemble(*column_space_map,
3069  matrix->RowMap(),
3070  false);
3071 
3072  Assert(ierr == 0, ExcTrilinosError(ierr));
3073  (void)ierr; // removes -Wunused-but-set-variable in optimized mode
3074  }
3075 
3076  last_action = Add;
3077 
3078  return;
3079  }
3080  else
3081  add(i, 1, &j, &value, false);
3082  }
3083 
3084 
3085 
3086  // inline "simple" functions that are called frequently and do only involve
3087  // a call to some Trilinos function.
 3088  inline SparseMatrix::size_type
 3089  SparseMatrix::m() const
3090  {
3091 # ifndef DEAL_II_WITH_64BIT_INDICES
3092  return matrix->NumGlobalRows();
3093 # else
3094  return matrix->NumGlobalRows64();
3095 # endif
3096  }
3097 
3098 
3099 
 3100  inline SparseMatrix::size_type
 3101  SparseMatrix::n() const
3102  {
3103  // If the matrix structure has not been fixed (i.e., we did not have a
3104  // sparsity pattern), it does not know about the number of columns so we
3105  // must always take this from the additional column space map
3106  Assert(column_space_map.get() != nullptr, ExcInternalError());
3107 # ifndef DEAL_II_WITH_64BIT_INDICES
3108  return column_space_map->NumGlobalElements();
3109 # else
3110  return column_space_map->NumGlobalElements64();
3111 # endif
3112  }
3113 
3114 
3115 
3116  inline unsigned int
3117  SparseMatrix::local_size() const
3118  {
3119  return matrix->NumMyRows();
3120  }
3121 
3122 
3123 
3124  inline std::pair<SparseMatrix::size_type, SparseMatrix::size_type>
 3125  SparseMatrix::local_range() const
 3126  {
3127  size_type begin, end;
3128 # ifndef DEAL_II_WITH_64BIT_INDICES
3129  begin = matrix->RowMap().MinMyGID();
3130  end = matrix->RowMap().MaxMyGID() + 1;
3131 # else
3132  begin = matrix->RowMap().MinMyGID64();
3133  end = matrix->RowMap().MaxMyGID64() + 1;
3134 # endif
3135 
3136  return std::make_pair(begin, end);
3137  }
3138 
3139 
3140 
 3141  inline SparseMatrix::size_type
 3142  SparseMatrix::n_nonzero_elements() const
 3143  {
3144 # ifndef DEAL_II_WITH_64BIT_INDICES
3145  return matrix->NumGlobalNonzeros();
3146 # else
3147  return matrix->NumGlobalNonzeros64();
3148 # endif
3149  }
3150 
3151 
3152 
3153  template <typename SparsityPatternType>
3154  inline void
3155  SparseMatrix::reinit(const IndexSet & parallel_partitioning,
3156  const SparsityPatternType &sparsity_pattern,
3157  const MPI_Comm & communicator,
3158  const bool exchange_data)
3159  {
3160  reinit(parallel_partitioning,
3161  parallel_partitioning,
3162  sparsity_pattern,
3163  communicator,
3164  exchange_data);
3165  }
3166 
3167 
3168 
3169  template <typename number>
3170  inline void
3171  SparseMatrix::reinit(const IndexSet &parallel_partitioning,
3172  const ::SparseMatrix<number> &sparse_matrix,
3173  const MPI_Comm & communicator,
3174  const double drop_tolerance,
3175  const bool copy_values,
3176  const ::SparsityPattern * use_this_sparsity)
3177  {
3178  Epetra_Map map =
3179  parallel_partitioning.make_trilinos_map(communicator, false);
3180  reinit(parallel_partitioning,
3181  parallel_partitioning,
3182  sparse_matrix,
3183  drop_tolerance,
3184  copy_values,
3185  use_this_sparsity);
3186  }
3187 
3188 
3189 
3190  inline const Epetra_CrsMatrix &
 3191  SparseMatrix::trilinos_matrix() const
 3192  {
3193  return static_cast<const Epetra_CrsMatrix &>(*matrix);
3194  }
3195 
3196 
3197 
3198  inline const Epetra_CrsGraph &
 3199  SparseMatrix::trilinos_sparsity_pattern() const
 3200  {
3201  return matrix->Graph();
3202  }
3203 
3204 
3205 
3206  inline IndexSet
 3207  SparseMatrix::locally_owned_domain_indices() const
 3208  {
3209  return IndexSet(matrix->DomainMap());
3210  }
3211 
3212 
3213 
3214  inline IndexSet
 3215  SparseMatrix::locally_owned_range_indices() const
 3216  {
3217  return IndexSet(matrix->RangeMap());
3218  }
3219 
3220 
3221 
3222  inline void
 3223  SparseMatrix::prepare_add()
 3224  {
3225  // nothing to do here
3226  }
3227 
3228 
3229 
3230  inline void
 3231  SparseMatrix::prepare_set()
 3232  {
3233  // nothing to do here
3234  }
3235 
3236 
3237  namespace internal
3238  {
3239  namespace LinearOperatorImplementation
3240  {
3241  template <typename Solver, typename Preconditioner>
3242  typename std::enable_if<
3243  std::is_base_of<TrilinosWrappers::SolverBase, Solver>::value &&
3244  std::is_base_of<TrilinosWrappers::PreconditionBase,
3245  Preconditioner>::value,
3246  TrilinosPayload>::type
3248  Solver & solver,
3249  const Preconditioner &preconditioner) const
3250  {
3251  const auto &payload = *this;
3252 
3253  TrilinosPayload return_op(payload);
3254 
3255  // Capture by copy so the payloads are always valid
3256 
3257  return_op.inv_vmult = [payload, &solver, &preconditioner](
3258  TrilinosPayload::Domain & tril_dst,
3259  const TrilinosPayload::Range &tril_src) {
3260  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
3261  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
3262  Assert(&tril_src != &tril_dst,
 3263  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 3264  internal::check_vector_map_equality(payload,
3265  tril_src,
3266  tril_dst,
3267  !payload.UseTranspose());
3268  solver.solve(payload, tril_dst, tril_src, preconditioner);
3269  };
3270 
3271  return_op.inv_Tvmult = [payload, &solver, &preconditioner](
3272  TrilinosPayload::Range & tril_dst,
3273  const TrilinosPayload::Domain &tril_src) {
3274  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
3275  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
3276  Assert(&tril_src != &tril_dst,
 3277  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 3278  internal::check_vector_map_equality(payload,
3279  tril_src,
3280  tril_dst,
3281  payload.UseTranspose());
3282 
3283  const_cast<TrilinosPayload &>(payload).transpose();
3284  solver.solve(payload, tril_dst, tril_src, preconditioner);
3285  const_cast<TrilinosPayload &>(payload).transpose();
3286  };
3287 
3288  // If the input operator is already setup for transpose operations, then
3289  // we must do similar with its inverse.
3290  if (return_op.UseTranspose() == true)
3291  std::swap(return_op.inv_vmult, return_op.inv_Tvmult);
3292 
3293  return return_op;
3294  }
3295 
3296  template <typename Solver, typename Preconditioner>
3297  typename std::enable_if<
3298  !(std::is_base_of<TrilinosWrappers::SolverBase, Solver>::value &&
3299  std::is_base_of<TrilinosWrappers::PreconditionBase,
3300  Preconditioner>::value),
3301  TrilinosPayload>::type
3302  TrilinosPayload::inverse_payload(Solver &, const Preconditioner &) const
3303  {
3304  TrilinosPayload return_op(*this);
3305 
3306  return_op.inv_vmult = [](TrilinosPayload::Domain &,
3307  const TrilinosPayload::Range &) {
3308  AssertThrow(false,
3309  ExcMessage("Payload inv_vmult disabled because of "
3310  "incompatible solver/preconditioner choice."));
3311  };
3312 
3313  return_op.inv_Tvmult = [](TrilinosPayload::Range &,
3314  const TrilinosPayload::Domain &) {
3315  AssertThrow(false,
3316  ExcMessage("Payload inv_vmult disabled because of "
3317  "incompatible solver/preconditioner choice."));
3318  };
3319 
3320  return return_op;
3321  }
3322  } // namespace LinearOperatorImplementation
3323  } // namespace internal
3324 
3325 # endif // DOXYGEN
3326 
3327 } /* namespace TrilinosWrappers */
3328 
3329 
3330 DEAL_II_NAMESPACE_CLOSE
3331 
3332 
3333 # endif // DEAL_II_WITH_TRILINOS
3334 
3335 
3336 /*----------------------- trilinos_sparse_matrix.h --------------------*/
3337 
3338 #endif
3339 /*----------------------- trilinos_sparse_matrix.h --------------------*/