Reference documentation for deal.II version 9.1.0-pre
trilinos_block_sparse_matrix.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_block_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/lac/block_sparse_matrix.h>
#  include <deal.II/lac/block_sparsity_pattern.h>

DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
  BlockSparseMatrix::~BlockSparseMatrix()
  {
    // delete previous content of the subobjects array; swallow any
    // exception because a destructor must not propagate exceptions
    try
      {
        clear();
      }
    catch (...)
      {}
  }


  void
  BlockSparseMatrix::reinit(const size_type n_block_rows,
                            const size_type n_block_columns)
  {
    // first delete previous content of the subobjects array
    clear();

    // then resize. set sizes of blocks to zero; the user will later
    // have to call collect_sizes() for this
    this->sub_objects.reinit(n_block_rows, n_block_columns);
    this->row_block_indices.reinit(n_block_rows, 0);
    this->column_block_indices.reinit(n_block_columns, 0);

    // and reinitialize the blocks
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          BlockType *p = new BlockType();

          Assert(this->sub_objects[r][c] == nullptr, ExcInternalError());
          this->sub_objects[r][c] = p;
        }
  }
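
  // Usage sketch (editorial illustration, not part of the library
  // source; `owned` and `bsp` are placeholder names): after this
  // overload every block is empty, so each block has to be
  // reinitialized individually and collect_sizes() called at the end:
  //
  //   TrilinosWrappers::BlockSparseMatrix A;
  //   A.reinit(2, 2);
  //   for (unsigned int r = 0; r < 2; ++r)
  //     for (unsigned int c = 0; c < 2; ++c)
  //       A.block(r, c).reinit(owned[r], owned[c], bsp.block(r, c),
  //                            MPI_COMM_WORLD);
  //   A.collect_sizes();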


  template <typename BlockSparsityPatternType>
  void
  BlockSparseMatrix::reinit(
    const std::vector<Epetra_Map> & parallel_partitioning,
    const BlockSparsityPatternType &block_sparsity_pattern,
    const bool                      exchange_data)
  {
    Assert(parallel_partitioning.size() ==
             block_sparsity_pattern.n_block_rows(),
           ExcDimensionMismatch(parallel_partitioning.size(),
                                block_sparsity_pattern.n_block_rows()));
    Assert(parallel_partitioning.size() ==
             block_sparsity_pattern.n_block_cols(),
           ExcDimensionMismatch(parallel_partitioning.size(),
                                block_sparsity_pattern.n_block_cols()));

    const size_type n_block_rows = parallel_partitioning.size();
    (void)n_block_rows;

    Assert(n_block_rows == block_sparsity_pattern.n_block_rows(),
           ExcDimensionMismatch(n_block_rows,
                                block_sparsity_pattern.n_block_rows()));
    Assert(n_block_rows == block_sparsity_pattern.n_block_cols(),
           ExcDimensionMismatch(n_block_rows,
                                block_sparsity_pattern.n_block_cols()));


    // Call the other basic reinit function, ...
    reinit(block_sparsity_pattern.n_block_rows(),
           block_sparsity_pattern.n_block_cols());

    // ... set the correct sizes, ...
    this->row_block_indices    = block_sparsity_pattern.get_row_indices();
    this->column_block_indices = block_sparsity_pattern.get_column_indices();

    // ... and then assign the correct data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(parallel_partitioning[r],
                                          parallel_partitioning[c],
                                          block_sparsity_pattern.block(r, c),
                                          exchange_data);
        }
  }


  template <typename BlockSparsityPatternType>
  void
  BlockSparseMatrix::reinit(
    const std::vector<IndexSet> &   parallel_partitioning,
    const BlockSparsityPatternType &block_sparsity_pattern,
    const MPI_Comm &                communicator,
    const bool                      exchange_data)
  {
    // convert the index sets to Epetra maps and defer to the
    // Epetra_Map-based reinit function
    std::vector<Epetra_Map> epetra_maps;
    for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
      epetra_maps.push_back(
        parallel_partitioning[i].make_trilinos_map(communicator, false));

    reinit(epetra_maps, block_sparsity_pattern, exchange_data);
  }
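
  // Usage sketch (editorial illustration; `owned_partitioning` and
  // `bdsp` are placeholder names for a std::vector<IndexSet> and a
  // BlockDynamicSparsityPattern): this IndexSet-based overload is the
  // one typically used in MPI-parallel programs:
  //
  //   TrilinosWrappers::BlockSparseMatrix A;
  //   A.reinit(owned_partitioning, bdsp, MPI_COMM_WORLD, false);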


  template <typename BlockSparsityPatternType>
  void
  BlockSparseMatrix::reinit(
    const BlockSparsityPatternType &block_sparsity_pattern)
  {
    // create a serial map owning all rows of each block row and defer
    // to the Epetra_Map-based reinit function
    std::vector<Epetra_Map> parallel_partitioning;
    for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
      parallel_partitioning.emplace_back(
        static_cast<TrilinosWrappers::types::int_type>(
          block_sparsity_pattern.block(i, 0).n_rows()),
        0,
        Utilities::Trilinos::comm_self());

    reinit(parallel_partitioning, block_sparsity_pattern);
  }


  template <>
  void
  BlockSparseMatrix::reinit(const BlockSparsityPattern &block_sparsity_pattern)
  {
    // Call the other basic reinit function, ...
    reinit(block_sparsity_pattern.n_block_rows(),
           block_sparsity_pattern.n_block_cols());

    // ... set the correct sizes, ...
    this->row_block_indices    = block_sparsity_pattern.get_row_indices();
    this->column_block_indices = block_sparsity_pattern.get_column_indices();

    // ... and then assign the correct data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(block_sparsity_pattern.block(r, c));
        }
  }
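
  // Usage sketch (editorial illustration; `owned_partitioning` is a
  // placeholder): this specialization takes a
  // TrilinosWrappers::BlockSparsityPattern, whose blocks already store
  // their own parallel distribution, so no partitioning is passed:
  //
  //   TrilinosWrappers::BlockSparsityPattern bsp(owned_partitioning,
  //                                              MPI_COMM_WORLD);
  //   // ... fill the pattern, then bsp.compress();
  //   TrilinosWrappers::BlockSparseMatrix A;
  //   A.reinit(bsp);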


  void
  BlockSparseMatrix::reinit(
    const std::vector<Epetra_Map> &            parallel_partitioning,
    const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
    const double                               drop_tolerance)
  {
    const size_type n_block_rows = parallel_partitioning.size();

    Assert(n_block_rows == dealii_block_sparse_matrix.n_block_rows(),
           ExcDimensionMismatch(n_block_rows,
                                dealii_block_sparse_matrix.n_block_rows()));
    Assert(n_block_rows == dealii_block_sparse_matrix.n_block_cols(),
           ExcDimensionMismatch(n_block_rows,
                                dealii_block_sparse_matrix.n_block_cols()));

    // Call the other basic reinit function ...
    reinit(n_block_rows, n_block_rows);

    // ... and then assign the correct data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(parallel_partitioning[r],
                                          parallel_partitioning[c],
                                          dealii_block_sparse_matrix.block(r, c),
                                          drop_tolerance);
        }

    collect_sizes();
  }
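
  // Usage sketch (editorial illustration; `maps` and `serial_matrix`
  // are placeholder names): copy a deal.II block matrix into Trilinos
  // format, dropping entries whose magnitude is below 1e-13; note that
  // this function already calls collect_sizes() itself:
  //
  //   TrilinosWrappers::BlockSparseMatrix A;
  //   A.reinit(maps, serial_matrix, 1e-13);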


  void
  BlockSparseMatrix::reinit(
    const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
    const double                               drop_tolerance)
  {
    Assert(dealii_block_sparse_matrix.n_block_rows() ==
             dealii_block_sparse_matrix.n_block_cols(),
           ExcDimensionMismatch(dealii_block_sparse_matrix.n_block_rows(),
                                dealii_block_sparse_matrix.n_block_cols()));
    Assert(dealii_block_sparse_matrix.m() == dealii_block_sparse_matrix.n(),
           ExcDimensionMismatch(dealii_block_sparse_matrix.m(),
                                dealii_block_sparse_matrix.n()));

    // produce a dummy local map and pass it off to the other function
#  ifdef DEAL_II_WITH_MPI
    Epetra_MpiComm trilinos_communicator(MPI_COMM_SELF);
#  else
    Epetra_SerialComm trilinos_communicator;
#  endif

    std::vector<Epetra_Map> parallel_partitioning;
    for (size_type i = 0; i < dealii_block_sparse_matrix.n_block_rows(); ++i)
      parallel_partitioning.emplace_back(
        static_cast<TrilinosWrappers::types::int_type>(
          dealii_block_sparse_matrix.block(i, 0).m()),
        0,
        trilinos_communicator);

    reinit(parallel_partitioning, dealii_block_sparse_matrix, drop_tolerance);
  }


  void
  BlockSparseMatrix::collect_sizes()
  {
    // simply forward to the (non-public) function of the base class
    BaseClass::collect_sizes();
  }


  BlockSparseMatrix::size_type
  BlockSparseMatrix::n_nonzero_elements() const
  {
    // accumulate the nonzero counts of all blocks
    size_type n_nonzero = 0;
    for (size_type rows = 0; rows < this->n_block_rows(); ++rows)
      for (size_type cols = 0; cols < this->n_block_cols(); ++cols)
        n_nonzero += this->block(rows, cols).n_nonzero_elements();

    return n_nonzero;
  }


  TrilinosScalar
  BlockSparseMatrix::residual(MPI::BlockVector &      dst,
                              const MPI::BlockVector &x,
                              const MPI::BlockVector &b) const
  {
    // compute dst = -(A x - b) = b - A x and return its l2 norm
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }


  // TODO: In the following we use the same code as just above three
  // more times. Use templates. (A possible templated form is sketched
  // in a comment after the last of these overloads.)
  TrilinosScalar
  BlockSparseMatrix::residual(MPI::BlockVector &      dst,
                              const MPI::Vector &     x,
                              const MPI::BlockVector &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }


  TrilinosScalar
  BlockSparseMatrix::residual(MPI::Vector &           dst,
                              const MPI::BlockVector &x,
                              const MPI::Vector &     b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }


  TrilinosScalar
  BlockSparseMatrix::residual(MPI::Vector &      dst,
                              const MPI::Vector &x,
                              const MPI::Vector &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }
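
  // A possible templated form of the four residual() overloads above
  // (editorial sketch only, not part of the library; it assumes all
  // involved vector types support use with vmult(), operator-=,
  // operator*=, and l2_norm(), and that a matching declaration is
  // added to the class):
  //
  //   template <typename VectorType1, typename VectorType2, typename VectorType3>
  //   TrilinosScalar
  //   BlockSparseMatrix::residual(VectorType1 &      dst,
  //                               const VectorType2 &x,
  //                               const VectorType3 &b) const
  //   {
  //     vmult(dst, x);
  //     dst -= b;
  //     dst *= -1.;
  //     return dst.l2_norm();
  //   }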


  std::vector<Epetra_Map>
  BlockSparseMatrix::domain_partitioner() const
  {
    Assert(this->n_block_cols() != 0, ExcNotInitialized());
    Assert(this->n_block_rows() != 0, ExcNotInitialized());

    std::vector<Epetra_Map> domain_partitioner;
    for (size_type c = 0; c < this->n_block_cols(); ++c)
      domain_partitioner.push_back(
        this->sub_objects[0][c]->domain_partitioner());

    return domain_partitioner;
  }


  std::vector<Epetra_Map>
  BlockSparseMatrix::range_partitioner() const
  {
    Assert(this->n_block_cols() != 0, ExcNotInitialized());
    Assert(this->n_block_rows() != 0, ExcNotInitialized());

    std::vector<Epetra_Map> range_partitioner;
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      range_partitioner.push_back(this->sub_objects[r][0]->range_partitioner());

    return range_partitioner;
  }


  MPI_Comm
  BlockSparseMatrix::get_mpi_communicator() const
  {
    Assert(this->n_block_cols() != 0, ExcNotInitialized());
    Assert(this->n_block_rows() != 0, ExcNotInitialized());
    return this->sub_objects[0][0]->get_mpi_communicator();
  }


  // -------------------- explicit instantiations -----------------------
  //
  template void
  BlockSparseMatrix::reinit(const ::dealii::BlockSparsityPattern &);
  template void
  BlockSparseMatrix::reinit(const ::dealii::BlockDynamicSparsityPattern &);

  template void
  BlockSparseMatrix::reinit(const std::vector<Epetra_Map> &,
                            const ::dealii::BlockSparsityPattern &,
                            const bool);
  template void
  BlockSparseMatrix::reinit(const std::vector<Epetra_Map> &,
                            const ::dealii::BlockDynamicSparsityPattern &,
                            const bool);

  template void
  BlockSparseMatrix::reinit(const std::vector<IndexSet> &,
                            const ::dealii::BlockDynamicSparsityPattern &,
                            const MPI_Comm &,
                            const bool);

} // namespace TrilinosWrappers


DEAL_II_NAMESPACE_CLOSE

#endif