Reference documentation for deal.II version 9.1.0-pre
petsc_parallel_sparse_matrix.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/petsc_sparse_matrix.h>

#ifdef DEAL_II_WITH_PETSC

# include <deal.II/base/mpi.h>

# include <deal.II/lac/dynamic_sparsity_pattern.h>
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_compatibility.h>
# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/sparsity_pattern.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    SparseMatrix::SparseMatrix()
      : communicator(MPI_COMM_SELF)
    {
      // just like for vectors: since we create an empty matrix, we can as
      // well make it sequential
      const int m = 0, n = 0, n_nonzero_per_row = 0;
      const PetscErrorCode ierr = MatCreateSeqAIJ(
        PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    SparseMatrix::~SparseMatrix()
    {
      destroy_matrix(matrix);
    }

    SparseMatrix::SparseMatrix(const MPI_Comm &communicator,
                               const size_type m,
                               const size_type n,
                               const size_type local_rows,
                               const size_type local_columns,
                               const size_type n_nonzero_per_row,
                               const bool      is_symmetric,
                               const size_type n_offdiag_nonzero_per_row)
      : communicator(communicator)
    {
      do_reinit(m,
                n,
                local_rows,
                local_columns,
                n_nonzero_per_row,
                is_symmetric,
                n_offdiag_nonzero_per_row);
    }
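
    // Usage (a minimal sketch, not part of the library source): build a
    // 100x100 matrix over two ranks of MPI_COMM_WORLD, each owning 50 rows
    // and columns, with at most 5 nonzeros per row in the locally owned
    // column block and at most 2 per row in the off-processor block:
    //
    //   PETScWrappers::MPI::SparseMatrix A(
    //     MPI_COMM_WORLD, 100, 100, 50, 50, 5, /*is_symmetric=*/false, 2);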


    SparseMatrix::SparseMatrix(
      const MPI_Comm &              communicator,
      const size_type               m,
      const size_type               n,
      const size_type               local_rows,
      const size_type               local_columns,
      const std::vector<size_type> &row_lengths,
      const bool                    is_symmetric,
      const std::vector<size_type> &offdiag_row_lengths)
      : communicator(communicator)
    {
      do_reinit(m,
                n,
                local_rows,
                local_columns,
                row_lengths,
                is_symmetric,
                offdiag_row_lengths);
    }


    template <typename SparsityPatternType>
    SparseMatrix::SparseMatrix(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
      : communicator(communicator)
    {
      do_reinit(sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }
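
    // Usage (a minimal sketch, not part of the library source; 'sp' is an
    // assumed, fully built sparsity pattern known on every process): on two
    // ranks, let each process own half of the 100 rows and columns:
    //
    //   const std::vector<PETScWrappers::MPI::SparseMatrix::size_type>
    //     rows_per_proc = {50, 50};
    //   PETScWrappers::MPI::SparseMatrix A(
    //     MPI_COMM_WORLD,
    //     sp,
    //     rows_per_proc,
    //     rows_per_proc,
    //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD),
    //     /*preset_nonzero_locations=*/true);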


    void
    SparseMatrix::reinit(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      this->communicator = other.communicator;

      PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    SparseMatrix &
    SparseMatrix::operator=(const value_type d)
    {
      MatrixBase::operator=(d);
      return *this;
    }

    void
    SparseMatrix::copy_from(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      this->communicator = other.communicator;

      const PetscErrorCode ierr =
        MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }
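
    // Note the difference between the two functions above: reinit(other)
    // duplicates only the sparsity pattern of 'other' and leaves the values
    // unset (MAT_DO_NOT_COPY_VALUES), whereas copy_from(other) copies the
    // values and, because it uses SAME_NONZERO_PATTERN, requires that both
    // matrices already share the same nonzero pattern.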

    void
    SparseMatrix::reinit(const MPI_Comm &communicator,
                         const size_type m,
                         const size_type n,
                         const size_type local_rows,
                         const size_type local_columns,
                         const size_type n_nonzero_per_row,
                         const bool      is_symmetric,
                         const size_type n_offdiag_nonzero_per_row)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(m,
                n,
                local_rows,
                local_columns,
                n_nonzero_per_row,
                is_symmetric,
                n_offdiag_nonzero_per_row);
    }


    void
    SparseMatrix::reinit(const MPI_Comm &              communicator,
                         const size_type               m,
                         const size_type               n,
                         const size_type               local_rows,
                         const size_type               local_columns,
                         const std::vector<size_type> &row_lengths,
                         const bool                    is_symmetric,
                         const std::vector<size_type> &offdiag_row_lengths)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(m,
                n,
                local_rows,
                local_columns,
                row_lengths,
                is_symmetric,
                offdiag_row_lengths);
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }

    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet &           local_rows,
                         const IndexSet &           local_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm &           communicator)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(local_rows, local_columns, sparsity_pattern);
    }
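
    // Usage (a minimal sketch, not part of the library source; 'dof_handler'
    // is an assumed, distributed DoFHandler from user code): this overload is
    // the typical way to set up the matrix from an IndexSet of locally owned
    // rows plus a sparsity pattern:
    //
    //   const IndexSet locally_owned = dof_handler.locally_owned_dofs();
    //   DynamicSparsityPattern dsp(locally_owned.size());
    //   DoFTools::make_sparsity_pattern(dof_handler, dsp);
    //   PETScWrappers::MPI::SparseMatrix A;
    //   A.reinit(locally_owned, locally_owned, dsp, MPI_COMM_WORLD);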

    void
    SparseMatrix::do_reinit(const size_type m,
                            const size_type n,
                            const size_type local_rows,
                            const size_type local_columns,
                            const size_type n_nonzero_per_row,
                            const bool      is_symmetric,
                            const size_type n_offdiag_nonzero_per_row)
    {
      Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));

      // use the call sequence indicating only a maximal number of elements
      // per row for all rows globally
      const PetscErrorCode ierr = MatCreateAIJ(communicator,
                                               local_rows,
                                               local_columns,
                                               m,
                                               n,
                                               n_nonzero_per_row,
                                               nullptr,
                                               n_offdiag_nonzero_per_row,
                                               nullptr,
                                               &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

      // set symmetric flag, if so requested
      if (is_symmetric == true)
        {
          set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
        }
    }
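
    // Note on the MatCreateAIJ call above: PETSc stores each locally owned
    // block of rows in two parts, a "diagonal" part for the columns that are
    // also locally owned and an "off-diagonal" part for all remaining
    // columns. n_nonzero_per_row and n_offdiag_nonzero_per_row preallocate
    // these two parts independently, which is why both numbers are passed.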


    void
    SparseMatrix::do_reinit(const size_type               m,
                            const size_type               n,
                            const size_type               local_rows,
                            const size_type               local_columns,
                            const std::vector<size_type> &row_lengths,
                            const bool                    is_symmetric,
                            const std::vector<size_type> &offdiag_row_lengths)
    {
      Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));

      Assert(row_lengths.size() == m,
             ExcDimensionMismatch(row_lengths.size(), m));

      // For the case that local_columns is smaller than one of the row
      // lengths, MatCreateMPIAIJ throws an error. In this case use a
      // PETScWrappers::SparseMatrix
      for (size_type i = 0; i < row_lengths.size(); ++i)
        Assert(row_lengths[i] <= local_columns,
               ExcIndexRange(row_lengths[i], 1, local_columns + 1));

      // use the call sequence indicating a maximal number of elements for
      // each row individually. annoyingly, we always use unsigned ints for
      // cases like this, while PETSc wants to see signed integers. so we
      // have to convert, unless we want to play dirty tricks with
      // conversions of pointers
      const std::vector<PetscInt> int_row_lengths(row_lengths.begin(),
                                                  row_lengths.end());
      const std::vector<PetscInt> int_offdiag_row_lengths(
        offdiag_row_lengths.begin(), offdiag_row_lengths.end());

      // TODO: There must be a significantly better way to provide
      // information about the off-diagonal blocks of the matrix. this way,
      // petsc keeps allocating tiny chunks of memory, and gets completely
      // hung up over this
      const PetscErrorCode ierr =
        MatCreateAIJ(communicator,
                     local_rows,
                     local_columns,
                     m,
                     n,
                     0,
                     int_row_lengths.data(),
                     0,
                     offdiag_row_lengths.size() ?
                       int_offdiag_row_lengths.data() :
                       nullptr,
                     &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // TODO: Sometimes the actual number of nonzero entries allocated is
      // greater than the number of nonzero entries, which petsc will
      // complain about unless explicitly disabled with MatSetOption. There
      // is probably a way to prevent a different number of nonzero elements
      // being allocated in the first place. (See also previous TODO).
      set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

      // set symmetric flag, if so requested
      if (is_symmetric == true)
        {
          set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
        }
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const IndexSet &           local_rows,
                            const IndexSet &           local_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows"));
      Assert(
        sparsity_pattern.n_cols() == local_columns.size(),
        ExcMessage(
          "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
      Assert(local_rows.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());

# ifdef DEBUG
      {
        // check indexsets
        types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage(
                 std::string(
                   "Each row has to be owned by exactly one owner (n_rows()=") +
                 Utilities::to_string(sparsity_pattern.n_rows()) +
                 " but sum(local_rows.n_elements())=" +
                 Utilities::to_string(row_owners) + ")"));
        Assert(
          col_owners == sparsity_pattern.n_cols(),
          ExcMessage(
            std::string(
              "Each column has to be owned by exactly one owner (n_cols()=") +
            Utilities::to_string(sparsity_pattern.n_cols()) +
            " but sum(local_columns.n_elements())=" +
            Utilities::to_string(col_owners) + ")"));
      }
# endif


      // create the matrix. We do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetType(matrix, MATMPIAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));


      // next preset the exact given matrix entries with zeros. this doesn't
      // avoid any memory allocations, but it at least avoids some searches
      // later on. the key here is that we can use the matrix set routines
      // that set an entire row at once, not a single entry at a time
      //
      // for the usefulness of this option read the documentation of this
      // class.
      // if (preset_nonzero_locations == true)
      if (local_rows.n_elements() > 0)
        {
          // MatMPIAIJSetPreallocationCSR can be used to allocate the
          // sparsity pattern of a matrix

          const PetscInt local_row_start = local_rows.nth_index_in_set(0);
          const PetscInt local_row_end =
            local_row_start + local_rows.n_elements();


          // first set up the column number array for the rows to be stored
          // on the local processor. have one dummy entry at the end to make
          // sure petsc doesn't read past the end
          std::vector<PetscInt>
            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            unsigned int n_cols = 0;
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                const PetscInt row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the information from the sparsity pattern.
          {
            PetscInt *ptr = &colnums_in_window[0];
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }


          // then call the petsc function that summarily allocates these
          // entries:
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      else
        {
          PetscInt i = 0;
          ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      compress(dealii::VectorOperation::insert);

      {
        close_matrix(matrix);
        set_keep_zero_rows(matrix);
      }
    }
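
    // Note on the MatMPIAIJSetPreallocationCSR calls above: the row pointer
    // array is indexed by local rows (its first entry is zero), while the
    // column array holds global column indices. PETSc only copies this CSR
    // description to preallocate; entries later set at exactly these
    // locations then require no further memory allocation.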


    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      Assert(local_rows_per_process.size() == local_columns_per_process.size(),
             ExcDimensionMismatch(local_rows_per_process.size(),
                                  local_columns_per_process.size()));
      Assert(this_process < local_rows_per_process.size(), ExcInternalError());

      // for each row that we own locally, we have to count how many of the
      // entries in the sparsity pattern lie in the column area we have
      // locally, and how many aren't. for this, we first have to know which
      // areas are ours
      size_type local_row_start = 0;
      size_type local_col_start = 0;
      for (unsigned int p = 0; p < this_process; ++p)
        {
          local_row_start += local_rows_per_process[p];
          local_col_start += local_columns_per_process[p];
        }
      const size_type local_row_end =
        local_row_start + local_rows_per_process[this_process];

      // create the matrix. We do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows_per_process[this_process],
                         local_columns_per_process[this_process],
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetType(matrix, MATMPIAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // next preset the exact given matrix entries with zeros, if the user
      // requested so. this doesn't avoid any memory allocations, but it at
      // least avoids some searches later on. the key here is that we can use
      // the matrix set routines that set an entire row at once, not a single
      // entry at a time
      //
      // for the usefulness of this option read the documentation of this
      // class.
      if (preset_nonzero_locations == true)
        {
          // MatMPIAIJSetPreallocationCSR can be used to allocate the
          // sparsity pattern of a matrix if it is already available:

          // first set up the column number array for the rows to be stored
          // on the local processor. have one dummy entry at the end to make
          // sure petsc doesn't read past the end
          std::vector<PetscInt>
            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            size_type n_cols = 0;
            for (size_type i = local_row_start; i < local_row_end; ++i)
              {
                const size_type row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the information from the sparsity pattern.
          {
            PetscInt *ptr = &colnums_in_window[0];
            for (size_type i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }


          // then call the petsc function that summarily allocates these
          // entries:
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));

          compress(dealii::VectorOperation::insert);
          set_keep_zero_rows(matrix);
        }
    }

    // explicit instantiations
    //
    template SparseMatrix::SparseMatrix(const MPI_Comm &,
                                        const SparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);
    template SparseMatrix::SparseMatrix(const MPI_Comm &,
                                        const DynamicSparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);

    template void
    SparseMatrix::reinit(const MPI_Comm &,
                         const SparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);
    template void
    SparseMatrix::reinit(const MPI_Comm &,
                         const DynamicSparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const SparsityPattern &,
                         const MPI_Comm &);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const DynamicSparsityPattern &,
                         const MPI_Comm &);

    template void
    SparseMatrix::do_reinit(const SparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);
    template void
    SparseMatrix::do_reinit(const DynamicSparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);

    template void
    SparseMatrix::do_reinit(const IndexSet &,
                            const IndexSet &,
                            const SparsityPattern &);

    template void
    SparseMatrix::do_reinit(const IndexSet &,
                            const IndexSet &,
                            const DynamicSparsityPattern &);


    PetscScalar
    SparseMatrix::matrix_norm_square(const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note, that v*tmp returns sum_i conjugate(v)_i * tmp_i
      return v * tmp;
    }

    PetscScalar
    SparseMatrix::matrix_scalar_product(const Vector &u, const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note, that u*tmp returns sum_i conjugate(u)_i * tmp_i
      return u * tmp;
    }
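
    // In other words, matrix_norm_square(v) computes v^H A v and
    // matrix_scalar_product(u, v) computes u^H A v, where ^H denotes the
    // conjugate transpose; for a real-valued PetscScalar these reduce to
    // v^T A v and u^T A v.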

    IndexSet
    SparseMatrix::locally_owned_domain_indices() const
    {
      PetscInt       n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_cols == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));

      IndexSet indices(n_cols);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }

    IndexSet
    SparseMatrix::locally_owned_range_indices() const
    {
      PetscInt       n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRange(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_rows == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));

      IndexSet indices(n_rows);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }
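
    // Note: the "domain" of the matrix is indexed by its columns (the layout
    // of vectors x in y = A x, queried above via MatGetOwnershipRangeColumn),
    // while the "range" is indexed by its rows (the layout of y, queried via
    // MatGetOwnershipRange).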

    void
    SparseMatrix::mmult(SparseMatrix &      C,
                        const SparseMatrix &B,
                        const MPI::Vector & V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the
      // compiler automatically casts the arguments).
      MatrixBase::mmult(C, B, V);
    }

    void
    SparseMatrix::Tmmult(SparseMatrix &      C,
                         const SparseMatrix &B,
                         const MPI::Vector & V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the
      // compiler automatically casts the arguments).
      MatrixBase::Tmmult(C, B, V);
    }

  } // namespace MPI
} // namespace PETScWrappers


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC