Reference documentation for deal.II version 9.1.0-pre
block_linear_operator.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2010 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_block_linear_operator_h
17 #define dealii_block_linear_operator_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/exceptions.h>
22 
23 #include <deal.II/lac/linear_operator.h>
24 
25 
26 DEAL_II_NAMESPACE_OPEN
27 
28 // Forward declarations:
29 
30 namespace internal
31 {
32  namespace BlockLinearOperatorImplementation
33  {
34  template <typename PayloadBlockType =
35  internal::LinearOperatorImplementation::EmptyPayload>
36  class EmptyBlockPayload;
37  }
38 } // namespace internal
39 
40 template <typename Number>
41 class BlockVector;
42 
43 template <typename Range = BlockVector<double>,
44  typename Domain = Range,
45  typename BlockPayload =
46  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
47 class BlockLinearOperator;
48 
49 template <typename Range = BlockVector<double>,
50  typename Domain = Range,
51  typename BlockPayload =
52  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>,
53  typename BlockMatrixType>
54 BlockLinearOperator<Range, Domain, BlockPayload>
55 block_operator(const BlockMatrixType &matrix);
56 
57 template <size_t m,
58  size_t n,
59  typename Range = BlockVector<double>,
60  typename Domain = Range,
61  typename BlockPayload =
62  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
63 BlockLinearOperator<Range, Domain, BlockPayload>
64 block_operator(
65  const std::array<std::array<LinearOperator<typename Range::BlockType,
66  typename Domain::BlockType,
67  typename BlockPayload::BlockType>,
68  n>,
69  m> &);
70 
71 template <size_t m,
72  typename Range = BlockVector<double>,
73  typename Domain = Range,
74  typename BlockPayload =
75  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
76 BlockLinearOperator<Range, Domain, BlockPayload>
77 block_diagonal_operator(
78  const std::array<LinearOperator<typename Range::BlockType,
79  typename Domain::BlockType,
80  typename BlockPayload::BlockType>,
81  m> &);
82 
83 template <size_t m,
84  typename Range = BlockVector<double>,
85  typename Domain = Range,
86  typename BlockPayload =
87  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
88 BlockLinearOperator<Range, Domain, BlockPayload>
89 block_diagonal_operator(
90  const LinearOperator<typename Range::BlockType,
91  typename Domain::BlockType,
92  typename BlockPayload::BlockType> &op);
93 
94 // This is a workaround for a bug in <=gcc-4.7 that does not like partial
95 // template default values in combination with local lambda expressions [1]
96 //
97 // [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53624
98 //
99 // Forward declare functions with partial template defaults:
100 
101 template <typename Range = BlockVector<double>,
102  typename Domain = Range,
103  typename BlockPayload =
104  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>,
105  typename BlockMatrixType>
106 BlockLinearOperator<Range, Domain, BlockPayload>
107 block_diagonal_operator(const BlockMatrixType &block_matrix);
108 
109 template <typename Range = BlockVector<double>,
110  typename Domain = Range,
111  typename BlockPayload =
112  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
113 LinearOperator<Domain, Range, typename BlockPayload::BlockType>
114 block_forward_substitution(
115  const BlockLinearOperator<Range, Domain, BlockPayload> &,
116  const BlockLinearOperator<Domain, Range, BlockPayload> &);
117 
118 template <typename Range = BlockVector<double>,
119  typename Domain = Range,
120  typename BlockPayload =
121  internal::BlockLinearOperatorImplementation::EmptyBlockPayload<>>
122 LinearOperator<Domain, Range, typename BlockPayload::BlockType>
123 block_back_substitution(
124  const BlockLinearOperator<Range, Domain, BlockPayload> &,
125  const BlockLinearOperator<Domain, Range, BlockPayload> &);
126 
127 // end of workaround
128 
129 
130 
200 template <typename Range, typename Domain, typename BlockPayload>
201 class BlockLinearOperator
202  : public LinearOperator<Range, Domain, typename BlockPayload::BlockType>
203 {
204 public:
205  using BlockType = LinearOperator<typename Range::BlockType,
206  typename Domain::BlockType,
207  typename BlockPayload::BlockType>;
208 
216  BlockLinearOperator(const BlockPayload &payload)
217  : LinearOperator<Range, Domain, typename BlockPayload::BlockType>(
218  typename BlockPayload::BlockType(payload, payload))
219  {
220  n_block_rows = []() -> unsigned int {
221  Assert(
222  false,
223  ExcMessage(
224  "Uninitialized BlockLinearOperator<Range, Domain>::n_block_rows called"));
225  return 0;
226  };
227 
228  n_block_cols = []() -> unsigned int {
229  Assert(
230  false,
231  ExcMessage(
232  "Uninitialized BlockLinearOperator<Range, Domain>::n_block_cols called"));
233  return 0;
234  };
235 
236  block = [](unsigned int, unsigned int) -> BlockType {
237  Assert(
238  false,
239  ExcMessage(
240  "Uninitialized BlockLinearOperator<Range, Domain>::block called"));
241  return BlockType();
242  };
243  }
244 
248  BlockLinearOperator(
249  const BlockLinearOperator<Range, Domain, BlockPayload> &) = default;
250 
256  template <typename Op>
257  BlockLinearOperator(const Op &op)
258  {
259  *this = block_operator<Range, Domain, BlockPayload, Op>(op);
260  }
261 
267  template <size_t m, size_t n>
268  BlockLinearOperator(const std::array<std::array<BlockType, n>, m> &ops)
269  {
270  *this = block_operator<m, n, Range, Domain, BlockPayload>(ops);
271  }
272 
278  template <size_t m>
279  BlockLinearOperator(const std::array<BlockType, m> &ops)
280  {
281  *this = block_diagonal_operator<m, Range, Domain, BlockPayload>(ops);
282  }
283 
287  BlockLinearOperator<Range, Domain, BlockPayload> &
288  operator=(const BlockLinearOperator<Range, Domain, BlockPayload> &) = default;
289 
294  template <typename Op>
295  BlockLinearOperator<Range, Domain, BlockPayload> &
296  operator=(const Op &op)
297  {
298  *this = block_operator<Range, Domain, BlockPayload, Op>(op);
299  return *this;
300  }
301 
307  template <size_t m, size_t n>
308  BlockLinearOperator<Range, Domain, BlockPayload> &
309  operator=(const std::array<std::array<BlockType, n>, m> &ops)
310  {
311  *this = block_operator<m, n, Range, Domain, BlockPayload>(ops);
312  return *this;
313  }
314 
320  template <size_t m>
321  BlockLinearOperator<Range, Domain, BlockPayload> &
322  operator=(const std::array<BlockType, m> &ops)
323  {
324  *this = block_diagonal_operator<m, Range, Domain, BlockPayload>(ops);
325  return *this;
326  }
327 
332  std::function<unsigned int()> n_block_rows;
333 
338  std::function<unsigned int()> n_block_cols;
339 
345  std::function<BlockType(unsigned int, unsigned int)> block;
346 };
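
// A minimal usage sketch, assuming an already assembled BlockSparseMatrix<double>
// named `matrix` and two compatible BlockVector<double> objects `dst` and `src`
// (all names are placeholders): a BlockLinearOperator behaves like an ordinary
// LinearOperator and additionally exposes its block structure.
//
//   #include <deal.II/lac/block_sparse_matrix.h>
//   #include <deal.II/lac/block_vector.h>
//
//   const BlockLinearOperator<BlockVector<double>> op = block_operator(matrix);
//
//   const unsigned int m = op.n_block_rows();  // number of block rows
//   const unsigned int n = op.n_block_cols();  // number of block columns
//   const auto op_01     = op.block(0, 1);     // block (0,1) as a LinearOperator
//
//   op.vmult(dst, src);                        // blocked matrix-vector product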
347 
348 
349 namespace internal
350 {
351  namespace BlockLinearOperatorImplementation
352  {
353  // A helper function to apply a given vmult or Tvmult to a vector with
354  // intermediate storage, similar to the corresponding helper
355  // function for LinearOperator. Here, two operations are used:
356  // the first one handles the first "column" and typically does not add,
357  // whereas the second one is normally an adding operation.
358  template <typename Function1,
359  typename Function2,
360  typename Range,
361  typename Domain>
362  void
363  apply_with_intermediate_storage(const Function1 &first_op,
364  const Function2 &loop_op,
365  Range & v,
366  const Domain & u,
367  bool add)
368  {
369  GrowingVectorMemory<Range> vector_memory;
370 
371  typename VectorMemory<Range>::Pointer tmp(vector_memory);
372  tmp->reinit(v, /*bool omit_zeroing_entries =*/true);
373 
374  const unsigned int n = u.n_blocks();
375  const unsigned int m = v.n_blocks();
376 
377  for (unsigned int i = 0; i < m; ++i)
378  {
379  first_op(*tmp, u, i, 0);
380  for (unsigned int j = 1; j < n; ++j)
381  loop_op(*tmp, u, i, j);
382  }
383 
384  if (add)
385  v += *tmp;
386  else
387  v = *tmp;
388  }
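
// For example, the vmult implementation further down passes
//   first_op = [&op](v, u, i, j) { op.block(i, j).vmult(v.block(i), u.block(j)); }
//   loop_op  = [&op](v, u, i, j) { op.block(i, j).vmult_add(v.block(i), u.block(j)); }
// so that the full result is accumulated in the temporary vector and written
// back (or added) to v only at the very end. This is what makes the operation
// safe when v and u refer to the same block vector.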
389 
390  // Populate the LinearOperator interfaces with the help of the
391  // BlockLinearOperator functions
392  template <typename Range, typename Domain, typename BlockPayload>
393  inline void
394  populate_linear_operator_functions(
395  BlockLinearOperator<Range, Domain, BlockPayload> &op)
396  {
397  op.reinit_range_vector = [=](Range &v, bool omit_zeroing_entries) {
398  const unsigned int m = op.n_block_rows();
399 
400  // Reinitialize the block vector to m blocks:
401  v.reinit(m);
402 
403  // And reinitialize every individual block with reinit_range_vectors:
404  for (unsigned int i = 0; i < m; ++i)
405  op.block(i, 0).reinit_range_vector(v.block(i), omit_zeroing_entries);
406 
407  v.collect_sizes();
408  };
409 
410  op.reinit_domain_vector = [=](Domain &v, bool omit_zeroing_entries) {
411  const unsigned int n = op.n_block_cols();
412 
413  // Reinitialize the block vector to n blocks:
414  v.reinit(n);
415 
416  // And reinitialize every individual block with reinit_domain_vectors:
417  for (unsigned int i = 0; i < n; ++i)
418  op.block(0, i).reinit_domain_vector(v.block(i), omit_zeroing_entries);
419 
420  v.collect_sizes();
421  };
422 
423  op.vmult = [&op](Range &v, const Domain &u) {
424  const unsigned int m = op.n_block_rows();
425  const unsigned int n = op.n_block_cols();
426  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
427  Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n));
428 
429  if (PointerComparison::equal(&v, &u))
430  {
431  const auto first_op = [&op](Range & v,
432  const Domain & u,
433  const unsigned int i,
434  const unsigned int j) {
435  op.block(i, j).vmult(v.block(i), u.block(j));
436  };
437 
438  const auto loop_op = [&op](Range & v,
439  const Domain & u,
440  const unsigned int i,
441  const unsigned int j) {
442  op.block(i, j).vmult_add(v.block(i), u.block(j));
443  };
444 
445  apply_with_intermediate_storage(first_op, loop_op, v, u, false);
446  }
447  else
448  {
449  for (unsigned int i = 0; i < m; ++i)
450  {
451  op.block(i, 0).vmult(v.block(i), u.block(0));
452  for (unsigned int j = 1; j < n; ++j)
453  op.block(i, j).vmult_add(v.block(i), u.block(j));
454  }
455  }
456  };
457 
458  op.vmult_add = [&op](Range &v, const Domain &u) {
459  const unsigned int m = op.n_block_rows();
460  const unsigned int n = op.n_block_cols();
461  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
462  Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n));
463 
464  if (PointerComparison::equal(&v, &u))
465  {
466  const auto first_op = [&op](Range & v,
467  const Domain & u,
468  const unsigned int i,
469  const unsigned int j) {
470  op.block(i, j).vmult(v.block(i), u.block(j));
471  };
472 
473  const auto loop_op = [&op](Range & v,
474  const Domain & u,
475  const unsigned int i,
476  const unsigned int j) {
477  op.block(i, j).vmult_add(v.block(i), u.block(j));
478  };
479 
480  apply_with_intermediate_storage(first_op, loop_op, v, u, true);
481  }
482  else
483  {
484  for (unsigned int i = 0; i < m; ++i)
485  for (unsigned int j = 0; j < n; ++j)
486  op.block(i, j).vmult_add(v.block(i), u.block(j));
487  }
488  };
489 
490  op.Tvmult = [&op](Domain &v, const Range &u) {
491  const unsigned int n = op.n_block_cols();
492  const unsigned int m = op.n_block_rows();
493  Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n));
494  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
495 
496  if (PointerComparison::equal(&v, &u))
497  {
498  const auto first_op = [&op](Range & v,
499  const Domain & u,
500  const unsigned int i,
501  const unsigned int j) {
502  op.block(j, i).Tvmult(v.block(i), u.block(j));
503  };
504 
505  const auto loop_op = [&op](Range & v,
506  const Domain & u,
507  const unsigned int i,
508  const unsigned int j) {
509  op.block(j, i).Tvmult_add(v.block(i), u.block(j));
510  };
511 
512  apply_with_intermediate_storage(first_op, loop_op, v, u, false);
513  }
514  else
515  {
516  for (unsigned int i = 0; i < n; ++i)
517  {
518  op.block(0, i).Tvmult(v.block(i), u.block(0));
519  for (unsigned int j = 1; j < m; ++j)
520  op.block(j, i).Tvmult_add(v.block(i), u.block(j));
521  }
522  }
523  };
524 
525  op.Tvmult_add = [&op](Domain &v, const Range &u) {
526  const unsigned int n = op.n_block_cols();
527  const unsigned int m = op.n_block_rows();
528  Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n));
529  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
530 
531  if (PointerComparison::equal(&v, &u))
532  {
533  const auto first_op = [&op](Range & v,
534  const Domain & u,
535  const unsigned int i,
536  const unsigned int j) {
537  op.block(j, i).Tvmult(v.block(i), u.block(j));
538  };
539 
540  const auto loop_op = [&op](Range & v,
541  const Domain & u,
542  const unsigned int i,
543  const unsigned int j) {
544  op.block(j, i).Tvmult_add(v.block(i), u.block(j));
545  };
546 
547  apply_with_intermediate_storage(first_op, loop_op, v, u, true);
548  }
549  else
550  {
551  for (unsigned int i = 0; i < n; ++i)
552  for (unsigned int j = 0; j < m; ++j)
553  op.block(j, i).Tvmult_add(v.block(i), u.block(j));
554  }
555  };
556  }
557 
558 
559 
574  template <typename PayloadBlockType>
575  class EmptyBlockPayload
576  {
577  public:
581  using BlockType = PayloadBlockType;
582 
590  template <typename... Args>
591  EmptyBlockPayload(const Args &...)
592  {}
593  };
594 
595  } // namespace BlockLinearOperatorImplementation
596 } // namespace internal
597 
598 
599 
604 
616 template <typename Range,
617  typename Domain,
618  typename BlockPayload,
619  typename BlockMatrixType>
620 BlockLinearOperator<Range, Domain, BlockPayload>
621 block_operator(const BlockMatrixType &block_matrix)
622 {
623  using BlockType =
624  typename BlockLinearOperator<Range, Domain, BlockPayload>::BlockType;
625 
626  BlockLinearOperator<Range, Domain, BlockPayload> return_op(
627  BlockPayload(block_matrix, block_matrix));
628 
629  return_op.n_block_rows = [&block_matrix]() -> unsigned int {
630  return block_matrix.n_block_rows();
631  };
632 
633  return_op.n_block_cols = [&block_matrix]() -> unsigned int {
634  return block_matrix.n_block_cols();
635  };
636 
637  return_op.block = [&block_matrix](unsigned int i,
638  unsigned int j) -> BlockType {
639 #ifdef DEBUG
640  const unsigned int m = block_matrix.n_block_rows();
641  const unsigned int n = block_matrix.n_block_cols();
642  Assert(i < m, ExcIndexRange(i, 0, m));
643  Assert(j < n, ExcIndexRange(j, 0, n));
644 #endif
645 
646  return BlockType(block_matrix.block(i, j));
647  };
648 
649  populate_linear_operator_functions(return_op);
650  return return_op;
651 }
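
// A usage sketch, assuming an assembled BlockSparseMatrix<double> `matrix` and
// BlockVector<double> objects `dst` and `src` with matching block structure
// (placeholder names): the result is a LinearOperator, so the usual operator
// arithmetic applies.
//
//   const auto op        = block_operator(matrix);
//   op.vmult(dst, src);               // same action as matrix.vmult(dst, src)
//
//   const auto op_scaled = 2.0 * op;  // compose like any other LinearOperator
//   op_scaled.vmult(dst, src);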
652 
653 
654 
682 template <size_t m,
683  size_t n,
684  typename Range,
685  typename Domain,
686  typename BlockPayload>
687 BlockLinearOperator<Range, Domain, BlockPayload>
688 block_operator(
689  const std::array<std::array<LinearOperator<typename Range::BlockType,
690  typename Domain::BlockType,
691  typename BlockPayload::BlockType>,
692  n>,
693  m> &ops)
694 {
695  static_assert(m > 0 && n > 0,
696  "a blocked LinearOperator must consist of at least one block");
697 
698  using BlockType =
699  typename BlockLinearOperator<Range, Domain, BlockPayload>::BlockType;
700 
701  BlockLinearOperator<Range, Domain, BlockPayload> return_op(
702  (BlockPayload())); // TODO: Create block payload so that this can be
703  // initialized correctly
704 
705  return_op.n_block_rows = []() -> unsigned int { return m; };
706 
707  return_op.n_block_cols = []() -> unsigned int { return n; };
708 
709  return_op.block = [ops](unsigned int i, unsigned int j) -> BlockType {
710  Assert(i < m, ExcIndexRange(i, 0, m));
711  Assert(j < n, ExcIndexRange(j, 0, n));
712 
713  return ops[i][j];
714  };
715 
716  populate_linear_operator_functions(return_op);
717  return return_op;
718 }
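
// A usage sketch for assembling a 2x2 block operator from individual blocks,
// assuming four SparseMatrix<double> objects a00, a01, a10, a11 of compatible
// sizes (placeholder names):
//
//   const auto A00 = linear_operator(a00);
//   const auto A01 = linear_operator(a01);
//   const auto A10 = linear_operator(a10);
//   const auto A11 = linear_operator(a11);
//
//   std::array<std::array<decltype(A00), 2>, 2> blocks;
//   blocks[0][0] = A00; blocks[0][1] = A01;
//   blocks[1][0] = A10; blocks[1][1] = A11;
//
//   const auto op = block_operator<2, 2, BlockVector<double>>(blocks);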
719 
720 
721 
737 template <typename Range,
738  typename Domain,
739  typename BlockPayload,
740  typename BlockMatrixType>
741 BlockLinearOperator<Range, Domain, BlockPayload>
742 block_diagonal_operator(const BlockMatrixType &block_matrix)
743 {
744  using BlockType =
745  typename BlockLinearOperator<Range, Domain, BlockPayload>::BlockType;
746 
747  BlockLinearOperator<Range, Domain, BlockPayload> return_op(
748  BlockPayload(block_matrix, block_matrix));
749 
750  return_op.n_block_rows = [&block_matrix]() -> unsigned int {
751  return block_matrix.n_block_rows();
752  };
753 
754  return_op.n_block_cols = [&block_matrix]() -> unsigned int {
755  return block_matrix.n_block_cols();
756  };
757 
758  return_op.block = [&block_matrix](unsigned int i,
759  unsigned int j) -> BlockType {
760 #ifdef DEBUG
761  const unsigned int m = block_matrix.n_block_rows();
762  const unsigned int n = block_matrix.n_block_cols();
763  Assert(m == n, ExcDimensionMismatch(m, n));
764  Assert(i < m, ExcIndexRange(i, 0, m));
765  Assert(j < n, ExcIndexRange(j, 0, n));
766 #endif
767  if (i == j)
768  return BlockType(block_matrix.block(i, j));
769  else
770  return null_operator(BlockType(block_matrix.block(i, j)));
771  };
772 
773  populate_linear_operator_functions(return_op);
774  return return_op;
775 }
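
// A usage sketch, assuming an assembled BlockSparseMatrix<double> `matrix`
// with as many block rows as block columns (placeholder name):
//
//   const auto op_diag = block_diagonal_operator(matrix);
//
// op_diag.block(i, i) applies matrix.block(i, i), while all off-diagonal
// blocks act as null_operators.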
776 
777 
778 
796 template <size_t m, typename Range, typename Domain, typename BlockPayload>
797 BlockLinearOperator<Range, Domain, BlockPayload>
798 block_diagonal_operator(
799  const std::array<LinearOperator<typename Range::BlockType,
800  typename Domain::BlockType,
801  typename BlockPayload::BlockType>,
802  m> &ops)
803 {
804  static_assert(
805  m > 0, "a blockdiagonal LinearOperator must consist of at least one block");
806 
807  using BlockType =
808  typename BlockLinearOperator<Range, Domain, BlockPayload>::BlockType;
809 
810  std::array<std::array<BlockType, m>, m> new_ops;
811 
812  // This is a bit tricky. We have to make sure that the off-diagonal
813  // elements of new_ops are populated correctly. They must be
814  // null_operators, but with correct reinit_domain_vector and
815  // reinit_range_vector functions.
816  for (unsigned int i = 0; i < m; ++i)
817  for (unsigned int j = 0; j < m; ++j)
818  if (i == j)
819  {
820  // diagonal elements are easy:
821  new_ops[i][j] = ops[i];
822  }
823  else
824  {
825  // create a null-operator...
826  new_ops[i][j] = null_operator(ops[i]);
827  // ... and fix up reinit_domain_vector:
828  new_ops[i][j].reinit_domain_vector = ops[j].reinit_domain_vector;
829  }
830 
831  return block_operator<m, m, Range, Domain>(new_ops);
832 }
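
// A usage sketch, assuming two SparseMatrix<double> objects a00 and a11 that
// form the diagonal of a 2x2 block system (placeholder names):
//
//   const auto A00 = linear_operator(a00);
//   const auto A11 = linear_operator(a11);
//
//   std::array<decltype(A00), 2> diagonal{{A00, A11}};
//   const auto op_diag = block_diagonal_operator<2, BlockVector<double>>(diagonal);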
833 
834 
835 
845 template <size_t m, typename Range, typename Domain, typename BlockPayload>
846 BlockLinearOperator<Range, Domain, BlockPayload>
847 block_diagonal_operator(
848  const LinearOperator<typename Range::BlockType,
849  typename Domain::BlockType,
850  typename BlockPayload::BlockType> &op)
851 {
852  static_assert(m > 0,
853  "a blockdiagonal LinearOperator must consist of at least "
854  "one block");
855 
856  using BlockType =
857  typename BlockLinearOperator<Range, Domain, BlockPayload>::BlockType;
858  std::array<BlockType, m> new_ops;
859  new_ops.fill(op);
860 
861  return block_diagonal_operator(new_ops);
862 }
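
// A usage sketch, assuming a SparseMatrix<double> `mass_matrix` (placeholder
// name) that should act on each of the three blocks of a BlockVector<double>:
//
//   const auto op_mass = linear_operator(mass_matrix);
//   const auto op_diag = block_diagonal_operator<3, BlockVector<double>>(op_mass);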
863 
864 
865 
867 
871 
907 template <typename Range, typename Domain, typename BlockPayload>
908 LinearOperator<Domain, Range, typename BlockPayload::BlockType>
909 block_forward_substitution(
910  const BlockLinearOperator<Range, Domain, BlockPayload> &block_operator,
911  const BlockLinearOperator<Domain, Range, BlockPayload> &diagonal_inverse)
912 {
913  LinearOperator<Range, Range, typename BlockPayload::BlockType> return_op(
914  (typename BlockPayload::BlockType(diagonal_inverse)));
915 
916  return_op.reinit_range_vector = diagonal_inverse.reinit_range_vector;
917  return_op.reinit_domain_vector = diagonal_inverse.reinit_domain_vector;
918 
919  return_op.vmult = [block_operator, diagonal_inverse](Range & v,
920  const Range &u) {
921  const unsigned int m = block_operator.n_block_rows();
922  Assert(block_operator.n_block_cols() == m,
923  ExcDimensionMismatch(block_operator.n_block_cols(), m));
924  Assert(diagonal_inverse.n_block_rows() == m,
925  ExcDimensionMismatch(diagonal_inverse.n_block_rows(), m));
926  Assert(diagonal_inverse.n_block_cols() == m,
927  ExcDimensionMismatch(diagonal_inverse.n_block_cols(), m));
928  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
929  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
930 
931  if (m == 0)
932  return;
933 
934  diagonal_inverse.block(0, 0).vmult(v.block(0), u.block(0));
935  for (unsigned int i = 1; i < m; ++i)
936  {
937  auto &dst = v.block(i);
938  dst = u.block(i);
939  dst *= -1.;
940  for (unsigned int j = 0; j < i; ++j)
941  block_operator.block(i, j).vmult_add(dst, v.block(j));
942  dst *= -1.;
943  diagonal_inverse.block(i, i).vmult(dst,
944  dst); // uses intermediate storage
945  }
946  };
947 
948  return_op.vmult_add = [block_operator, diagonal_inverse](Range & v,
949  const Range &u) {
950  const unsigned int m = block_operator.n_block_rows();
951  Assert(block_operator.n_block_cols() == m,
952  ExcDimensionMismatch(block_operator.n_block_cols(), m));
953  Assert(diagonal_inverse.n_block_rows() == m,
954  ExcDimensionMismatch(diagonal_inverse.n_block_rows(), m));
955  Assert(diagonal_inverse.n_block_cols() == m,
956  ExcDimensionMismatch(diagonal_inverse.n_block_cols(), m));
957  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
958  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
959 
960  if (m == 0)
961  return;
962 
963  GrowingVectorMemory<typename Range::BlockType> vector_memory;
964  typename VectorMemory<typename Range::BlockType>::Pointer tmp(
965  vector_memory);
966 
967  diagonal_inverse.block(0, 0).vmult_add(v.block(0), u.block(0));
968 
969  for (unsigned int i = 1; i < m; ++i)
970  {
971  diagonal_inverse.block(i, i).reinit_range_vector(
972  *tmp, /*bool omit_zeroing_entries=*/true);
973  *tmp = u.block(i);
974  *tmp *= -1.;
975  for (unsigned int j = 0; j < i; ++j)
976  block_operator.block(i, j).vmult_add(*tmp, v.block(j));
977  *tmp *= -1.;
978  diagonal_inverse.block(i, i).vmult_add(v.block(i), *tmp);
979  }
980  };
981 
982  return return_op;
983 }
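
// A usage sketch, assuming a block lower triangular BlockSparseMatrix<double>
// `matrix` and a BlockLinearOperator `diag_inv` whose diagonal blocks
// (approximately) invert the diagonal blocks of `matrix`, for instance built
// with block_diagonal_operator() from inverse_operator() wrappers (all names
// are placeholders):
//
//   const auto op    = block_operator(matrix);
//   const auto L_inv = block_forward_substitution(op, diag_inv);
//
//   L_inv.vmult(dst, src);  // applies the inverse of the block lower
//                           // triangular part of op to src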
984 
985 
986 
1022 template <typename Range, typename Domain, typename BlockPayload>
1023 LinearOperator<Domain, Range, typename BlockPayload::BlockType>
1024 block_back_substitution(
1025  const BlockLinearOperator<Range, Domain, BlockPayload> &block_operator,
1026  const BlockLinearOperator<Domain, Range, BlockPayload> &diagonal_inverse)
1027 {
1028  LinearOperator<Range, Range, typename BlockPayload::BlockType> return_op(
1029  (typename BlockPayload::BlockType(diagonal_inverse)));
1030 
1031  return_op.reinit_range_vector = diagonal_inverse.reinit_range_vector;
1032  return_op.reinit_domain_vector = diagonal_inverse.reinit_domain_vector;
1033 
1034  return_op.vmult = [block_operator, diagonal_inverse](Range & v,
1035  const Range &u) {
1036  const unsigned int m = block_operator.n_block_rows();
1037  Assert(block_operator.n_block_cols() == m,
1038  ExcDimensionMismatch(block_operator.n_block_cols(), m));
1039  Assert(diagonal_inverse.n_block_rows() == m,
1040  ExcDimensionMismatch(diagonal_inverse.n_block_rows(), m));
1041  Assert(diagonal_inverse.n_block_cols() == m,
1042  ExcDimensionMismatch(diagonal_inverse.n_block_cols(), m));
1043  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
1044  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
1045 
1046  if (m == 0)
1047  return;
1048 
1049  diagonal_inverse.block(m - 1, m - 1).vmult(v.block(m - 1), u.block(m - 1));
1050 
1051  for (int i = m - 2; i >= 0; --i)
1052  {
1053  auto &dst = v.block(i);
1054  dst = u.block(i);
1055  dst *= -1.;
1056  for (unsigned int j = i + 1; j < m; ++j)
1057  block_operator.block(i, j).vmult_add(dst, v.block(j));
1058  dst *= -1.;
1059  diagonal_inverse.block(i, i).vmult(dst,
1060  dst); // uses intermediate storage
1061  }
1062  };
1063 
1064  return_op.vmult_add = [block_operator, diagonal_inverse](Range & v,
1065  const Range &u) {
1066  const unsigned int m = block_operator.n_block_rows();
1067  Assert(block_operator.n_block_cols() == m,
1068  ExcDimensionMismatch(block_operator.n_block_cols(), m));
1069  Assert(diagonal_inverse.n_block_rows() == m,
1070  ExcDimensionMismatch(diagonal_inverse.n_block_rows(), m));
1071  Assert(diagonal_inverse.n_block_cols() == m,
1072  ExcDimensionMismatch(diagonal_inverse.n_block_cols(), m));
1073  Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m));
1074  Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m));
1075  GrowingVectorMemory<typename Range::BlockType> vector_memory;
1076  typename VectorMemory<typename Range::BlockType>::Pointer tmp(
1077  vector_memory);
1078 
1079  if (m == 0)
1080  return;
1081 
1082  diagonal_inverse.block(m - 1, m - 1)
1083  .vmult_add(v.block(m - 1), u.block(m - 1));
1084 
1085  for (int i = m - 2; i >= 0; --i)
1086  {
1087  diagonal_inverse.block(i, i).reinit_range_vector(
1088  *tmp, /*bool omit_zeroing_entries=*/true);
1089  *tmp = u.block(i);
1090  *tmp *= -1.;
1091  for (unsigned int j = i + 1; j < m; ++j)
1092  block_operator.block(i, j).vmult_add(*tmp, v.block(j));
1093  *tmp *= -1.;
1094  diagonal_inverse.block(i, i).vmult_add(v.block(i), *tmp);
1095  }
1096  };
1097 
1098  return return_op;
1099 }
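
// A usage sketch, analogous to block_forward_substitution() above, assuming a
// block upper triangular BlockSparseMatrix<double> `matrix` and a
// BlockLinearOperator `diag_inv` of (approximate) diagonal block inverses
// (placeholder names):
//
//   const auto op    = block_operator(matrix);
//   const auto U_inv = block_back_substitution(op, diag_inv);
//
//   U_inv.vmult(dst, src);  // applies the inverse of the block upper
//                           // triangular part of op to src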
1100 
1102 
1103 DEAL_II_NAMESPACE_CLOSE
1104 
1105 #endif