Reference documentation for deal.II version 9.1.0-pre
lapack_full_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/numbers.h>
17 
18 #include <deal.II/lac/block_vector.h>
19 #include <deal.II/lac/full_matrix.h>
20 #include <deal.II/lac/lapack_full_matrix.h>
21 #include <deal.II/lac/lapack_support.h>
22 #include <deal.II/lac/lapack_templates.h>
23 #include <deal.II/lac/sparse_matrix.h>
24 #include <deal.II/lac/utilities.h>
25 #include <deal.II/lac/vector.h>
26 
27 #include <iomanip>
28 #include <iostream>
29 
30 DEAL_II_NAMESPACE_OPEN
31 
32 using namespace LAPACKSupport;
33 
34 namespace internal
35 {
36  namespace LAPACKFullMatrixImplementation
37  {
38  // ZGEEV/CGEEV and DGEEV/SGEEV need different work arrays and different
39  // output arrays for eigenvalues. This makes working with generic scalar
40  // types a bit difficult. To get around this, geev_helper has the same
41  // signature for real and complex arguments, but it ignores some
42  // parameters when called with a real type and ignores different
43  // parameters when called with a complex type.
44  template <typename T>
45  void
46  geev_helper(const char vl,
47  const char vr,
 48  AlignedVector<T> & matrix,
 49  const types::blas_int n_rows,
50  std::vector<T> & real_part_eigenvalues,
51  std::vector<T> & imag_part_eigenvalues,
52  std::vector<T> & left_eigenvectors,
53  std::vector<T> & right_eigenvectors,
54  std::vector<T> & real_work,
55  std::vector<T> & /*complex_work*/,
56  const types::blas_int work_flag,
57  types::blas_int & info)
58  {
59  static_assert(std::is_same<T, double>::value ||
60  std::is_same<T, float>::value,
61  "Only implemented for double and float");
 62  Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
 63  ExcInternalError());
 64  Assert(static_cast<std::size_t>(n_rows) <= real_part_eigenvalues.size(),
 65  ExcInternalError());
 66  Assert(static_cast<std::size_t>(n_rows) <= imag_part_eigenvalues.size(),
 67  ExcInternalError());
 68  if (vl == 'V')
 69  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
 70  left_eigenvectors.size(),
 71  ExcInternalError());
 72  if (vr == 'V')
 73  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
 74  right_eigenvectors.size(),
 75  ExcInternalError());
 76  Assert(work_flag == -1 ||
 77  static_cast<std::size_t>(2 * n_rows) <= real_work.size(),
 78  ExcInternalError());
 79  Assert(work_flag == -1 || std::max<long int>(1, 3 * n_rows) <= work_flag,
 80  ExcInternalError());
81  geev(&vl,
82  &vr,
83  &n_rows,
84  &matrix[0],
85  &n_rows,
86  real_part_eigenvalues.data(),
87  imag_part_eigenvalues.data(),
88  left_eigenvectors.data(),
89  &n_rows,
90  right_eigenvectors.data(),
91  &n_rows,
92  real_work.data(),
93  &work_flag,
94  &info);
95  }
96 
97  template <typename T>
98  void
99  geev_helper(const char vl,
100  const char vr,
101  AlignedVector<std::complex<T>> &matrix,
102  const types::blas_int n_rows,
103  std::vector<T> & /*real_part_eigenvalues*/,
104  std::vector<std::complex<T>> &eigenvalues,
105  std::vector<std::complex<T>> &left_eigenvectors,
106  std::vector<std::complex<T>> &right_eigenvectors,
107  std::vector<std::complex<T>> &complex_work,
108  std::vector<T> & real_work,
109  const types::blas_int work_flag,
110  types::blas_int & info)
111  {
112  static_assert(
113  std::is_same<T, double>::value || std::is_same<T, float>::value,
114  "Only implemented for std::complex<double> and std::complex<float>");
115  Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
116  ExcInternalError());
117  Assert(static_cast<std::size_t>(n_rows) <= eigenvalues.size(),
118  ExcInternalError());
119  if (vl == 'V')
120  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
121  left_eigenvectors.size(),
122  ExcInternalError());
123  if (vr == 'V')
124  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
125  right_eigenvectors.size(),
126  ExcInternalError());
127  Assert(std::max<std::size_t>(1, work_flag) <= real_work.size(),
128  ExcInternalError());
129  Assert(work_flag == -1 ||
130  std::max<long int>(1, 2 * n_rows) <= (work_flag),
131  ExcInternalError());
132 
133  geev(&vl,
134  &vr,
135  &n_rows,
136  &matrix[0],
137  &n_rows,
138  eigenvalues.data(),
139  left_eigenvectors.data(),
140  &n_rows,
141  right_eigenvectors.data(),
142  &n_rows,
143  complex_work.data(),
144  &work_flag,
145  real_work.data(),
146  &info);
147  }
148 
149 
150 
151  template <typename T>
152  void
153  gesdd_helper(const char job,
154  const types::blas_int n_rows,
155  const types::blas_int n_cols,
156  AlignedVector<T> & matrix,
157  std::vector<T> & singular_values,
158  AlignedVector<T> & left_vectors,
159  AlignedVector<T> & right_vectors,
160  std::vector<T> & real_work,
161  std::vector<T> & /*complex work*/,
162  std::vector<types::blas_int> &integer_work,
163  const types::blas_int & work_flag,
164  types::blas_int & info)
165  {
166  Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
167  ExcInternalError());
168  Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
169  ExcInternalError());
170  Assert(std::min<std::size_t>(n_rows, n_cols) <= singular_values.size(),
171  ExcInternalError());
172  Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
173  ExcInternalError());
174  Assert(work_flag == -1 ||
175  static_cast<std::size_t>(work_flag) <= real_work.size(),
176  ExcInternalError());
177  gesdd(&job,
178  &n_rows,
179  &n_cols,
180  &matrix[0],
181  &n_rows,
182  singular_values.data(),
183  &left_vectors[0],
184  &n_rows,
185  &right_vectors[0],
186  &n_cols,
187  real_work.data(),
188  &work_flag,
189  integer_work.data(),
190  &info);
191  }
192 
193 
194 
195  template <typename T>
196  void
197  gesdd_helper(const char job,
198  const types::blas_int n_rows,
199  const types::blas_int n_cols,
200  AlignedVector<std::complex<T>> &matrix,
201  std::vector<T> & singular_values,
202  AlignedVector<std::complex<T>> &left_vectors,
203  AlignedVector<std::complex<T>> &right_vectors,
204  std::vector<std::complex<T>> & work,
205  std::vector<T> & real_work,
206  std::vector<types::blas_int> & integer_work,
207  const types::blas_int & work_flag,
208  types::blas_int & info)
209  {
210  Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
211  ExcInternalError());
212  Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
213  ExcInternalError());
214  Assert(static_cast<std::size_t>(std::min(n_rows, n_cols)) <=
215  singular_values.size(),
216  ExcInternalError());
217  Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
218  ExcInternalError());
219  Assert(work_flag == -1 ||
220  static_cast<std::size_t>(work_flag) <= real_work.size(),
221  ExcInternalError());
222 
223  gesdd(&job,
224  &n_rows,
225  &n_cols,
226  &matrix[0],
227  &n_rows,
228  singular_values.data(),
229  &left_vectors[0],
230  &n_rows,
231  &right_vectors[0],
232  &n_cols,
233  work.data(),
234  &work_flag,
235  real_work.data(),
236  integer_work.data(),
237  &info);
238  }
239  } // namespace LAPACKFullMatrixImplementation
240 } // namespace internal
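// The helpers above are driven with LAPACK's usual two-phase workspace query:
// a first call with work_flag == -1 only writes the optimal workspace size
// into work[0], and a second call with the resized array does the actual
// computation. A minimal sketch of that idiom (the matrix and the output
// vectors are placeholders here, not objects defined in this file):
//
//   types::blas_int info = 0, lwork = -1;
//   std::vector<double> work(1), unused;
//   internal::LAPACKFullMatrixImplementation::geev_helper(
//     'N', 'N', matrix, n, wr, wi, vl, vr, work, unused, lwork, info);
//   lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
//   work.resize(lwork);
//   internal::LAPACKFullMatrixImplementation::geev_helper(
//     'N', 'N', matrix, n, wr, wi, vl, vr, work, unused, lwork, info);
//
// This is exactly how compute_eigenvalues() and compute_svd() below use
// geev_helper() and gesdd_helper().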
241 
242 template <typename number>
243 LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type n)
244  : TransposeTable<number>(n, n)
245  , state(matrix)
246  , property(general)
247 {}
248 
249 
250 template <typename number>
251 LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type m, const size_type n)
252  : TransposeTable<number>(m, n)
253  , state(matrix)
254  , property(general)
255 {}
256 
257 
258 template <typename number>
259 LAPACKFullMatrix<number>::LAPACKFullMatrix(const LAPACKFullMatrix &M)
260  : TransposeTable<number>(M)
261  , state(matrix)
262  , property(general)
263 {}
264 
265 
266 template <typename number>
267 LAPACKFullMatrix<number> &
268 LAPACKFullMatrix<number>::operator=(const LAPACKFullMatrix<number> &M)
269 {
270  TransposeTable<number>::operator=(M);
271  state = M.state;
272  property = M.property;
273  return *this;
274 }
275 
276 
277 
278 template <typename number>
279 void
280 LAPACKFullMatrix<number>::reinit(const size_type n)
281 {
282  this->TransposeTable<number>::reinit(n, n);
283  state = LAPACKSupport::matrix;
284 }
285 
286 
287 
288 template <typename number>
289 void
290 LAPACKFullMatrix<number>::grow_or_shrink(const size_type n)
291 {
292  const size_type s = std::min(std::min(this->m(), n), this->n());
293  TransposeTable<number> copy(std::move(*this));
294  this->TransposeTable<number>::reinit(n, n);
295  for (size_type i = 0; i < s; ++i)
296  for (size_type j = 0; j < s; ++j)
297  (*this)(i, j) = copy(i, j);
298 }
299 
300 
301 
302 template <typename number>
303 void
304 LAPACKFullMatrix<number>::remove_row_and_column(const size_type row,
305  const size_type col)
306 {
307  AssertIndexRange(row, this->m());
308  AssertIndexRange(col, this->n());
309 
310  const size_type nrows = this->m() - 1;
311  const size_type ncols = this->n() - 1;
312 
313  TransposeTable<number> copy(std::move(*this));
314  this->TransposeTable<number>::reinit(nrows, ncols);
315 
316  for (size_type j = 0; j < ncols; ++j)
317  {
318  const size_type jj = (j < col ? j : j + 1);
319  for (size_type i = 0; i < nrows; ++i)
320  {
321  const size_type ii = (i < row ? i : i + 1);
322  (*this)(i, j) = copy(ii, jj);
323  }
324  }
325 }
326 
327 
328 
329 template <typename number>
330 void
331 LAPACKFullMatrix<number>::reinit(const size_type m, const size_type n)
332 {
333  this->TransposeTable<number>::reinit(m, n);
334  state = LAPACKSupport::matrix;
335 }
336 
337 
338 template <typename number>
339 template <typename number2>
340 LAPACKFullMatrix<number> &
341 LAPACKFullMatrix<number>::operator=(const FullMatrix<number2> &M)
342 {
343  Assert(this->m() == M.m(), ExcDimensionMismatch(this->m(), M.m()));
344  Assert(this->n() == M.n(), ExcDimensionMismatch(this->n(), M.n()));
345  for (size_type i = 0; i < this->m(); ++i)
346  for (size_type j = 0; j < this->n(); ++j)
347  (*this)(i, j) = M(i, j);
348 
349  state = LAPACKSupport::matrix;
350  property = LAPACKSupport::general;
351  return *this;
352 }
353 
354 
355 template <typename number>
356 template <typename number2>
357 LAPACKFullMatrix<number> &
358 LAPACKFullMatrix<number>::operator=(const SparseMatrix<number2> &M)
359 {
360  Assert(this->m() == M.n(), ExcDimensionMismatch(this->m(), M.n()));
361  Assert(this->n() == M.m(), ExcDimensionMismatch(this->n(), M.m()));
362  for (size_type i = 0; i < this->m(); ++i)
363  for (size_type j = 0; j < this->n(); ++j)
364  (*this)(i, j) = M.el(i, j);
365 
366  state = LAPACKSupport::matrix;
367  property = LAPACKSupport::general;
368  return *this;
369 }
370 
371 
372 template <typename number>
373 LAPACKFullMatrix<number> &
374 LAPACKFullMatrix<number>::operator=(const number d)
375 {
376  (void)d;
377  Assert(d == number(0), ExcScalarAssignmentOnlyForZeroValue());
378 
379  if (this->n_elements() != 0)
380  this->reset_values();
381 
382  state = LAPACKSupport::matrix;
383  return *this;
384 }
385 
386 
387 template <typename number>
388 LAPACKFullMatrix<number> &
389 LAPACKFullMatrix<number>::operator*=(const number factor)
390 {
391  Assert(state == LAPACKSupport::matrix ||
392  state == LAPACKSupport::inverse_matrix,
393  ExcState(state));
394 
395  AssertIsFinite(factor);
396  const char type = 'G';
397  const number cfrom = 1.;
398  const types::blas_int m = this->m();
399  const types::blas_int n = this->n();
400  const types::blas_int lda = this->m();
401  types::blas_int info = 0;
402  // kl and ku will not be referenced for type = G (dense matrices).
403  const types::blas_int kl = 0;
404  number * values = &this->values[0];
405 
406  lascl(&type, &kl, &kl, &cfrom, &factor, &m, &n, values, &lda, &info);
407 
408  // Negative return value implies a wrong argument. This should be internal.
409  Assert(info >= 0, ExcInternalError());
410 
411  return *this;
412 }
413 
414 
415 template <typename number>
416 LAPACKFullMatrix<number> &
417 LAPACKFullMatrix<number>::operator/=(const number factor)
418 {
419  Assert(state == LAPACKSupport::matrix ||
420  state == LAPACKSupport::inverse_matrix,
421  ExcState(state));
422 
423  AssertIsFinite(factor);
424  Assert(factor != number(0.), ExcZero());
425 
426  const char type = 'G';
427  const number cto = 1.;
428  const types::blas_int m = this->m();
429  const types::blas_int n = this->n();
430  const types::blas_int lda = this->m();
431  types::blas_int info = 0;
432  // kl and ku will not be referenced for type = G (dense matrices).
433  const types::blas_int kl = 0;
434  number * values = &this->values[0];
435 
436  lascl(&type, &kl, &kl, &factor, &cto, &m, &n, values, &lda, &info);
437 
438  // Negative return value implies a wrong argument. This should be internal.
439  Assert(info >= 0, ExcInternalError());
440 
441  return *this;
442 }
443 
444 
445 
446 template <typename number>
447 void
448 LAPACKFullMatrix<number>::add(const number a, const LAPACKFullMatrix<number> &A)
449 {
450  Assert(state == LAPACKSupport::matrix ||
451  state == LAPACKSupport::inverse_matrix,
452  ExcState(state));
453 
454  Assert(m() == A.m(), ExcDimensionMismatch(m(), A.m()));
455  Assert(n() == A.n(), ExcDimensionMismatch(n(), A.n()));
456 
457  AssertIsFinite(a);
458 
459  // BLAS does not offer functions to add matrices.
460  // LAPACKFullMatrix is stored in a contiguous array,
461  // so BLAS 1 (axpy) can be used to add the two matrices as long vectors.
462  const types::blas_int n = this->m() * this->n();
463  const types::blas_int inc = 1;
464  number * values = &this->values[0];
465  const number * values_A = &A.values[0];
466 
467  axpy(&n, &a, values_A, &inc, values, &inc);
468 }
469 
470 
471 
472 namespace
473 {
474  template <typename number>
475  void
476  cholesky_rank1(LAPACKFullMatrix<number> &A,
477  const number a,
478  const Vector<number> & v)
479  {
480  const typename LAPACKFullMatrix<number>::size_type N = A.n();
481  Vector<number> z(v);
482  // Cholesky update / downdate, see
483  // 6.5.4 Cholesky Updating and Downdating, Golub 2013 Matrix computations
484  // Note that potrf() is called with LAPACKSupport::L , so the
485  // factorization is stored in lower triangular part.
486  // Also see discussion here
487  // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
488  if (a > 0.)
489  {
490  // simple update via a sequence of Givens rotations.
491  // Observe that
492  //
493  //         | L^T |T   | L^T |
494  // A <--   |     |    |     |  =  L L^T + z z^T
495  //         | z^T |    | z^T |
496  //
497  // so we can get updated factor by doing a sequence of Givens
498  // rotations to make the matrix lower-triangular
499  // Also see LINPACK's dchud http://www.netlib.org/linpack/dchud.f
500  z *= std::sqrt(a);
501  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
502  {
503  const std::array<number, 3> csr =
504  Utilities::LinearAlgebra::givens_rotation<number>(A(k, k), z(k));
505  A(k, k) = csr[2];
506  for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
507  ++i)
508  {
509  const number t = A(i, k);
510  A(i, k) = csr[0] * A(i, k) + csr[1] * z(i);
511  z(i) = -csr[1] * t + csr[0] * z(i);
512  }
513  }
514  }
515  else
516  {
517  // downdating is not always possible as we may end up with
518  // negative definite matrix. If it's possible, then it boils
519  // down to application of hyperbolic rotations.
520  // Observe that
521  //
522  //         | L^T |T      | L^T |
523  // A <--   |     |   S   |     |  =  L L^T - z z^T
524  //         | z^T |       | z^T |
525  //
526  //        | In   0 |
527  // S :=   |        |
528  //        | 0   -1 |
529  //
530  // We are looking for H which is S-orthogonal (HSH^T=S) and
531  // can restore upper-triangular factor of the factorization of A above.
532  // We will use Hyperbolic rotations to do the job
533  //
534  // | c  -s |   | x1 |   | r |
535  // |       |   |    | = |   | ,   c^2 - s^2 = 1
536  // |-s   c |   | x2 |   | 0 |
537  //
538  // which has a real solution only if |x2| <= |x1|.
539  // See also Linpack's http://www.netlib.org/linpack/dchdd.f and
540  // https://infoscience.epfl.ch/record/161468/files/cholupdate.pdf and
541  // "Analysis of a recursive Least Squares Hyperbolic Rotation Algorithm
542  // for Signal Processing", Alexander, Pan, Plemmons, 1988.
543  z *= std::sqrt(-a);
544  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
545  {
546  const std::array<number, 3> csr =
547  Utilities::LinearAlgebra::hyperbolic_rotation<number>(A(k, k), z(k));
548  A(k, k) = csr[2];
549  for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
550  ++i)
551  {
552  const number t = A(i, k);
553  A(i, k) = csr[0] * A(i, k) - csr[1] * z(i);
554  z(i) = -csr[1] * t + csr[0] * z(i);
555  }
556  }
557  }
558  }
559 
560 
561  template <typename number>
562  void
563  cholesky_rank1(LAPACKFullMatrix<std::complex<number>> & /*A*/,
564  const std::complex<number> /*a*/,
565  const Vector<std::complex<number>> & /*v*/)
566  {
567  AssertThrow(false, ExcNotImplemented());
568  }
569 } // namespace
570 
571 
572 
573 template <typename number>
574 void
575 LAPACKFullMatrix<number>::rank1_update(const number a, const Vector<number> &v)
576 {
577  Assert(property == LAPACKSupport::symmetric, ExcProperty(property));
578 
579  Assert(n() == m(), ExcInternalError());
580  Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
581 
582  AssertIsFinite(a);
583 
584  if (state == LAPACKSupport::matrix)
585  {
586  {
587  const types::blas_int N = this->m();
588  const char uplo = LAPACKSupport::U;
589  const types::blas_int lda = N;
590  const types::blas_int incx = 1;
591 
592  syr(&uplo, &N, &a, v.begin(), &incx, this->values.begin(), &lda);
593  }
594 
595  const size_type N = this->m();
596  // FIXME: we should really only update upper or lower triangular parts
597  // of symmetric matrices and make sure the interface is consistent,
598  // for example operator(i,j) gives correct results regardless of storage.
599  for (size_type i = 0; i < N; ++i)
600  for (size_type j = 0; j < i; ++j)
601  (*this)(i, j) = (*this)(j, i);
602  }
603  else if (state == LAPACKSupport::cholesky)
604  {
605  cholesky_rank1(*this, a, v);
606  }
607  else
608  AssertThrow(false, ExcState(state));
609 }
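// A minimal usage sketch for rank1_update() (illustrative only; the size N
// and the matrix entries are placeholders). The matrix must be flagged as
// symmetric, and the call performs A <- A + a * v * v^T:
//
//   LAPACKFullMatrix<double> A(N, N);
//   // ... fill A with a symmetric matrix ...
//   A.set_property(LAPACKSupport::symmetric);
//   Vector<double> v(N);
//   // ... fill v ...
//   A.rank1_update(2.0, v);
//
// If the matrix is in the Cholesky-factored state instead, the same call
// performs the Givens-rotation update (a > 0) or the hyperbolic-rotation
// downdate (a < 0) implemented in cholesky_rank1() above.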
610 
611 
612 
613 template <typename number>
614 void
615 LAPACKFullMatrix<number>::vmult(Vector<number> &w,
616  const Vector<number> &v,
617  const bool adding) const
618 {
619  const types::blas_int mm = this->m();
620  const types::blas_int nn = this->n();
621  const number alpha = 1.;
622  const number beta = (adding ? 1. : 0.);
623  const number null = 0.;
624 
625  // use trmv for triangular matrices
626  if ((property == upper_triangular || property == lower_triangular) &&
627  (mm == nn) && state == matrix)
628  {
629  Assert(adding == false, ExcNotImplemented());
630 
631  AssertDimension(v.size(), this->n());
632  AssertDimension(w.size(), this->m());
633 
634  const char diag = 'N';
635  const char trans = 'N';
636  const char uplo =
637  (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
638 
639  w = v;
640 
641  const types::blas_int N = mm;
642  const types::blas_int lda = N;
643  const types::blas_int incx = 1;
644 
645  trmv(&uplo, &trans, &diag, &N, &this->values[0], &lda, &w[0], &incx);
646 
647  return;
648  }
649 
650  switch (state)
651  {
652  case matrix:
653  case inverse_matrix:
654  {
655  AssertDimension(v.size(), this->n());
656  AssertDimension(w.size(), this->m());
657 
658  gemv("N",
659  &mm,
660  &nn,
661  &alpha,
662  &this->values[0],
663  &mm,
664  v.values.get(),
665  &one,
666  &beta,
667  w.values.get(),
668  &one);
669  break;
670  }
671  case svd:
672  {
674  AssertDimension(v.size(), this->n());
675  AssertDimension(w.size(), this->m());
676  // Compute V^T v
677  work.resize(std::max(mm, nn));
678  gemv("N",
679  &nn,
680  &nn,
681  &alpha,
682  &svd_vt->values[0],
683  &nn,
684  v.values.get(),
685  &one,
686  &null,
687  work.data(),
688  &one);
689  // Multiply by singular values
690  for (size_type i = 0; i < wr.size(); ++i)
691  work[i] *= wr[i];
692  // Multiply with U
693  gemv("N",
694  &mm,
695  &mm,
696  &alpha,
697  &svd_u->values[0],
698  &mm,
699  work.data(),
700  &one,
701  &beta,
702  w.values.get(),
703  &one);
704  break;
705  }
706  case inverse_svd:
707  {
709  AssertDimension(w.size(), this->n());
710  AssertDimension(v.size(), this->m());
711  // Compute U^T v
712  work.resize(std::max(mm, nn));
713  gemv("T",
714  &mm,
715  &mm,
716  &alpha,
717  &svd_u->values[0],
718  &mm,
719  v.values.get(),
720  &one,
721  &null,
722  work.data(),
723  &one);
724  // Multiply by singular values
725  for (size_type i = 0; i < wr.size(); ++i)
726  work[i] *= wr[i];
727  // Multiply with V
728  gemv("T",
729  &nn,
730  &nn,
731  &alpha,
732  &svd_vt->values[0],
733  &nn,
734  work.data(),
735  &one,
736  &beta,
737  w.values.get(),
738  &one);
739  break;
740  }
741  default:
742  Assert(false, ExcState(state));
743  }
744 }
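// Usage sketch for vmult() (the sizes m and n are placeholders):
//
//   LAPACKFullMatrix<double> A(m, n);
//   Vector<double> v(n), w(m);
//   // ... fill A and v ...
//   A.vmult(w, v);       // w  = A * v
//   A.vmult(w, v, true); // w += A * v
//
// After compute_svd() or compute_inverse_svd() the same function multiplies
// with the stored factorization U * Sigma * V^T (or its pseudo-inverse), as
// handled by the svd/inverse_svd cases above.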
745 
746 
747 template <typename number>
748 void
749 LAPACKFullMatrix<number>::Tvmult(Vector<number> &w,
750  const Vector<number> &v,
751  const bool adding) const
752 {
753  const types::blas_int mm = this->m();
754  const types::blas_int nn = this->n();
755  const number alpha = 1.;
756  const number beta = (adding ? 1. : 0.);
757  const number null = 0.;
758 
759  // use trmv for triangular matrices
760  if ((property == upper_triangular || property == lower_triangular) &&
761  (mm == nn) && state == matrix)
762  {
763  Assert(adding == false, ExcNotImplemented());
764 
765  AssertDimension(v.size(), this->n());
766  AssertDimension(w.size(), this->m());
767 
768  const char diag = 'N';
769  const char trans = 'T';
770  const char uplo =
771  (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
772 
773  w = v;
774 
775  const types::blas_int N = mm;
776  const types::blas_int lda = N;
777  const types::blas_int incx = 1;
778 
779  trmv(&uplo, &trans, &diag, &N, &this->values[0], &lda, &w[0], &incx);
780 
781  return;
782  }
783 
784 
785  switch (state)
786  {
787  case matrix:
788  case inverse_matrix:
789  {
790  AssertDimension(w.size(), this->n());
791  AssertDimension(v.size(), this->m());
792 
793  gemv("T",
794  &mm,
795  &nn,
796  &alpha,
797  &this->values[0],
798  &mm,
799  v.values.get(),
800  &one,
801  &beta,
802  w.values.get(),
803  &one);
804  break;
805  }
806  case svd:
807  {
809  AssertDimension(w.size(), this->n());
810  AssertDimension(v.size(), this->m());
811 
812  // Compute U^T v
813  work.resize(std::max(mm, nn));
814  gemv("T",
815  &mm,
816  &mm,
817  &alpha,
818  &svd_u->values[0],
819  &mm,
820  v.values.get(),
821  &one,
822  &null,
823  work.data(),
824  &one);
825  // Multiply by singular values
826  for (size_type i = 0; i < wr.size(); ++i)
827  work[i] *= wr[i];
828  // Multiply with V
829  gemv("T",
830  &nn,
831  &nn,
832  &alpha,
833  &svd_vt->values[0],
834  &nn,
835  work.data(),
836  &one,
837  &beta,
838  w.values.get(),
839  &one);
840  break;
841  }
842  case inverse_svd:
843  {
845  AssertDimension(v.size(), this->n());
846  AssertDimension(w.size(), this->m());
847 
848  // Compute V^T v
849  work.resize(std::max(mm, nn));
850  gemv("N",
851  &nn,
852  &nn,
853  &alpha,
854  &svd_vt->values[0],
855  &nn,
856  v.values.get(),
857  &one,
858  &null,
859  work.data(),
860  &one);
861  // Multiply by singular values
862  for (size_type i = 0; i < wr.size(); ++i)
863  work[i] *= wr[i];
864  // Multiply with U
865  gemv("N",
866  &mm,
867  &mm,
868  &alpha,
869  &svd_u->values[0],
870  &mm,
871  work.data(),
872  &one,
873  &beta,
874  w.values.get(),
875  &one);
876  break;
877  }
878  default:
879  Assert(false, ExcState(state));
880  }
881 }
882 
883 
884 template <typename number>
885 void
886 LAPACKFullMatrix<number>::vmult_add(Vector<number> &w,
887  const Vector<number> &v) const
888 {
889  vmult(w, v, true);
890 }
891 
892 
893 template <typename number>
894 void
895 LAPACKFullMatrix<number>::Tvmult_add(Vector<number> &w,
896  const Vector<number> &v) const
897 {
898  Tvmult(w, v, true);
899 }
900 
901 
902 template <typename number>
903 void
904 LAPACKFullMatrix<number>::mmult(LAPACKFullMatrix<number> &C,
905  const LAPACKFullMatrix<number> &B,
906  const bool adding) const
907 {
908  Assert(state == matrix || state == inverse_matrix, ExcState(state));
909  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
910  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
911  Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
912  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
913  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
914  const types::blas_int mm = this->m();
915  const types::blas_int nn = B.n();
916  const types::blas_int kk = this->n();
917  const number alpha = 1.;
918  const number beta = (adding ? 1. : 0.);
919 
920  gemm("N",
921  "N",
922  &mm,
923  &nn,
924  &kk,
925  &alpha,
926  &this->values[0],
927  &mm,
928  &B.values[0],
929  &kk,
930  &beta,
931  &C.values[0],
932  &mm);
933 }
934 
935 
936 template <typename number>
937 void
938 LAPACKFullMatrix<number>::mmult(FullMatrix<number> &C,
939  const LAPACKFullMatrix<number> &B,
940  const bool adding) const
941 {
942  Assert(state == matrix || state == inverse_matrix, ExcState(state));
943  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
944  Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
945  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
946  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
947  const types::blas_int mm = this->m();
948  const types::blas_int nn = B.n();
949  const types::blas_int kk = this->n();
950  const number alpha = 1.;
951  const number beta = (adding ? 1. : 0.);
952 
953  // since FullMatrix stores the matrix in transposed order compared to this
954  // matrix, compute B^T * A^T = (A * B)^T
955  gemm("T",
956  "T",
957  &nn,
958  &mm,
959  &kk,
960  &alpha,
961  &B.values[0],
962  &kk,
963  &this->values[0],
964  &mm,
965  &beta,
966  &C(0, 0),
967  &nn);
968 }
969 
970 
971 
972 template <typename number>
973 void
974 LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
975  const LAPACKFullMatrix<number> &B,
976  const Vector<number> & V,
977  const bool adding) const
978 {
979  Assert(state == matrix || state == inverse_matrix, ExcState(state));
980  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
981  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
982 
983  const LAPACKFullMatrix<number> &A = *this;
984 
985  Assert(A.m() == B.m(), ExcDimensionMismatch(A.m(), B.m()));
986  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
987  Assert(C.m() == A.n(), ExcDimensionMismatch(A.n(), C.m()));
988  Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
989 
990  const types::blas_int mm = A.n();
991  const types::blas_int nn = B.n();
992  const types::blas_int kk = B.m();
993 
994  // LAPACK does not have any triple-product routines, including the case of
995  // a diagonal matrix in the middle, see
996  // https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
997  // http://mathforum.org/kb/message.jspa?messageID=3546564
998 
1000  // First, get V*B into "work" array
1001  work.resize(kk * nn);
1002  // following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
1003  // do the left-multiplication manually. Note that Xscal would require first
1004  // copying the input vector, since the multiplication is done in place.
1005  for (types::blas_int j = 0; j < nn; ++j)
1006  for (types::blas_int i = 0; i < kk; ++i)
1007  {
1008  Assert(j * kk + i < static_cast<types::blas_int>(work.size()),
1009  ExcInternalError());
1010  work[j * kk + i] = V(i) * B(i, j);
1011  }
1012 
1013  // Now do the standard Tmmult:
1014  const number alpha = 1.;
1015  const number beta = (adding ? 1. : 0.);
1016 
1017  gemm("T",
1018  "N",
1019  &mm,
1020  &nn,
1021  &kk,
1022  &alpha,
1023  &this->values[0],
1024  &kk,
1025  &work[0],
1026  &kk,
1027  &beta,
1028  &C.values[0],
1029  &mm);
1030 }
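// The overload above computes the triple product C = A^T * diag(V) * B by
// scaling the rows of B into the work array and then calling gemm once.
// A usage sketch (k, m, n are placeholder dimensions):
//
//   LAPACKFullMatrix<double> A(k, m), B(k, n), C(m, n);
//   Vector<double> V(k);
//   // ... fill A, B and V ...
//   A.Tmmult(C, B, V); // C = A^T diag(V) B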
1031 
1032 
1033 
1034 template <typename number>
1035 void
1036 LAPACKFullMatrix<number>::scale_rows(const Vector<number> &V)
1037 {
1038  LAPACKFullMatrix<number> &A = *this;
1039  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1040  Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1041 
1042  const types::blas_int nn = A.n();
1043  const types::blas_int kk = A.m();
1044  for (types::blas_int j = 0; j < nn; ++j)
1045  for (types::blas_int i = 0; i < kk; ++i)
1046  A(i, j) *= V(i);
1047 }
1048 
1049 
1050 
1051 template <typename number>
1052 void
1053 LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
1054  const LAPACKFullMatrix<number> &B,
1055  const bool adding) const
1056 {
1057  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1058  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1059  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1060  Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1061  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1062  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1063  const types::blas_int mm = this->n();
1064  const types::blas_int nn = B.n();
1065  const types::blas_int kk = B.m();
1066  const number alpha = 1.;
1067  const number beta = (adding ? 1. : 0.);
1068 
1069  if (PointerComparison::equal(this, &B))
1070  {
1071  syrk(&LAPACKSupport::U,
1072  "T",
1073  &nn,
1074  &kk,
1075  &alpha,
1076  &this->values[0],
1077  &kk,
1078  &beta,
1079  &C.values[0],
1080  &nn);
1081 
1082  // fill-in lower triangular part
1083  for (types::blas_int j = 0; j < nn; ++j)
1084  for (types::blas_int i = 0; i < j; ++i)
1085  C(j, i) = C(i, j);
1086 
1087  C.property = symmetric;
1088  }
1089  else
1090  {
1091  gemm("T",
1092  "N",
1093  &mm,
1094  &nn,
1095  &kk,
1096  &alpha,
1097  &this->values[0],
1098  &kk,
1099  &B.values[0],
1100  &kk,
1101  &beta,
1102  &C.values[0],
1103  &mm);
1104  }
1105 }
1106 
1107 
1108 template <typename number>
1109 void
1110 LAPACKFullMatrix<number>::Tmmult(FullMatrix<number> &C,
1111  const LAPACKFullMatrix<number> &B,
1112  const bool adding) const
1113 {
1114  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1115  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1116  Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1117  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1118  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1119  const types::blas_int mm = this->n();
1120  const types::blas_int nn = B.n();
1121  const types::blas_int kk = B.m();
1122  const number alpha = 1.;
1123  const number beta = (adding ? 1. : 0.);
1124 
1125  // since FullMatrix stores the matrix in transposed order compared to this
1126  // matrix, compute B^T * A = (A^T * B)^T
1127  gemm("T",
1128  "N",
1129  &nn,
1130  &mm,
1131  &kk,
1132  &alpha,
1133  &B.values[0],
1134  &kk,
1135  &this->values[0],
1136  &kk,
1137  &beta,
1138  &C(0, 0),
1139  &nn);
1140 }
1141 
1142 
1143 template <typename number>
1144 void
1145 LAPACKFullMatrix<number>::mTmult(LAPACKFullMatrix<number> &C,
1146  const LAPACKFullMatrix<number> &B,
1147  const bool adding) const
1148 {
1149  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1150  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1151  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1152  Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1153  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1154  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1155  const types::blas_int mm = this->m();
1156  const types::blas_int nn = B.m();
1157  const types::blas_int kk = B.n();
1158  const number alpha = 1.;
1159  const number beta = (adding ? 1. : 0.);
1160 
1161  if (PointerComparison::equal(this, &B))
1162  {
1163  syrk(&LAPACKSupport::U,
1164  "N",
1165  &nn,
1166  &kk,
1167  &alpha,
1168  &this->values[0],
1169  &nn,
1170  &beta,
1171  &C.values[0],
1172  &nn);
1173 
1174  // fill-in lower triangular part
1175  for (types::blas_int j = 0; j < nn; ++j)
1176  for (types::blas_int i = 0; i < j; ++i)
1177  C(j, i) = C(i, j);
1178 
1179  C.property = symmetric;
1180  }
1181  else
1182  {
1183  gemm("N",
1184  "T",
1185  &mm,
1186  &nn,
1187  &kk,
1188  &alpha,
1189  &this->values[0],
1190  &mm,
1191  &B.values[0],
1192  &nn,
1193  &beta,
1194  &C.values[0],
1195  &mm);
1196  }
1197 }
1198 
1199 
1200 
1201 template <typename number>
1202 void
1203 LAPACKFullMatrix<number>::mTmult(FullMatrix<number> &C,
1204  const LAPACKFullMatrix<number> &B,
1205  const bool adding) const
1206 {
1207  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1208  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1209  Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1210  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1211  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1212  const types::blas_int mm = this->m();
1213  const types::blas_int nn = B.m();
1214  const types::blas_int kk = B.n();
1215  const number alpha = 1.;
1216  const number beta = (adding ? 1. : 0.);
1217 
1218  // since FullMatrix stores the matrix in transposed order compared to this
1219  // matrix, compute B * A^T = (A * B^T)^T
1220  gemm("N",
1221  "T",
1222  &nn,
1223  &mm,
1224  &kk,
1225  &alpha,
1226  &B.values[0],
1227  &nn,
1228  &this->values[0],
1229  &mm,
1230  &beta,
1231  &C(0, 0),
1232  &nn);
1233 }
1234 
1235 
1236 template <typename number>
1237 void
1238 LAPACKFullMatrix<number>::TmTmult(LAPACKFullMatrix<number> &C,
1239  const LAPACKFullMatrix<number> &B,
1240  const bool adding) const
1241 {
1242  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1243  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1244  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1245  Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1246  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1247  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1248  const types::blas_int mm = this->n();
1249  const types::blas_int nn = B.m();
1250  const types::blas_int kk = B.n();
1251  const number alpha = 1.;
1252  const number beta = (adding ? 1. : 0.);
1253 
1254  gemm("T",
1255  "T",
1256  &mm,
1257  &nn,
1258  &kk,
1259  &alpha,
1260  &this->values[0],
1261  &kk,
1262  &B.values[0],
1263  &nn,
1264  &beta,
1265  &C.values[0],
1266  &mm);
1267 }
1268 
1269 
1270 template <typename number>
1271 void
1272 LAPACKFullMatrix<number>::TmTmult(FullMatrix<number> &C,
1273  const LAPACKFullMatrix<number> &B,
1274  const bool adding) const
1275 {
1276  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1277  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1278  Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1279  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1280  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1281  const types::blas_int mm = this->n();
1282  const types::blas_int nn = B.m();
1283  const types::blas_int kk = B.n();
1284  const number alpha = 1.;
1285  const number beta = (adding ? 1. : 0.);
1286 
1287  // since FullMatrix stores the matrix in transposed order compared to this
1288  // matrix, compute B * A = (A^T * B^T)^T
1289  gemm("N",
1290  "N",
1291  &nn,
1292  &mm,
1293  &kk,
1294  &alpha,
1295  &B.values[0],
1296  &nn,
1297  &this->values[0],
1298  &kk,
1299  &beta,
1300  &C(0, 0),
1301  &nn);
1302 }
1303 
1304 
1305 template <typename number>
1306 void
1307 LAPACKFullMatrix<number>::compute_lu_factorization()
1308 {
1309  Assert(state == matrix, ExcState(state));
1310  state = LAPACKSupport::unusable;
1311 
1312  const types::blas_int mm = this->m();
1312  const types::blas_int mm = this->m();
1313  const types::blas_int nn = this->n();
1314  number *const values = &this->values[0];
1315  ipiv.resize(mm);
1316  types::blas_int info = 0;
1317  getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
1318 
1319  Assert(info >= 0, ExcInternalError());
1320 
1321  // if info >= 0, the factorization has been completed
1322  state = lu;
1323 
1325 }
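// Typical use of the LU factorization (sketch; n and the entries are
// placeholders). The matrix content is overwritten by the factors and the
// state changes from `matrix` to `lu`:
//
//   LAPACKFullMatrix<double> A(n, n);
//   Vector<double> rhs(n);
//   // ... fill A and rhs ...
//   A.compute_lu_factorization();
//   A.solve(rhs);                        // rhs <- A^{-1} rhs
//   const double det = A.determinant();  // uses the stored factors and ipiv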
1326 
1327 
1328 
1329 template <typename number>
1330 void
1331 LAPACKFullMatrix<number>::set_property(const Property p)
1332 {
1333  property = p;
1334 }
1335 
1336 
1337 
1338 template <typename number>
1339 number
1340 LAPACKFullMatrix<number>::l1_norm() const
1341 {
1342  const char type('O');
1343  return norm(type);
1344 }
1345 
1346 
1347 
1348 template <typename number>
1349 number
1350 LAPACKFullMatrix<number>::linfty_norm() const
1351 {
1352  const char type('I');
1353  return norm(type);
1354 }
1355 
1356 
1357 
1358 template <typename number>
1359 number
1360 LAPACKFullMatrix<number>::frobenius_norm() const
1361 {
1362  const char type('F');
1363  return norm(type);
1364 }
1365 
1366 
1367 
1368 template <typename number>
1369 number
1370 LAPACKFullMatrix<number>::norm(const char type) const
1371 {
1373 
1374  Assert(state == LAPACKSupport::matrix ||
1375  state == LAPACKSupport::inverse_matrix,
1376  ExcMessage("norms can be called in matrix state only."));
1377 
1378  const types::blas_int N = this->n();
1379  const types::blas_int M = this->m();
1380  const number *const values = &this->values[0];
1381  if (property == symmetric)
1382  {
1383  const types::blas_int lda = std::max<types::blas_int>(1, N);
1384  const types::blas_int lwork =
1385  (type == 'I' || type == 'O') ? std::max<types::blas_int>(1, N) : 0;
1386  work.resize(lwork);
1387  return lansy(&type, &LAPACKSupport::L, &N, values, &lda, work.data());
1388  }
1389  else
1390  {
1391  const types::blas_int lda = std::max<types::blas_int>(1, M);
1392  const types::blas_int lwork =
1393  (type == 'I') ? std::max<types::blas_int>(1, M) : 0;
1394  work.resize(lwork);
1395  return lange(&type, &M, &N, values, &lda, work.data());
1396  }
1397 }
1398 
1399 
1400 
1401 template <typename number>
1402 number
1403 LAPACKFullMatrix<number>::trace() const
1404 {
1405  Assert(state == LAPACKSupport::matrix ||
1406  state == LAPACKSupport::inverse_matrix,
1407  ExcMessage("Trace can be called in matrix state only."));
1408  Assert(this->n() == this->m(), ExcDimensionMismatch(this->n(), this->m()));
1409 
1410  number tr = 0;
1411  for (size_type i = 0; i < this->m(); ++i)
1412  tr += (*this)(i, i);
1413 
1414  return tr;
1415 }
1416 
1417 
1418 
1419 template <typename number>
1420 void
1421 LAPACKFullMatrix<number>::compute_cholesky_factorization()
1422 {
1423  Assert(state == matrix, ExcState(state));
1424  Assert(property == symmetric, ExcProperty(property));
1425  state = LAPACKSupport::unusable;
1426 
1427  const types::blas_int mm = this->m();
1428  const types::blas_int nn = this->n();
1429  (void)mm;
1430  Assert(mm == nn, ExcDimensionMismatch(mm, nn));
1431 
1432  number *const values = &this->values[0];
1433  types::blas_int info = 0;
1434  const types::blas_int lda = std::max<types::blas_int>(1, nn);
1435  potrf(&LAPACKSupport::L, &nn, values, &lda, &info);
1436 
1437  // info < 0 : the info-th argument had an illegal value
1438  Assert(info >= 0, ExcInternalError());
1439 
1440  state = cholesky;
1441  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("potrf", info));
1442 }
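// Sketch of the Cholesky path for a symmetric positive definite matrix
// (placeholder size n). Note that l1_norm() has to be called while the
// matrix is still in the `matrix` state if the condition number estimate is
// needed afterwards:
//
//   LAPACKFullMatrix<double> A(n, n);
//   // ... fill A with a symmetric positive definite matrix ...
//   A.set_property(LAPACKSupport::symmetric);
//   const double a_norm = A.l1_norm();
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);
//   Vector<double> rhs(n);
//   // ... fill rhs ...
//   A.solve(rhs); // solves A x = rhs with the Cholesky factor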
1443 
1444 
1445 
1446 template <typename number>
1447 number
1448 LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
1449 {
1451  Assert(state == cholesky, ExcState(state));
1452  number rcond = 0.;
1453 
1454  const types::blas_int N = this->m();
1455  const number * values = &this->values[0];
1456  types::blas_int info = 0;
1457  const types::blas_int lda = std::max<types::blas_int>(1, N);
1458  work.resize(3 * N);
1459  iwork.resize(N);
1460 
1461  // use the same uplo as in Cholesky
1462  pocon(&LAPACKSupport::L,
1463  &N,
1464  values,
1465  &lda,
1466  &a_norm,
1467  &rcond,
1468  work.data(),
1469  iwork.data(),
1470  &info);
1471 
1472  Assert(info >= 0, ExcInternalError());
1473 
1474  return rcond;
1475 }
1476 
1477 
1478 
1479 template <typename number>
1480 number
1481 LAPACKFullMatrix<number>::reciprocal_condition_number() const
1482 {
1486  number rcond = 0.;
1487 
1488  const types::blas_int N = this->m();
1489  const number *const values = &this->values[0];
1490  types::blas_int info = 0;
1491  const types::blas_int lda = std::max<types::blas_int>(1, N);
1492  work.resize(3 * N);
1493  iwork.resize(N);
1494 
1495  const char norm = '1';
1496  const char diag = 'N';
1497  const char uplo =
1498  (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1499  trcon(&norm,
1500  &uplo,
1501  &diag,
1502  &N,
1503  values,
1504  &lda,
1505  &rcond,
1506  work.data(),
1507  iwork.data(),
1508  &info);
1509 
1510  Assert(info >= 0, ExcInternalError());
1511 
1512  return rcond;
1513 }
1514 
1515 
1516 
1517 template <typename number>
1518 void
1519 LAPACKFullMatrix<number>::compute_svd()
1520 {
1521  Assert(state == matrix, ExcState(state));
1522  state = LAPACKSupport::unusable;
1523 
1524  const types::blas_int mm = this->m();
1525  const types::blas_int nn = this->n();
1526  wr.resize(std::max(mm, nn));
1527  std::fill(wr.begin(), wr.end(), 0.);
1528  ipiv.resize(8 * mm);
1529 
1530  svd_u = std_cxx14::make_unique<LAPACKFullMatrix<number>>(mm, mm);
1531  svd_vt = std_cxx14::make_unique<LAPACKFullMatrix<number>>(nn, nn);
1532  types::blas_int info = 0;
1533 
1534  // First determine optimal workspace size
1535  work.resize(1);
1536  types::blas_int lwork = -1;
1537 
1538  // TODO double check size
1539  std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1540  if (numbers::NumberTraits<number>::is_complex)
1541  {
1542  // This array is only used by the complex versions.
1543  std::size_t min = std::min(this->m(), this->n());
1544  std::size_t max = std::max(this->m(), this->n());
1545  real_work.resize(
1546  std::max(5 * min * min + 5 * min, 2 * max * min + 2 * min * min + min));
1547  }
1548 
1549  // make sure that the first entry in the work array is clear, in case the
1550  // routine does not completely overwrite the memory:
1551  work[0] = number();
1552  internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1553  mm,
1554  nn,
1555  this->values,
1556  wr,
1557  svd_u->values,
1558  svd_vt->values,
1559  work,
1560  real_work,
1561  ipiv,
1562  lwork,
1563  info);
1564 
1565  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1566  // Resize the work array. Add one to the size computed by LAPACK to be on
1567  // the safe side.
1568  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1569 
1570  work.resize(lwork);
1571  // Do the actual SVD.
1572  internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1573  mm,
1574  nn,
1575  this->values,
1576  wr,
1577  svd_u->values,
1578  svd_vt->values,
1579  work,
1580  real_work,
1581  ipiv,
1582  lwork,
1583  info);
1584  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1585 
1586  work.resize(0);
1587  ipiv.resize(0);
1588 
1589  state = LAPACKSupport::svd;
1590 }
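// Usage sketch for the singular value decomposition (placeholder sizes):
//
//   LAPACKFullMatrix<double> A(m, n);
//   // ... fill A ...
//   A.compute_svd();
//   const double sigma_max = A.singular_value(0);
//   const double sigma_min = A.singular_value(std::min(m, n) - 1);
//
// gesdd returns the singular values in descending order in wr, while the
// factors U and V^T are kept in svd_u and svd_vt for later use by vmult()
// and Tvmult().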
1591 
1592 
1593 template <typename number>
1594 void
1595 LAPACKFullMatrix<number>::compute_inverse_svd(const double threshold)
1596 {
1597  if (state == LAPACKSupport::matrix)
1598  compute_svd();
1599 
1600  Assert(state == LAPACKSupport::svd, ExcState(state));
1601 
1602  const typename numbers::NumberTraits<number>::real_type one(1.0);
1603  const double lim = std::abs(wr[0]) * threshold;
1604  for (size_type i = 0; i < wr.size(); ++i)
1605  {
1606  if (std::abs(wr[i]) > lim)
1607  wr[i] = one / wr[i];
1608  else
1609  wr[i] = 0.;
1610  }
1611  state = LAPACKSupport::inverse_svd;
1612 }
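// After compute_inverse_svd() the object applies the Moore-Penrose
// pseudo-inverse through vmult(); a sketch (placeholder sizes):
//
//   LAPACKFullMatrix<double> A(m, n);
//   Vector<double> b(m), x(n);
//   // ... fill A and b ...
//   A.compute_inverse_svd(1.e-12); // drop singular values below
//                                  // 1.e-12 * sigma_max
//   A.vmult(x, b);                 // x = A^+ b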
1613 
1614 
1615 
1616 template <typename number>
1617 void
1618 LAPACKFullMatrix<number>::compute_inverse_svd_with_kernel(
1619  const unsigned int kernel_size)
1620 {
1621  if (state == LAPACKSupport::matrix)
1622  compute_svd();
1623 
1624  Assert(state == LAPACKSupport::svd, ExcState(state));
1625 
1626  const typename numbers::NumberTraits<number>::real_type one(1.0);
1627  const unsigned int n_wr = wr.size();
1628  for (size_type i = 0; i < n_wr - kernel_size; ++i)
1629  wr[i] = one / wr[i];
1630  for (size_type i = n_wr - kernel_size; i < n_wr; ++i)
1631  wr[i] = 0.;
1632  state = LAPACKSupport::inverse_svd;
1633 }
1634 
1635 
1636 
1637 template <typename number>
1638 void
1639 LAPACKFullMatrix<number>::invert()
1640 {
1641  Assert(state == matrix || state == lu || state == cholesky, ExcState(state));
1642  const types::blas_int mm = this->m();
1643  const types::blas_int nn = this->n();
1644  Assert(nn == mm, ExcNotQuadratic());
1645 
1646  number *const values = &this->values[0];
1647  types::blas_int info = 0;
1648 
1649  if (property != symmetric)
1650  {
1651  if (state == matrix)
1652  compute_lu_factorization();
1653 
1654  ipiv.resize(mm);
1655  inv_work.resize(mm);
1656  getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
1657  }
1658  else
1659  {
1660  if (state == matrix)
1661  compute_cholesky_factorization();
1662 
1663  const types::blas_int lda = std::max<types::blas_int>(1, nn);
1664  potri(&LAPACKSupport::L, &nn, values, &lda, &info);
1665  // The inverse is stored in the lower triangular part; copy it into the
1666  // upper triangular part so that the full matrix is filled:
1667  for (types::blas_int i = 0; i < nn; ++i)
1668  for (types::blas_int j = i + 1; j < nn; ++j)
1669  this->el(i, j) = this->el(j, i);
1670  }
1671 
1672  Assert(info >= 0, ExcInternalError());
1674 
1675  state = inverse_matrix;
1676 }
1677 
1678 
1679 
1680 template <typename number>
1681 void
1682 LAPACKFullMatrix<number>::solve(Vector<number> &v, const bool transposed) const
1683 {
1684  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1685  AssertDimension(this->m(), v.size());
1686  const char * trans = transposed ? &T : &N;
1687  const types::blas_int nn = this->n();
1688  const number *const values = &this->values[0];
1689  const types::blas_int n_rhs = 1;
1690  types::blas_int info = 0;
1691 
1692  if (state == lu)
1693  {
1694  getrs(
1695  trans, &nn, &n_rhs, values, &nn, ipiv.data(), v.begin(), &nn, &info);
1696  }
1697  else if (state == cholesky)
1698  {
1699  potrs(&LAPACKSupport::L, &nn, &n_rhs, values, &nn, v.begin(), &nn, &info);
1700  }
1701  else if (property == upper_triangular || property == lower_triangular)
1702  {
1703  const char uplo =
1704  (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1705 
1706  const types::blas_int lda = nn;
1707  const types::blas_int ldb = nn;
1708  trtrs(
1709  &uplo, trans, "N", &nn, &n_rhs, values, &lda, v.begin(), &ldb, &info);
1710  }
1711  else
1712  {
1713  Assert(false,
1714  ExcMessage(
1715  "The matrix has to be either factorized or triangular."));
1716  }
1717 
1718  Assert(info == 0, ExcInternalError());
1719 }
1720 
1721 
1722 
1723 template <typename number>
1724 void
1725 LAPACKFullMatrix<number>::solve(LAPACKFullMatrix<number> &B,
1726  const bool transposed) const
1727 {
1728  Assert(B.state == matrix, ExcState(B.state));
1729 
1730  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1731  AssertDimension(this->m(), B.m());
1732  const char * trans = transposed ? &T : &N;
1733  const types::blas_int nn = this->n();
1734  const number *const values = &this->values[0];
1735  const types::blas_int n_rhs = B.n();
1736  types::blas_int info = 0;
1737 
1738  if (state == lu)
1739  {
1740  getrs(
1741  trans, &nn, &n_rhs, values, &nn, ipiv.data(), &B.values[0], &nn, &info);
1742  }
1743  else if (state == cholesky)
1744  {
1745  potrs(
1746  &LAPACKSupport::L, &nn, &n_rhs, values, &nn, &B.values[0], &nn, &info);
1747  }
1748  else if (property == upper_triangular || property == lower_triangular)
1749  {
1750  const char uplo =
1751  (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1752 
1753  const types::blas_int lda = nn;
1754  const types::blas_int ldb = nn;
1755  trtrs(&uplo,
1756  trans,
1757  "N",
1758  &nn,
1759  &n_rhs,
1760  values,
1761  &lda,
1762  &B.values[0],
1763  &ldb,
1764  &info);
1765  }
1766  else
1767  {
1768  Assert(false,
1769  ExcMessage(
1770  "The matrix has to be either factorized or triangular."));
1771  }
1772 
1773  Assert(info == 0, ExcInternalError());
1774 }
1775 
1776 
1777 
1778 template <typename number>
1779 void
1780 LAPACKFullMatrix<number>::apply_lu_factorization(Vector<number> &v,
1781  const bool transposed) const
1782 {
1783  solve(v, transposed);
1784 }
1785 
1786 
1787 
1788 template <typename number>
1789 void
1790 LAPACKFullMatrix<number>::apply_lu_factorization(LAPACKFullMatrix<number> &B,
1791  const bool transposed) const
1792 {
1793  solve(B, transposed);
1794 }
1795 
1796 
1797 
1798 template <typename number>
1799 number
1800 LAPACKFullMatrix<number>::determinant() const
1801 {
1802  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1803 
1804  // LAPACK doesn't offer a function to compute a matrix determinant.
1805  // This is due to the difficulty in maintaining numerical accuracy, as the
1806  // calculations are likely to overflow or underflow. See
1807  // http://www.netlib.org/lapack/faq.html#_are_there_routines_in_lapack_to_compute_determinants
1808  //
1809  // However, after a PLU decomposition one can compute this by multiplication
1810  // of the diagonal entries with one another. One must take into consideration
1811  // the number of permutations (row swaps) stored in the P matrix.
1812  //
1813  // See the implementations in the blaze library (detNxN)
1814  // https://bitbucket.org/blaze-lib/blaze
1815  // and also
1816  // https://dualm.wordpress.com/2012/01/06/computing-determinant-in-fortran/
1817  // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?p=341&#p336
1818  // for further information.
1819  Assert(state == lu, ExcState(state));
1820  Assert(ipiv.size() == this->m(), ExcInternalError());
1821  number det = 1.0;
1822  for (size_type i = 0; i < this->m(); ++i)
1823  det *=
1824  (ipiv[i] == types::blas_int(i + 1) ? this->el(i, i) : -this->el(i, i));
1825  return det;
1826 }
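// Worked example of the sign bookkeeping above: for the 2x2 permutation
// matrix A = [[0, 1], [1, 0]], getrf swaps the two rows (ipiv = {2, 2}),
// the resulting U factor has diagonal entries 1 and 1, and the single row
// swap flips the sign in the first loop iteration, so the product evaluates
// to det(A) = -1, as expected.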
1827 
1828 
1829 template <typename number>
1830 void
1831 LAPACKFullMatrix<number>::compute_eigenvalues(const bool right, const bool left)
1832 {
1833  Assert(state == matrix, ExcState(state));
1834  const types::blas_int nn = this->n();
1835  wr.resize(nn);
1836  wi.resize(nn);
1837  if (right)
1838  vr.resize(nn * nn);
1839  if (left)
1840  vl.resize(nn * nn);
1841 
1842  types::blas_int info = 0;
1843  types::blas_int lwork = 1;
1844  const char jobvr = (right) ? V : N;
1845  const char jobvl = (left) ? V : N;
1846 
1847  /*
1848  * The LAPACK routine xGEEV requires a sufficiently large work array; the
1849  * minimum requirement is
1850  *
1851  * work.size >= 4*nn.
1852  *
1853  * However, for better performance, a larger work array may be needed. The
1854  * first call determines the optimal work size and the second does the work.
1855  */
1856  lwork = -1;
1857  work.resize(1);
1858 
1859  std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1860  if (numbers::NumberTraits<number>::is_complex)
1861  // This array is only used by the complex versions.
1862  real_work.resize(2 * this->m());
1863  internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1864  jobvr,
1865  this->values,
1866  this->m(),
1867  wr,
1868  wi,
1869  vl,
1870  vr,
1871  work,
1872  real_work,
1873  lwork,
1874  info);
1875 
1876  // geev returns info=0 on success. Since we only queried the optimal size
1877  // for work, everything else would not be acceptable.
1878  Assert(info == 0, ExcInternalError());
1879  // Allocate working array according to suggestion (same strategy as was
1880  // noted in compute_svd).
1881  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1882 
1883  // resize workspace array
1884  work.resize((size_type)lwork);
1885 
1886  // Finally compute the eigenvalues.
1887  internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1888  jobvr,
1889  this->values,
1890  this->m(),
1891  wr,
1892  wi,
1893  vl,
1894  vr,
1895  work,
1896  real_work,
1897  lwork,
1898  info);
1899 
1900  Assert(info >= 0, ExcInternalError());
1901  // TODO:[GK] What if the QR method fails?
1902  if (info != 0)
1903  std::cerr << "LAPACK error in geev" << std::endl;
1904 
1904 
1905  state = LAPACKSupport::State(eigenvalues | unusable);
1906 }
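// Usage sketch for the unsymmetric eigenvalue solver (placeholder size n).
// Eigenvalues of a real matrix may come in complex-conjugate pairs, which is
// why eigenvalue() returns std::complex values assembled from wr and wi:
//
//   LAPACKFullMatrix<double> A(n, n);
//   // ... fill A ...
//   A.compute_eigenvalues();
//   for (unsigned int i = 0; i < n; ++i)
//     {
//       const std::complex<double> lambda = A.eigenvalue(i);
//       // ... use lambda ...
//     }
//
// Note that the matrix content is destroyed in the process.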
1907 
1908 
1909 template <typename number>
1910 void
1911 LAPACKFullMatrix<number>::compute_eigenvalues_symmetric(
1912  const number lower_bound,
1913  const number upper_bound,
1914  const number abs_accuracy,
1915  Vector<number> &eigenvalues,
1916  FullMatrix<number> &eigenvectors)
1917 {
1918  Assert(state == matrix, ExcState(state));
1919  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
1920  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
1921 
1922  wr.resize(nn);
1923  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
1924 
1925  number *const values_A = &this->values[0];
1926  number *const values_eigenvectors = &matrix_eigenvectors.values[0];
1927 
1928  types::blas_int info(0), lwork(-1), n_eigenpairs(0);
1929  const char *const jobz(&V);
1930  const char *const uplo(&U);
1931  const char *const range(&V);
1932  const types::blas_int *const dummy(&one);
1933  std::vector<types::blas_int> iwork(static_cast<size_type>(5 * nn));
1934  std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
1935 
1936 
1937  /*
1938  * The LAPACK routine xSYEVX requires a sufficiently large work array; the
1939  * minimum requirement is
1940  *
1941  * work.size >= 8*nn.
1942  *
1943  * However, for better performance, a larger work array may be needed. The
1944  * first call determines the optimal work size and the second does the work.
1945  */
1946  work.resize(1);
1947 
1948  syevx(jobz,
1949  range,
1950  uplo,
1951  &nn,
1952  values_A,
1953  &nn,
1954  &lower_bound,
1955  &upper_bound,
1956  dummy,
1957  dummy,
1958  &abs_accuracy,
1959  &n_eigenpairs,
1960  wr.data(),
1961  values_eigenvectors,
1962  &nn,
1963  work.data(),
1964  &lwork,
1965  iwork.data(),
1966  ifail.data(),
1967  &info);
1968  // syevx returns info=0 on success. Since we only queried the optimal size
1969  // for work, everything else would not be acceptable.
1970  Assert(info == 0, ExcInternalError());
1971  // Allocate working array according to suggestion (same strategy as was noted
1972  // in compute_svd).
1973  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1974  work.resize(static_cast<size_type>(lwork));
1975 
1976  // Finally compute the eigenvalues.
1977  syevx(jobz,
1978  range,
1979  uplo,
1980  &nn,
1981  values_A,
1982  &nn,
1983  &lower_bound,
1984  &upper_bound,
1985  dummy,
1986  dummy,
1987  &abs_accuracy,
1988  &n_eigenpairs,
1989  wr.data(),
1990  values_eigenvectors,
1991  &nn,
1992  work.data(),
1993  &lwork,
1994  iwork.data(),
1995  ifail.data(),
1996  &info);
1997 
1998  // Negative return value implies a wrong argument. This should be internal.
1999  Assert(info >= 0, ExcInternalError());
2000  if (info != 0)
2001  std::cerr << "LAPACK error in syevx" << std::endl;
2002 
2003  eigenvalues.reinit(n_eigenpairs);
2004  eigenvectors.reinit(nn, n_eigenpairs, true);
2005 
2006  for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2007  {
2008  eigenvalues(i) = wr[i];
2009  size_type col_begin(i * nn);
2010  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2011  {
2012  eigenvectors(j, i) = values_eigenvectors[col_begin + j];
2013  }
2014  }
2015 
2016  state = LAPACKSupport::State(unusable);
2017 }
2018 
2019 
2020 template <typename number>
2021 void
2022 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2023  LAPACKFullMatrix<number> &B,
2024  const number lower_bound,
2025  const number upper_bound,
2026  const number abs_accuracy,
2027  Vector<number> &eigenvalues,
2028  std::vector<Vector<number>> &eigenvectors,
2029  const types::blas_int itype)
2030 {
2031  Assert(state == matrix, ExcState(state));
2032  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2033  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2034  Assert(B.m() == B.n(), ExcNotQuadratic());
2035  Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2036 
2037  wr.resize(nn);
2038  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2039 
2040  number *const values_A = &this->values[0];
2041  number *const values_B = &B.values[0];
2042  number *const values_eigenvectors = &matrix_eigenvectors.values[0];
2043 
2044  types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2045  const char *const jobz(&V);
2046  const char *const uplo(&U);
2047  const char *const range(&V);
2048  const types::blas_int *const dummy(&one);
2049  iwork.resize(static_cast<size_type>(5 * nn));
2050  std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2051 
2052 
2053  /*
2054  * The LAPACK routine xSYGVX requires a sufficiently large work array; the
2055  * minimum requirement is
2056  *
2057  * work.size >= 8*nn.
2058  *
2059  * However, for better performance, a larger work array may be needed. The
2060  * first call determines the optimal work size and the second does the work.
2061  */
2062  work.resize(1);
2063 
2064  sygvx(&itype,
2065  jobz,
2066  range,
2067  uplo,
2068  &nn,
2069  values_A,
2070  &nn,
2071  values_B,
2072  &nn,
2073  &lower_bound,
2074  &upper_bound,
2075  dummy,
2076  dummy,
2077  &abs_accuracy,
2078  &n_eigenpairs,
2079  wr.data(),
2080  values_eigenvectors,
2081  &nn,
2082  work.data(),
2083  &lwork,
2084  iwork.data(),
2085  ifail.data(),
2086  &info);
2087  // sygvx returns info=0 on success. Since we only queried the optimal size
2088  // for work, everything else would not be acceptable.
2089  Assert(info == 0, ExcInternalError());
2090  // Allocate working array according to suggestion (same strategy as was
2091  // noted in compute_svd).
2092  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2093 
2094  // resize workspace arrays
2095  work.resize(static_cast<size_type>(lwork));
2096 
2097  // Finally compute the generalized eigenvalues.
2098  sygvx(&itype,
2099  jobz,
2100  range,
2101  uplo,
2102  &nn,
2103  values_A,
2104  &nn,
2105  values_B,
2106  &nn,
2107  &lower_bound,
2108  &upper_bound,
2109  dummy,
2110  dummy,
2111  &abs_accuracy,
2112  &n_eigenpairs,
2113  wr.data(),
2114  values_eigenvectors,
2115  &nn,
2116  work.data(),
2117  &lwork,
2118  iwork.data(),
2119  ifail.data(),
2120  &info);
2121 
2122  // Negative return value implies a wrong argument. This should be internal.
2123  Assert(info >= 0, ExcInternalError());
2124  if (info != 0)
2125  std::cerr << "LAPACK error in sygvx" << std::endl;
2126 
2127  eigenvalues.reinit(n_eigenpairs);
2128  eigenvectors.resize(n_eigenpairs);
2129 
2130  for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2131  {
2132  eigenvalues(i) = wr[i];
2133  size_type col_begin(i * nn);
2134  eigenvectors[i].reinit(nn, true);
2135  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2136  {
2137  eigenvectors[i](j) = values_eigenvectors[col_begin + j];
2138  }
2139  }
2140 
2141  state = LAPACKSupport::State(unusable);
2142 }
2143 
2144 
2145 template <typename number>
2146 void
2147 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2148  LAPACKFullMatrix<number> &B,
2149  std::vector<Vector<number>> &eigenvectors,
2150  const types::blas_int itype)
2151 {
2152  Assert(state == matrix, ExcState(state));
2153  const types::blas_int nn = this->n();
2154  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2155  Assert(B.m() == B.n(), ExcNotQuadratic());
2156  Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2157  Assert(eigenvectors.size() <= static_cast<size_type>(nn),
2158  ExcMessage("eigenvectors.size() > matrix.n()"));
2159 
2160  wr.resize(nn);
2161  wi.resize(nn); // This is set purely for consistency reasons with the
2162  // eigenvalues() function.
2163 
2164  number *const values_A = &this->values[0];
2165  number *const values_B = &B.values[0];
2166 
2167  types::blas_int info = 0;
2168  types::blas_int lwork = -1;
2169  const char *const jobz = (eigenvectors.size() > 0) ? (&V) : (&N);
2170  const char *const uplo = (&U);
2171 
2172  /*
2173  * The LAPACK routine xSYGV requires a sufficiently large work array; the
2174  * minimum requirement is
2175  *
2176  * work.size >= 3*nn - 1.
2177  *
2178  * However, for better performance, a larger work array may be needed. The
2179  * first call determines the optimal work size and the second does the work.
2180  */
2181  work.resize(1);
2182 
2183  sygv(&itype,
2184  jobz,
2185  uplo,
2186  &nn,
2187  values_A,
2188  &nn,
2189  values_B,
2190  &nn,
2191  wr.data(),
2192  work.data(),
2193  &lwork,
2194  &info);
2195  // sygv returns info=0 on success. Since we only queried the optimal size
2196  // for work, everything else would not be acceptable.
2197  Assert(info == 0, ExcInternalError());
2198  // Allocate working array according to suggestion (same strategy as was
2199  // noted in compute_svd).
2200  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2201 
2202  // resize workspace array
2203  work.resize(static_cast<size_type>(lwork));
2204 
2205  // Finally compute the generalized eigenvalues.
2206  sygv(&itype,
2207  jobz,
2208  uplo,
2209  &nn,
2210  values_A,
2211  &nn,
2212  values_B,
2213  &nn,
2214  wr.data(),
2215  work.data(),
2216  &lwork,
2217  &info);
2218  // Negative return value implies a wrong argument. This should be internal.
2219 
2220  Assert(info >= 0, ExcInternalError());
2221  if (info != 0)
2222  std::cerr << "LAPACK error in sygv" << std::endl;
2223 
2224  for (size_type i = 0; i < eigenvectors.size(); ++i)
2225  {
2226  size_type col_begin(i * nn);
2227  eigenvectors[i].reinit(nn, true);
2228  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2229  {
2230  eigenvectors[i](j) = values_A[col_begin + j];
2231  }
2232  }
2233  state = LAPACKSupport::State(unusable);
2234 }
2235 
2236 
2237 template <typename number>
2238 void
2239 LAPACKFullMatrix<number>::print_formatted(std::ostream &out,
2240  const unsigned int precision,
2241  const bool scientific,
2242  const unsigned int width_,
2243  const char * zero_string,
2244  const double denominator,
2245  const double threshold) const
2246 {
2247  unsigned int width = width_;
2248 
2249  Assert((!this->empty()) || (this->n() + this->m() == 0), ExcInternalError());
2250  Assert((state == LAPACKSupport::matrix) ||
2251  (state == LAPACKSupport::inverse_matrix) ||
2252  (state == LAPACKSupport::cholesky),
2253  ExcState(state));
2254 
2255  // set the output format, but store the old
2256  // state so that it can be restored at the end
2257  std::ios::fmtflags old_flags = out.flags();
2258  std::streamsize old_precision = out.precision(precision);
2259 
2260  if (scientific)
2261  {
2262  out.setf(std::ios::scientific, std::ios::floatfield);
2263  if (!width)
2264  width = precision + 7;
2265  }
2266  else
2267  {
2268  out.setf(std::ios::fixed, std::ios::floatfield);
2269  if (!width)
2270  width = precision + 2;
2271  }
2272 
2273  for (size_type i = 0; i < this->m(); ++i)
2274  {
2275  // A Cholesky factor is stored in the lower triangle, so only output that part:
2276  const size_type nc = state == LAPACKSupport::cholesky ? i + 1 : this->n();
2277  for (size_type j = 0; j < nc; ++j)
2278  // we might have complex numbers, so use abs also to check for nan
2279  // since there is no isnan on complex numbers
2280  if (std::isnan(std::abs((*this)(i, j))))
2281  out << std::setw(width) << (*this)(i, j) << ' ';
2282  else if (std::abs(this->el(i, j)) > threshold)
2283  out << std::setw(width) << this->el(i, j) * denominator << ' ';
2284  else
2285  out << std::setw(width) << zero_string << ' ';
2286  out << std::endl;
2287  };
2288 
2289  AssertThrow(out, ExcIO());
2290  // reset output format
2291  out.flags(old_flags);
2292  out.precision(old_precision);
2293 }
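A usage sketch for print_formatted() (stream and parameter values are illustrative): in scientific mode an unspecified width defaults to precision + 7, entries whose magnitude exceeds threshold are printed multiplied by denominator, and all other entries are replaced by zero_string.

#include <deal.II/lac/lapack_full_matrix.h>

#include <iostream>

// Illustrative only: print M with 5 digits in scientific notation,
// automatic column width, "0" for entries of magnitude at most 1e-12,
// and no rescaling of the printed values.
void
print_example(const dealii::LAPACKFullMatrix<double> &M)
{
  M.print_formatted(std::cout,
                    /*precision=*/5,
                    /*scientific=*/true,
                    /*width=*/0,
                    /*zero_string=*/"0",
                    /*denominator=*/1.,
                    /*threshold=*/1e-12);
}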
2294 
2295 
2296 //----------------------------------------------------------------------//
2297 
2298 template <typename number>
2299 void
2300 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M)
2301 {
2302  matrix = &M;
2303  mem = nullptr;
2304 }
2305 
2306 
2307 template <typename number>
2308 void
2309 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M,
2310  VectorMemory<Vector<number>> &V)
2311 {
2312  matrix = &M;
2313  mem = &V;
2314 }
2315 
2316 
2317 template <typename number>
2318 void
2319 PreconditionLU<number>::vmult(Vector<number> &dst,
2320  const Vector<number> &src) const
2321 {
2322  dst = src;
2323  matrix->apply_lu_factorization(dst, false);
2324 }
2325 
2326 
2327 template <typename number>
2328 void
2329 PreconditionLU<number>::Tvmult(Vector<number> &dst,
2330  const Vector<number> &src) const
2331 {
2332  dst = src;
2333  matrix->apply_lu_factorization(dst, true);
2334 }
2335 
2336 
2337 template <typename number>
2338 void
2339 PreconditionLU<number>::vmult(BlockVector<number> &dst,
2340  const BlockVector<number> &src) const
2341 {
2342  Assert(mem != nullptr, ExcNotInitialized());
2343  Vector<number> *aux = mem->alloc();
2344  *aux = src;
2345  matrix->apply_lu_factorization(*aux, false);
2346  dst = *aux;
2347 }
2348 
2349 
2350 template <typename number>
2351 void
2352 PreconditionLU<number>::Tvmult(BlockVector<number> &dst,
2353  const BlockVector<number> &src) const
2354 {
2355  Assert(mem != nullptr, ExcNotInitialized());
2356  Vector<number> *aux = mem->alloc();
2357  *aux = src;
2358  matrix->apply_lu_factorization(*aux, true);
2359  dst = *aux;
2360 }
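Finally, a sketch of how PreconditionLU is typically used: it simply forwards to apply_lu_factorization(), so the wrapped matrix must have been LU-factorized beforehand (names below are illustrative).

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

// Illustrative only: use an LU-factorized LAPACKFullMatrix as a direct
// solver through the preconditioner interface, i.e. x = A^{-1} b.
void
solve_with_lu(dealii::LAPACKFullMatrix<double> &A,
              dealii::Vector<double> &          x,
              const dealii::Vector<double> &    b)
{
  // Factorize in place; afterwards A can no longer be used as a plain matrix.
  A.compute_lu_factorization();

  dealii::PreconditionLU<double> preconditioner;
  preconditioner.initialize(A);

  // vmult() applies the forward and backward substitutions.
  preconditioner.vmult(x, b);
}

The BlockVector overloads above additionally require the two-argument initialize(), which supplies the VectorMemory pool used to allocate the temporary Vector they work on.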
2361 
2362 
2363 
2364 #include "lapack_full_matrix.inst"
2365 
2366 
2367 DEAL_II_NAMESPACE_CLOSE