Reference documentation for deal.II version 9.1.0-pre
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/mpi.h>
19 #include <deal.II/base/mpi.templates.h>
20 #include <deal.II/base/multithread_info.h>
21 #include <deal.II/base/utilities.h>
22 
23 #include <deal.II/lac/la_parallel_block_vector.h>
24 #include <deal.II/lac/la_parallel_vector.h>
25 #include <deal.II/lac/vector_memory.h>
26 
27 #include <iostream>
28 
29 #ifdef DEAL_II_WITH_TRILINOS
30 # ifdef DEAL_II_WITH_MPI
31 # include <deal.II/lac/trilinos_parallel_block_vector.h>
32 # include <deal.II/lac/trilinos_vector.h>
33 # include <deal.II/lac/vector_memory.h>
34 
35 # include <Epetra_MpiComm.h>
36 # endif
37 #endif
38 
39 #ifdef DEAL_II_WITH_PETSC
40 # include <deal.II/lac/petsc_block_vector.h>
41 # include <deal.II/lac/petsc_vector.h>
42 
43 # include <petscsys.h>
44 #endif
45 
46 #ifdef DEAL_II_WITH_SLEPC
47 # include <deal.II/lac/slepc_solver.h>
48 
49 # include <slepcsys.h>
50 #endif
51 
52 #ifdef DEAL_II_WITH_P4EST
53 # include <p4est_bits.h>
54 #endif
55 
56 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
57 # include <zoltan_cpp.h>
58 #endif
59 
60 DEAL_II_NAMESPACE_OPEN
61 
62 
63 namespace Utilities
64 {
65  namespace MPI
66  {
67 #ifdef DEAL_II_WITH_MPI
68  unsigned int
69  n_mpi_processes(const MPI_Comm &mpi_communicator)
70  {
71  int n_jobs = 1;
72  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
73  AssertThrowMPI(ierr);
74 
75  return n_jobs;
76  }
77 
78 
79  unsigned int
80  this_mpi_process(const MPI_Comm &mpi_communicator)
81  {
82  int rank = 0;
83  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
84  AssertThrowMPI(ierr);
85 
86  return rank;
87  }
88 
89 
90  MPI_Comm
91  duplicate_communicator(const MPI_Comm &mpi_communicator)
92  {
93  MPI_Comm new_communicator;
94  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
95  AssertThrowMPI(ierr);
96  return new_communicator;
97  }
98 
99 
100 
101  int
102  create_group(const MPI_Comm & comm,
103  const MPI_Group &group,
104  const int tag,
105  MPI_Comm * new_comm)
106  {
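// With MPI 3.0 or newer we can defer to MPI_Comm_create_group, which only
// the members of `group` need to call. For older MPI versions the same
// behavior is emulated below by pairwise merging of intercommunicators.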
107 # if DEAL_II_MPI_VERSION_GTE(3, 0)
108  return MPI_Comm_create_group(comm, group, tag, new_comm);
109 # else
110  int rank;
111  int ierr = MPI_Comm_rank(comm, &rank);
112  AssertThrowMPI(ierr);
113 
114  int grp_rank;
115  ierr = MPI_Group_rank(group, &grp_rank);
116  AssertThrowMPI(ierr);
117  if (grp_rank == MPI_UNDEFINED)
118  {
119  *new_comm = MPI_COMM_NULL;
120  return MPI_SUCCESS;
121  }
122 
123  int grp_size;
124  ierr = MPI_Group_size(group, &grp_size);
125  AssertThrowMPI(ierr);
126 
127  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
128  AssertThrowMPI(ierr);
129 
130  MPI_Group parent_grp;
131  ierr = MPI_Comm_group(comm, &parent_grp);
132  AssertThrowMPI(ierr);
133 
134  std::vector<int> pids(grp_size);
135  std::vector<int> grp_pids(grp_size);
136  std::iota(grp_pids.begin(), grp_pids.end(), 0);
137  ierr = MPI_Group_translate_ranks(
138  group, grp_size, grp_pids.data(), parent_grp, pids.data());
139  AssertThrowMPI(ierr);
140  ierr = MPI_Group_free(&parent_grp);
141  AssertThrowMPI(ierr);
142 
143  MPI_Comm comm_old = *new_comm;
144  MPI_Comm ic;
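// Recursive doubling: start from a duplicate of MPI_COMM_SELF on every
// group member and, in each round, fuse neighboring blocks of merge_sz
// ranks. An even-numbered block connects to the leader of the block to its
// right and is merged as the LOW side; an odd-numbered block connects to
// the leader of the block to its left and is merged as the HIGH side. After
// the last round a single intracommunicator covers the whole group.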
145  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
146  {
147  const int gid = grp_rank / merge_sz;
148  comm_old = *new_comm;
149  if (gid % 2 == 0)
150  {
151  if ((gid + 1) * merge_sz < grp_size)
152  {
153  ierr = (MPI_Intercomm_create(
154  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
155  AssertThrowMPI(ierr);
156  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
157  AssertThrowMPI(ierr);
158  }
159  }
160  else
161  {
162  ierr = MPI_Intercomm_create(
163  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
164  AssertThrowMPI(ierr);
165  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
166  AssertThrowMPI(ierr);
167  }
168  if (*new_comm != comm_old)
169  {
170  ierr = MPI_Comm_free(&ic);
171  AssertThrowMPI(ierr);
172  ierr = MPI_Comm_free(&comm_old);
173  AssertThrowMPI(ierr);
174  }
175  }
176 
177  return MPI_SUCCESS;
178 # endif
179  }
180 
181 
182 
183  std::vector<unsigned int>
184  compute_point_to_point_communication_pattern(
185  const MPI_Comm & mpi_comm,
186  const std::vector<unsigned int> &destinations)
187  {
188  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
189  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
190 
191  for (unsigned int i = 0; i < destinations.size(); ++i)
192  {
193  Assert(destinations[i] < n_procs,
194  ExcIndexRange(destinations[i], 0, n_procs));
195  Assert(destinations[i] != myid,
196  ExcMessage(
197  "There is no point in communicating with ourselves."));
198  }
199 
200 # if DEAL_II_MPI_VERSION_GTE(2, 2)
201  // Calculate the number of messages to send to each process
202  std::vector<unsigned int> dest_vector(n_procs);
203  for (const auto &el : destinations)
204  ++dest_vector[el];
205 
206  // Find how many processes will send to this one
207  // by reducing with sum and then scattering the
208  // results over all processes
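// (For example, with three ranks where rank 0 sends to {1,2} and rank 2
// sends to {1}: the per-rank dest_vector entries are {0,1,1}, {0,0,0} and
// {0,1,0}; their element-wise sum {0,2,1} is scattered one entry per rank,
// so rank 1 learns that n_recv_from == 2.)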
209  unsigned int n_recv_from;
210  const int ierr = MPI_Reduce_scatter_block(
211  &dest_vector[0], &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
212 
213  AssertThrowMPI(ierr);
214 
215  // Send myid to every process in `destinations` vector...
216  std::vector<MPI_Request> send_requests(destinations.size());
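// (each request slot below is indexed by the position of `el` inside
// `destinations`, recovered from the address of the range-for reference)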
217  for (const auto &el : destinations)
218  MPI_Isend(&myid,
219  1,
220  MPI_UNSIGNED,
221  el,
222  32766,
223  mpi_comm,
224  &send_requests[&el - &destinations[0]]);
225 
226  // if no one to receive from, return an empty vector
227  if (n_recv_from == 0)
228  return std::vector<unsigned int>();
229 
230  // ...otherwise receive `n_recv_from` times from the processes
231  // who communicate with this one. Store the obtained id's
232  // in the resulting vector
233  std::vector<unsigned int> origins(n_recv_from);
234  for (auto &el : origins)
235  MPI_Recv(&el,
236  1,
237  MPI_UNSIGNED,
238  MPI_ANY_SOURCE,
239  32766,
240  mpi_comm,
241  MPI_STATUS_IGNORE);
242 
243  MPI_Waitall(destinations.size(),
244  send_requests.data(),
245  MPI_STATUSES_IGNORE);
246  return origins;
247 # else
248  // let all processors communicate the maximal number of destinations
249  // they have
250  const unsigned int max_n_destinations =
251  Utilities::MPI::max(destinations.size(), mpi_comm);
252 
253  if (max_n_destinations == 0)
254  // all processes have nothing to send/receive:
255  return std::vector<unsigned int>();
256 
257  // now that we know the number of data packets every processor wants to
258  // send, set up a buffer with the maximal size and copy our destinations
259  // in there, padded with -1's
260  std::vector<unsigned int> my_destinations(max_n_destinations,
261  numbers::invalid_unsigned_int);
262  std::copy(destinations.begin(),
263  destinations.end(),
264  my_destinations.begin());
265 
266  // now exchange these (we could communicate less data if we used
267  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
268  // processors in this case, which is more expensive than the reduction
269  // operation above in MPI_Allreduce)
270  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
271  const int ierr = MPI_Allgather(my_destinations.data(),
272  max_n_destinations,
273  MPI_UNSIGNED,
274  all_destinations.data(),
275  max_n_destinations,
276  MPI_UNSIGNED,
277  mpi_comm);
278  AssertThrowMPI(ierr);
279 
280  // now we know who is going to communicate with whom. collect who is
281  // going to communicate with us!
282  std::vector<unsigned int> origins;
283  for (unsigned int i = 0; i < n_procs; ++i)
284  for (unsigned int j = 0; j < max_n_destinations; ++j)
285  if (all_destinations[i * max_n_destinations + j] == myid)
286  origins.push_back(i);
287  else if (all_destinations[i * max_n_destinations + j] ==
288  numbers::invalid_unsigned_int)
289  break;
290 
291  return origins;
292 # endif
293  }
294 
295 
296  namespace
297  {
298  // custom MPI_Op used by min_max_avg
299  void
300  max_reduce(const void *in_lhs_,
301  void * inout_rhs_,
302  int * len,
303  MPI_Datatype *)
304  {
305  (void)len;
306  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
307  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
308 
309  Assert(*len == 1, ExcInternalError());
310 
311  inout_rhs->sum += in_lhs->sum;
312  if (inout_rhs->min > in_lhs->min)
313  {
314  inout_rhs->min = in_lhs->min;
315  inout_rhs->min_index = in_lhs->min_index;
316  }
317  else if (inout_rhs->min == in_lhs->min)
318  {
319  // choose lower cpu index when tied to make operator commutative
320  if (inout_rhs->min_index > in_lhs->min_index)
321  inout_rhs->min_index = in_lhs->min_index;
322  }
323 
324  if (inout_rhs->max < in_lhs->max)
325  {
326  inout_rhs->max = in_lhs->max;
327  inout_rhs->max_index = in_lhs->max_index;
328  }
329  else if (inout_rhs->max == in_lhs->max)
330  {
331  // choose lower cpu index when tied to make operator commutative
332  if (inout_rhs->max_index > in_lhs->max_index)
333  inout_rhs->max_index = in_lhs->max_index;
334  }
335  }
336  } // namespace
337 
338 
339 
340  MinMaxAvg
341  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
342  {
343  // If MPI was not started, we have a serial computation and cannot run
344  // the other MPI commands
345  if (job_supports_mpi() == false)
346  {
347  MinMaxAvg result;
348  result.sum = my_value;
349  result.avg = my_value;
350  result.min = my_value;
351  result.max = my_value;
352  result.min_index = 0;
353  result.max_index = 0;
354 
355  return result;
356  }
357 
358  // To avoid uninitialized values on some MPI implementations, provide
359  // result with a default value already...
360  MinMaxAvg result = {0.,
361  std::numeric_limits<double>::max(),
362  -std::numeric_limits<double>::max(),
363  0,
364  0,
365  0.};
366 
367  const unsigned int my_id =
368  ::Utilities::MPI::this_mpi_process(mpi_communicator);
369  const unsigned int numproc =
370  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
371 
372  MPI_Op op;
373  int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
374  AssertThrowMPI(ierr);
375 
376  MinMaxAvg in;
377  in.sum = in.min = in.max = my_value;
378  in.min_index = in.max_index = my_id;
379 
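// The reduction operates on a user-defined datatype mirroring the layout of
// MinMaxAvg: a block of three doubles (sum, min, max) followed by two
// integers (min_index, max_index). The avg member is not part of the
// reduction; it is computed from the summed value afterwards.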
380  MPI_Datatype type;
381  int lengths[] = {3, 2};
382  MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
383  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};
384 
385  ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
386  AssertThrowMPI(ierr);
387 
388  ierr = MPI_Type_commit(&type);
389  AssertThrowMPI(ierr);
390  ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
391  AssertThrowMPI(ierr);
392 
393  ierr = MPI_Type_free(&type);
394  AssertThrowMPI(ierr);
395 
396  ierr = MPI_Op_free(&op);
397  AssertThrowMPI(ierr);
398 
399  result.avg = result.sum / numproc;
400 
401  return result;
402  }
403 
404 #else
405 
406  unsigned int
407  n_mpi_processes(const MPI_Comm &)
408  {
409  return 1;
410  }
411 
412 
413 
414  unsigned int
415  this_mpi_process(const MPI_Comm &)
416  {
417  return 0;
418  }
419 
420 
421  MPI_Comm
422  duplicate_communicator(const MPI_Comm &mpi_communicator)
423  {
424  return mpi_communicator;
425  }
426 
427 
428 
429  MinMaxAvg
430  min_max_avg(const double my_value, const MPI_Comm &)
431  {
432  MinMaxAvg result;
433 
434  result.sum = my_value;
435  result.avg = my_value;
436  result.min = my_value;
437  result.max = my_value;
438  result.min_index = 0;
439  result.max_index = 0;
440 
441  return result;
442  }
443 
444 #endif
445 
446 
447 
448  MPI_InitFinalize::MPI_InitFinalize(int & argc,
449  char **& argv,
450  const unsigned int max_num_threads)
451  {
452  static bool constructor_has_already_run = false;
453  (void)constructor_has_already_run;
454  Assert(constructor_has_already_run == false,
455  ExcMessage("You can only create a single object of this class "
456  "in a program since it initializes the MPI system."));
457 
458 
459  int ierr = 0;
460 #ifdef DEAL_II_WITH_MPI
461  // if we have PETSc, we will initialize it and let it handle MPI.
462  // Otherwise, we will do it.
463  int MPI_has_been_started = 0;
464  ierr = MPI_Initialized(&MPI_has_been_started);
465  AssertThrowMPI(ierr);
466  AssertThrow(MPI_has_been_started == 0,
467  ExcMessage("MPI error. You can only start MPI once!"));
468 
469  int provided;
470  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
471  // we might use several threads but never call two MPI functions at the
472  // same time. For an explanation of why we do this, see
473  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
474  int wanted = MPI_THREAD_SERIALIZED;
475  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
476  AssertThrowMPI(ierr);
477 
478  // disable for now because at least some implementations always return
479  // MPI_THREAD_SINGLE.
480  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
481  // ExcMessage("MPI reports that we are not allowed to use multiple
482  // threads."));
483 #else
484  // make sure the compiler doesn't warn about these variables
485  (void)argc;
486  (void)argv;
487  (void)ierr;
488 #endif
489 
490  // we are allowed to call MPI_Init ourselves and PETScInitialize will
491  // detect this. This allows us to use MPI_Init_thread instead.
492 #ifdef DEAL_II_WITH_PETSC
493 # ifdef DEAL_II_WITH_SLEPC
494  // Initialize SLEPc (with PETSc):
495  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
496  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
497 # else
498  // or just initialize PETSc alone:
499  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
500  AssertThrow(ierr == 0, ExcPETScError(ierr));
501 # endif
502 
503  // Disable PETSc exception handling. This just prints a large wall
504  // of text that is not particularly helpful for what we do:
505  PetscPopSignalHandler();
506 #endif
507 
508  // Initialize zoltan
509 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
510  float version;
511  Zoltan_Initialize(argc, argv, &version);
512 #endif
513 
514 #ifdef DEAL_II_WITH_P4EST
515  // Initialize p4est and libsc components
516 # if !(DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0))
517  // This feature is broken in version 2.0.0 for calls to
518  // MPI_Comm_create_group (see cburstedde/p4est#30).
519  // Disabling it leads to more verbose p4est error messages
520  // which should be fine.
521  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
522 # endif
523  p4est_init(nullptr, SC_LP_SILENT);
524 #endif
525 
526  constructor_has_already_run = true;
527 
528 
529  // Now also see how many threads we'd like to run
530  if (max_num_threads != numbers::invalid_unsigned_int)
531  {
532  // set maximum number of threads (also respecting the environment
533  // variable that the called function evaluates) based on what the
534  // user asked
535  MultithreadInfo::set_thread_limit(max_num_threads);
536  }
537  else
538  // user wants automatic choice
539  {
540 #ifdef DEAL_II_WITH_MPI
541  // we need to figure out how many MPI processes there are on the
542  // current node, as well as how many CPU cores we have. for the
543  // first task, check what get_hostname() returns and then do an
544  // allgather so each processor gets the answer
545  //
546  // in calculating the length of the string, don't forget the
547  // terminating \0 on C-style strings
548  const std::string hostname = Utilities::System::get_hostname();
549  const unsigned int max_hostname_size =
550  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
551  std::vector<char> hostname_array(max_hostname_size);
552  std::copy(hostname.c_str(),
553  hostname.c_str() + hostname.size() + 1,
554  hostname_array.begin());
555 
556  std::vector<char> all_hostnames(max_hostname_size *
557  MPI::n_mpi_processes(MPI_COMM_WORLD));
558  const int ierr = MPI_Allgather(hostname_array.data(),
559  max_hostname_size,
560  MPI_CHAR,
561  all_hostnames.data(),
562  max_hostname_size,
563  MPI_CHAR,
564  MPI_COMM_WORLD);
565  AssertThrowMPI(ierr);
566 
567  // count how often our own hostname appears and which instance of it
568  // the current process represents
569  unsigned int n_local_processes = 0;
570  unsigned int nth_process_on_host = 0;
571  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
572  ++i)
573  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
574  hostname)
575  {
576  ++n_local_processes;
577  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
578  ++nth_process_on_host;
579  }
580  Assert(nth_process_on_host > 0, ExcInternalError());
581 
582 
583  // compute how many cores each process gets. if the number does not
584  // divide evenly, then we get one more core if we are among the
585  // first few processes
586  //
587  // if the number would be zero, round up to one since every process
588  // needs to have at least one thread
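// (Example: 10 cores shared by 4 local processes gives 10/4 = 2 threads
// each, plus one extra thread for the first 10%4 = 2 processes on the
// host, i.e. 3+3+2+2 = 10 threads in total.)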
589  const unsigned int n_threads =
590  std::max(MultithreadInfo::n_cores() / n_local_processes +
591  (nth_process_on_host <=
592  MultithreadInfo::n_cores() % n_local_processes ?
593  1 :
594  0),
595  1U);
596 #else
597  const unsigned int n_threads = MultithreadInfo::n_cores();
598 #endif
599 
600  // finally set this number of threads
601  MultithreadInfo::set_thread_limit(n_threads);
602  }
603  }
604 
605 
606  MPI_InitFinalize::~MPI_InitFinalize()
607  {
608  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
609  // are no longer used at this point. this is relevant because the static
610  // object destructors run for these vectors at the end of the program
611  // would run after MPI_Finalize is called, leading to errors
612 
613 #ifdef DEAL_II_WITH_MPI
614  // Start with the deal.II MPI vectors (need to do this before finalizing
615  // PETSc because it finalizes MPI). Delete vectors from the pools:
616  GrowingVectorMemory<
617  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
618  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
619  release_unused_memory();
620  GrowingVectorMemory<
621  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
622  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
623  release_unused_memory();
624 
625  // Next with Trilinos:
626 # if defined(DEAL_II_WITH_TRILINOS)
627  GrowingVectorMemory<
628  TrilinosWrappers::MPI::Vector>::release_unused_memory();
629  GrowingVectorMemory<
630  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
631 # endif
632 #endif
633 
634 
635  // Now deal with PETSc (with or without MPI). Only delete the vectors if
636  // finalize hasn't been called yet, otherwise this will lead to errors.
637 #ifdef DEAL_II_WITH_PETSC
638  if ((PetscInitializeCalled == PETSC_TRUE) &&
639  (PetscFinalizeCalled == PETSC_FALSE))
640  {
641  GrowingVectorMemory<
642  PETScWrappers::MPI::Vector>::release_unused_memory();
643  GrowingVectorMemory<
644  PETScWrappers::MPI::BlockVector>::release_unused_memory();
645 
646 # ifdef DEAL_II_WITH_SLEPC
647  // and now end SLEPc (with PETSc)
648  SlepcFinalize();
649 # else
650  // or just end PETSc.
651  PetscFinalize();
652 # endif
653  }
654 #endif
655 
656 #ifdef DEAL_II_WITH_P4EST
657  // now end p4est and libsc
658  // Note: p4est has no finalize function
659  sc_finalize();
660 #endif
661 
662 
663  // only MPI_Finalize if we are running with MPI. We also need to do this
664  // when running PETSc, because we initialize MPI ourselves before
665  // calling PetscInitialize
666 #ifdef DEAL_II_WITH_MPI
667  if (job_supports_mpi() == true)
668  {
669  if (std::uncaught_exception())
670  {
671  std::cerr
672  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
673  << this_mpi_process(MPI_COMM_WORLD)
674  << ". Skipping MPI_Finalize() to avoid a deadlock."
675  << std::endl;
676  }
677  else
678  {
679  const int ierr = MPI_Finalize();
680  (void)ierr;
681  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
682  }
683  }
684 #endif
685  }
686 
687 
688 
689  bool
690  job_supports_mpi()
691  {
692 #ifdef DEAL_II_WITH_MPI
693  int MPI_has_been_started = 0;
694  const int ierr = MPI_Initialized(&MPI_has_been_started);
695  AssertThrowMPI(ierr);
696 
697  return (MPI_has_been_started > 0);
698 #else
699  return false;
700 #endif
701  }
702 
703 
704 
705 #include "mpi.inst"
706  } // end of namespace MPI
707 } // end of namespace Utilities
708 
709 DEAL_II_NAMESPACE_CLOSE