// tria_base.cc — extracted from the reference documentation of deal.II version 9.1.0-pre
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2015 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/logstream.h>
18 #include <deal.II/base/memory_consumption.h>
19 #include <deal.II/base/utilities.h>
20 
21 #include <deal.II/distributed/tria_base.h>
22 
23 #include <deal.II/grid/grid_tools.h>
24 #include <deal.II/grid/tria.h>
25 #include <deal.II/grid/tria_accessor.h>
26 #include <deal.II/grid/tria_iterator.h>
27 
28 #include <deal.II/lac/sparsity_pattern.h>
29 #include <deal.II/lac/sparsity_tools.h>
30 #include <deal.II/lac/vector_memory.h>
31 
32 #include <algorithm>
33 #include <fstream>
34 #include <iostream>
35 #include <numeric>
36 
37 
38 DEAL_II_NAMESPACE_OPEN
39 
40 namespace parallel
41 {
42  template <int dim, int spacedim>
44  MPI_Comm mpi_communicator,
45  const typename ::Triangulation<dim, spacedim>::MeshSmoothing
46  smooth_grid,
47  const bool check_for_distorted_cells)
48  : ::Triangulation<dim, spacedim>(smooth_grid,
49  check_for_distorted_cells)
50  , mpi_communicator(mpi_communicator)
51  , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
52  , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
53  {
54 #ifndef DEAL_II_WITH_MPI
55  Assert(false,
56  ExcMessage("You compiled deal.II without MPI support, for "
57  "which parallel::Triangulation is not available."));
58 #endif
59  number_cache.n_locally_owned_active_cells.resize(n_subdomains);
60  }
61 
62 
63 
64  template <int dim, int spacedim>
65  void
67  const ::Triangulation<dim, spacedim> &other_tria)
68  {
69 #ifndef DEAL_II_WITH_MPI
70  (void)other_tria;
71  Assert(false,
72  ExcMessage("You compiled deal.II without MPI support, for "
73  "which parallel::Triangulation is not available."));
74 #else
76 
77  if (const ::parallel::Triangulation<dim, spacedim> *other_tria_x =
78  dynamic_cast<const ::parallel::Triangulation<dim, spacedim> *>(
79  &other_tria))
80  {
81  mpi_communicator = other_tria_x->get_communicator();
82 
83  // release unused vector memory because we will have very different
84  // vectors now
85  ::internal::GrowingVectorMemoryImplementation::
86  release_all_unused_memory();
87  }
88 #endif
89  }
90 
91 
92 
93  template <int dim, int spacedim>
94  std::size_t
96  {
97  std::size_t mem =
102  number_cache.n_locally_owned_active_cells) +
104  number_cache.n_global_active_cells) +
106  return mem;
107  }
108 
109  template <int dim, int spacedim>
111  {
112  // release unused vector memory because the vector layout is going to look
113  // very different now
114  ::internal::GrowingVectorMemoryImplementation::
115  release_all_unused_memory();
116  }
117 
118  template <int dim, int spacedim>
121  , n_global_levels(0)
122  {}
123 
124  template <int dim, int spacedim>
125  unsigned int
127  {
128  return number_cache.n_locally_owned_active_cells[my_subdomain];
129  }
130 
131  template <int dim, int spacedim>
132  unsigned int
134  {
135  return number_cache.n_global_levels;
136  }
137 
138  template <int dim, int spacedim>
141  {
142  return number_cache.n_global_active_cells;
143  }
144 
145  template <int dim, int spacedim>
146  const std::vector<unsigned int> &
148  const
149  {
150  return number_cache.n_locally_owned_active_cells;
151  }
152 
153  template <int dim, int spacedim>
154  MPI_Comm
156  {
157  return mpi_communicator;
158  }
159 
160 #ifdef DEAL_II_WITH_MPI
161  template <int dim, int spacedim>
162  void
164  {
165  Assert(number_cache.n_locally_owned_active_cells.size() ==
167  ExcInternalError());
168 
169  std::fill(number_cache.n_locally_owned_active_cells.begin(),
170  number_cache.n_locally_owned_active_cells.end(),
171  0);
172 
173  number_cache.ghost_owners.clear();
174  number_cache.level_ghost_owners.clear();
175 
176  if (this->n_levels() == 0)
177  {
178  // Skip communication done below if we do not have any cells
179  // (meaning the Triangulation is empty on all processors). This will
180  // happen when called from the destructor of Triangulation, which
181  // can get called during exception handling causing a hang in this
182  // function.
183  number_cache.n_global_active_cells = 0;
184  number_cache.n_global_levels = 0;
185  return;
186  }
187 
188 
189  {
190  // find ghost owners
192  this->begin_active();
193  cell != this->end();
194  ++cell)
195  if (cell->is_ghost())
196  number_cache.ghost_owners.insert(cell->subdomain_id());
197 
198  Assert(number_cache.ghost_owners.size() <
200  ExcInternalError());
201  }
202 
203  if (this->n_levels() > 0)
205  this->begin_active();
206  cell != this->end();
207  ++cell)
208  if (cell->subdomain_id() == my_subdomain)
210 
211  unsigned int send_value =
213  const int ierr =
214  MPI_Allgather(&send_value,
215  1,
216  MPI_UNSIGNED,
217  number_cache.n_locally_owned_active_cells.data(),
218  1,
219  MPI_UNSIGNED,
220  this->mpi_communicator);
221  AssertThrowMPI(ierr);
222 
223  number_cache.n_global_active_cells =
224  std::accumulate(number_cache.n_locally_owned_active_cells.begin(),
225  number_cache.n_locally_owned_active_cells.end(),
226  /* ensure sum is computed with correct data type:*/
227  static_cast<types::global_dof_index>(0));
228  number_cache.n_global_levels =
230  }
231 
232 
233 
234  template <int dim, int spacedim>
235  void
237  {
238  number_cache.level_ghost_owners.clear();
239 
240  // if there is nothing to do, then do nothing
241  if (this->n_levels() == 0)
242  return;
243 
244  // find level ghost owners
246  this->begin();
247  cell != this->end();
248  ++cell)
249  if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
250  cell->level_subdomain_id() != this->locally_owned_subdomain())
251  this->number_cache.level_ghost_owners.insert(
252  cell->level_subdomain_id());
253 
254 # ifdef DEBUG
255  // Check that level_ghost_owners is symmetric by sending a message to
256  // everyone
257  {
258  int ierr = MPI_Barrier(this->mpi_communicator);
259  AssertThrowMPI(ierr);
260 
261  // important: preallocate to avoid (re)allocation:
262  std::vector<MPI_Request> requests(
263  this->number_cache.level_ghost_owners.size());
264  unsigned int dummy = 0;
265  unsigned int req_counter = 0;
266 
267  for (std::set<types::subdomain_id>::iterator it =
268  this->number_cache.level_ghost_owners.begin();
269  it != this->number_cache.level_ghost_owners.end();
270  ++it, ++req_counter)
271  {
272  ierr = MPI_Isend(&dummy,
273  1,
274  MPI_UNSIGNED,
275  *it,
276  9001,
277  this->mpi_communicator,
278  &requests[req_counter]);
279  AssertThrowMPI(ierr);
280  }
281 
282  for (std::set<types::subdomain_id>::iterator it =
283  this->number_cache.level_ghost_owners.begin();
284  it != this->number_cache.level_ghost_owners.end();
285  ++it)
286  {
287  unsigned int dummy;
288  ierr = MPI_Recv(&dummy,
289  1,
290  MPI_UNSIGNED,
291  *it,
292  9001,
293  this->mpi_communicator,
294  MPI_STATUS_IGNORE);
295  AssertThrowMPI(ierr);
296  }
297 
298  if (requests.size() > 0)
299  {
300  ierr =
301  MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
302  AssertThrowMPI(ierr);
303  }
304 
305  ierr = MPI_Barrier(this->mpi_communicator);
306  AssertThrowMPI(ierr);
307  }
308 # endif
309 
310  Assert(this->number_cache.level_ghost_owners.size() <
312  ExcInternalError());
313  }
314 
315 #else
316 
317  template <int dim, int spacedim>
318  void
320  {
321  Assert(false, ExcNotImplemented());
322  }
323 
324  template <int dim, int spacedim>
325  void
327  {
328  Assert(false, ExcNotImplemented());
329  }
330 
331 #endif
332 
333  template <int dim, int spacedim>
336  {
337  return my_subdomain;
338  }
339 
340 
341 
342  template <int dim, int spacedim>
343  const std::set<types::subdomain_id> &
345  {
346  return number_cache.ghost_owners;
347  }
348 
349 
350 
351  template <int dim, int spacedim>
352  const std::set<types::subdomain_id> &
354  {
355  return number_cache.level_ghost_owners;
356  }
357 
358 
359 
360  template <int dim, int spacedim>
361  std::map<unsigned int, std::set<::types::subdomain_id>>
363  {
364  // TODO: we are not treating periodic neighbors correctly here. If we do
365  // we can remove the overriding implementation for p::d::Triangulation
366  // that is currently using a p4est callback to get correct ghost neighbors
367  // over periodic faces.
368  Assert(this->get_periodic_face_map().size() == 0, ExcNotImplemented());
369 
370 
371  std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);
372 
373  for (auto cell : this->active_cell_iterators())
374  if (cell->is_locally_owned())
375  for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
376  vertex_of_own_cell[cell->vertex_index(v)] = true;
377 
378  std::map<unsigned int, std::set<::types::subdomain_id>> result;
379  for (auto cell : this->active_cell_iterators())
380  if (cell->is_ghost())
381  {
382  const types::subdomain_id owner = cell->subdomain_id();
383  for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell;
384  ++v)
385  {
386  if (vertex_of_own_cell[cell->vertex_index(v)])
387  result[cell->vertex_index(v)].insert(owner);
388  }
389  }
390 
391  return result;
392  }
393 
394 } // end namespace parallel
395 
396 
397 
398 /*-------------- Explicit Instantiations -------------------------------*/
399 #include "tria_base.inst"
400 
401 DEAL_II_NAMESPACE_CLOSE
/* Doxygen cross-reference index (extraction residue from the generated
   documentation; preserved for reference — not compilable code):

types::global_dof_index n_global_active_cells
Definition: tria_base.h:206
virtual void copy_triangulation(const Triangulation< dim, spacedim > &other_tria)
Definition: tria.cc:10473
virtual ~Triangulation() override
Definition: tria_base.cc:110
types::subdomain_id locally_owned_subdomain() const override
Definition: tria_base.cc:335
cell_iterator end() const
Definition: tria.cc:12004
types::subdomain_id my_subdomain
Definition: tria_base.h:184
std::set< types::subdomain_id > ghost_owners
Definition: tria_base.h:217
IteratorRange< active_cell_iterator > active_cell_iterators() const
Definition: tria.cc:12060
const std::vector< unsigned int > & n_locally_owned_active_cells_per_processor() const
Definition: tria_base.cc:147
Triangulation(MPI_Comm mpi_communicator, const typename::Triangulation< dim, spacedim >::MeshSmoothing smooth_grid=(::Triangulation< dim, spacedim >::none), const bool check_for_distorted_cells=false)
Definition: tria_base.cc:43
cell_iterator begin(const unsigned int level=0) const
Definition: tria.cc:11915
std::set< types::subdomain_id > level_ghost_owners
Definition: tria_base.h:222
unsigned long long int global_dof_index
Definition: types.h:72
unsigned int n_levels() const
const std::set< types::subdomain_id > & level_ghost_owners() const
Definition: tria_base.cc:353
virtual MPI_Comm get_communicator() const
Definition: tria_base.cc:155
virtual void copy_triangulation(const ::Triangulation< dim, spacedim > &old_tria) override
Definition: tria_base.cc:66
const std::map< std::pair< cell_iterator, unsigned int >, std::pair< std::pair< cell_iterator, unsigned int >, std::bitset< 3 > > > & get_periodic_face_map() const
Definition: tria.cc:13220
std::vector< unsigned int > n_locally_owned_active_cells
Definition: tria_base.h:201
static::ExceptionBase & ExcMessage(std::string arg1)
unsigned int subdomain_id
Definition: types.h:43
#define Assert(cond, exc)
Definition: exceptions.h:1227
virtual void update_number_cache()
Definition: tria_base.cc:163
virtual std::map< unsigned int, std::set<::types::subdomain_id > > compute_vertices_with_ghost_neighbors() const
Definition: tria_base.cc:362
virtual std::size_t memory_consumption() const override
Definition: tria_base.cc:95
virtual unsigned int n_global_levels() const override
Definition: tria_base.cc:133
unsigned int n_locally_owned_active_cells() const
Definition: tria_base.cc:126
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:69
const types::subdomain_id artificial_subdomain_id
Definition: types.h:272
Definition: cuda.h:32
#define AssertThrowMPI(error_code)
Definition: exceptions.h:1443
static::ExceptionBase & ExcNotImplemented()
virtual types::global_dof_index n_global_active_cells() const override
Definition: tria_base.cc:140
active_cell_iterator begin_active(const unsigned int level=0) const
Definition: tria.cc:11935
const std::set< types::subdomain_id > & ghost_owners() const
Definition: tria_base.cc:344
unsigned int n_vertices() const
T max(const T &t, const MPI_Comm &mpi_communicator)
std::enable_if< std::is_fundamental< T >::value, std::size_t >::type memory_consumption(const T &t)
types::subdomain_id n_subdomains
Definition: tria_base.h:189
static::ExceptionBase & ExcInternalError()
virtual std::size_t memory_consumption() const
Definition: tria.cc:14972
*/