16 #include <deal.II/base/index_set.h> 17 #include <deal.II/base/memory_consumption.h> 18 #include <deal.II/base/mpi.h> 22 #ifdef DEAL_II_WITH_TRILINOS 23 # ifdef DEAL_II_WITH_MPI 24 # include <Epetra_MpiComm.h> 26 # include <Epetra_Map.h> 27 # include <Epetra_SerialComm.h> 30 DEAL_II_NAMESPACE_OPEN
// NOTE(review): fragment of the IndexSet(const Epetra_Map &map) constructor —
// lines were sampled out during extraction, so the full body (member-init
// list head, the contiguous-map fast path, add_indices call) is not visible.
// Two branches exist: 64-bit GIDs (DEAL_II_WITH_64BIT_INDICES) and 32-bit.
34 #ifdef DEAL_II_WITH_TRILINOS 39 # ifdef DEAL_II_WITH_64BIT_INDICES 43 , index_space_size(1 + map.MaxAllGID64())
// largest_range starts out invalid; presumably set later by compress() —
// TODO confirm against the full source.
44 , largest_range(
numbers::invalid_unsigned_int)
// The map must cover index 0, i.e. GIDs form a 0-based index space.
46 Assert(map.MinAllGID64() == 0,
47 ExcMessage(
"The Epetra_Map does not contain the global index 0, which " 48 "means some entries are not present on any processor."));
55 const size_type n_indices = map.NumMyElements();
// Reinterprets Epetra's long long GID array as size_type* — relies on the
// two types having identical size/representation.
57 reinterpret_cast<size_type *
>(map.MyGlobalElements64());
// --- 32-bit GID branch: same logic using the non-64 Epetra accessors. ---
72 Assert(map.MinAllGID() == 0,
73 ExcMessage(
"The Epetra_Map does not contain the global index 0, which " 74 "means some entries are not present on any processor."));
81 const size_type n_indices = map.NumMyElements();
82 unsigned int * indices =
83 reinterpret_cast<unsigned int *
>(map.MyGlobalElements());
// NOTE(review): fragment of IndexSet::add_range(begin, end) — the signature,
// the empty-range check, and the is_compressed bookkeeping are not visible.
91 #endif // ifdef DEAL_II_WITH_TRILINOS 103 Assert(begin <= end, ExcIndexRangeType<size_type>(begin, 0, end));
// Append the half-open range [begin, end); compression/merging appears to
// happen later (see the do_compress fragment below in the original file).
107 const Range new_range(begin, end);
112 ranges.push_back(new_range);
// NOTE(review): fragment of the range-compression routine. Visible logic:
// (1) an in-place merge of overlapping/adjacent Ranges using a write
// iterator `store` and a scan iterator `next`; (2) truncation of `ranges`
// to the merged prefix; (3) a second pass recomputing nth_index_in_set
// and tracking the largest range. Sort step and surrounding braces are
// not visible here.
138 std::vector<Range>::iterator store =
ranges.begin();
139 for (std::vector<Range>::iterator i =
ranges.begin(); i !=
ranges.end();)
141 std::vector<Range>::iterator next = i;
// Absorb every subsequent range that starts at or before the current
// merged end point, extending last_index as needed.
148 while (next !=
ranges.end() && (next->begin <= last_index))
150 last_index = std::max(last_index, next->end);
156 *store =
Range(first_index, last_index);
// If merging shrank the list, copy the merged prefix into a fresh vector
// (presumably then swapped into `ranges` — TODO confirm, lines missing).
160 if (store !=
ranges.end())
162 std::vector<Range> new_ranges(
ranges.begin(), store);
// Second pass: assign each range its cumulative offset into the set and
// remember which range is largest (used for fast global-to-local lookup —
// hedged: the consumer of largest_range is not visible here).
167 size_type next_index = 0, largest_range_size = 0;
168 for (std::vector<Range>::iterator i =
ranges.begin(); i !=
ranges.end(); ++i)
172 i->nth_index_in_set = next_index;
173 next_index += (i->end - i->begin);
174 if (i->end - i->begin > largest_range_size)
176 largest_range_size = i->end - i->begin;
// NOTE(review): fragment of IndexSet::operator&(const IndexSet&) — the
// classic two-pointer sweep over two sorted range lists. Loop header,
// iterator advancement, and the result's compress/return are not visible.
196 std::vector<Range>::const_iterator r1 =
ranges.begin(),
// Disjoint cases: one range lies entirely before the other, so advance
// (advancing statements themselves are among the missing lines).
204 if (r1->end <= r2->begin)
206 else if (r2->end <= r1->begin)
// Reaching here the ranges must overlap on at least one index.
211 Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
212 ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
// Emit the intersection [max(begin), min(end)) into the result set.
216 result.
add_range(std::max(r1->begin, r2->begin),
217 std::min(r1->end, r2->end));
// Advance whichever range ends first (tie presumably advances r1 —
// TODO confirm; the else branch is not visible).
222 if (r1->end <= r2->end)
// NOTE(review): fragment of IndexSet::get_view(begin, end) — returns the
// subset falling in the window [begin, end), re-based so the window start
// maps to index 0. The Assert head and result construction are missing.
239 ExcMessage(
"End index needs to be larger or equal to begin index!"));
243 std::vector<Range>::const_iterator r1 =
ranges.begin();
245 while (r1 !=
ranges.end())
// Range overlaps the requested window?
247 if ((r1->end > begin) && (r1->begin <
end))
// Clip to the window and shift left by `begin` to re-base at zero.
249 result.
add_range(std::max(r1->begin, begin) - begin,
250 std::min(r1->end, end) - begin);
// Ranges are sorted, so once one starts at/after `end` we can stop
// (the break itself is among the missing lines).
252 else if (r1->begin >= end)
// NOTE(review): fragment of IndexSet::subtract_set(const IndexSet &other) —
// a two-pointer sweep trimming this set's ranges by the other's. Iterator
// advancement and the final splice of new_ranges are partly missing.
274 std::vector<Range> new_ranges;
276 std::vector<Range>::iterator own_it =
ranges.begin();
277 std::vector<Range>::iterator other_it = other.
ranges.begin();
279 while (own_it !=
ranges.end() && other_it != other.
ranges.end())
// Own range entirely before the subtrahend: keep it, advance own_it
// (the advance statements are among the missing lines).
282 if (own_it->end <= other_it->begin)
288 if (own_it->begin >= other_it->end)
// Overlap: the prefix of the own range that precedes the subtrahend
// survives; stash it in new_ranges.
296 if (own_it->begin < other_it->begin)
298 Range r(own_it->begin, other_it->begin);
299 r.nth_index_in_set = 0;
300 new_ranges.push_back(r);
// Trim the own range to start after the subtracted interval; clamp so
// begin never passes end (empty ranges are purged below).
305 own_it->begin = other_it->end;
306 if (own_it->begin > own_it->end)
308 own_it->begin = own_it->end;
// Sweep out ranges that became empty during trimming.
318 for (std::vector<Range>::iterator it =
ranges.begin(); it !=
ranges.end();)
320 if (it->begin >= it->end)
// Re-insert the surviving prefixes collected above (presumably via
// add_range on each — TODO confirm, body lines missing).
327 const std::vector<Range>::iterator
end = new_ranges.end();
328 for (std::vector<Range>::iterator it = new_ranges.begin(); it !=
end; ++it)
// NOTE(review): only the error-message string literals of pop_back() and
// pop_front() survived extraction; the function bodies are not visible.
341 "pop_back() failed, because this IndexSet contains no entries."));
359 "pop_front() failed, because this IndexSet contains no entries."));
// NOTE(review): fragment of IndexSet::add_indices(const IndexSet &other,
// offset) — merges `other`'s ranges, shifted by `offset`, into this set.
// The loop header, iterator advancement, and final swap are missing.
379 if ((
this == &other) && (offset == 0))
// Adding a set to itself with zero offset is a no-op (early return is
// among the missing lines).
384 ExcIndexRangeType<size_type>(other.
ranges.back().end - 1,
391 std::vector<Range>::const_iterator r1 =
ranges.begin(),
392 r2 = other.
ranges.begin();
394 std::vector<Range> new_ranges;
// Case 1: own range comes first (or other exhausted) — copy it verbatim.
401 if (r2 == other.
ranges.end() ||
402 (r1 !=
ranges.end() && r1->end < (r2->begin + offset)))
404 new_ranges.push_back(*r1);
// Case 2: shifted other-range comes first (or own exhausted).
407 else if (r1 ==
ranges.end() || (r2->end + offset) < r1->begin)
409 new_ranges.emplace_back(r2->begin + offsets, r2->end + offset);
// Case 3: overlap — emit the union of the two ranges.
416 Range next(std::min(r1->begin, r2->begin + offset),
417 std::max(r1->end, r2->end + offset));
418 new_ranges.push_back(next);
// NOTE(review): fragment of IndexSet::write(std::ostream &out) — text
// serialization: total size, number of ranges, then each range's begin/end.
435 out <<
size() <<
" ";
436 out <<
ranges.size() << std::endl;
437 std::vector<Range>::const_iterator r =
ranges.begin();
438 for (; r !=
ranges.end(); ++r)
// One "begin end" pair per line; read() below presumably parses this
// exact format — TODO confirm against the full read() body.
440 out << r->begin <<
" " << r->end << std::endl;
// NOTE(review): fragment of IndexSet::read(std::istream &in) — only the
// range-count variable and the loop header survived; the extraction of
// size and per-range begin/end values is not visible.
452 unsigned int n_ranges;
457 for (
unsigned int i = 0; i < n_ranges; ++i)
// NOTE(review): fragment of IndexSet::block_write(std::ostream &out) —
// raw binary dump: the range count, then the Range array bytes verbatim.
// The write of the overall size and the byte count argument are missing.
474 size_t n_ranges =
ranges.size();
475 out.write(reinterpret_cast<const char *>(&n_ranges),
sizeof(n_ranges));
// Only dump the array when non-empty: &*ranges.begin() on an empty
// vector would be undefined behavior.
476 if (
ranges.empty() ==
false)
477 out.write(reinterpret_cast<const char *>(&*
ranges.begin()),
// NOTE(review): fragment of IndexSet::block_read(std::istream &in) — the
// inverse of block_write: reads size, range count, then the raw Range
// array into the (presumably pre-resized — TODO confirm) ranges vector.
487 in.read(reinterpret_cast<char *>(&size),
sizeof(size));
488 in.read(reinterpret_cast<char *>(&n_ranges),
sizeof(n_ranges));
494 in.read(reinterpret_cast<char *>(&*
ranges.begin()),
// NOTE(review): fragment of IndexSet::fill_index_vector — expands the
// compressed range representation into an explicit list of indices. The
// inner per-index loop over [it->begin, it->end) is mostly missing.
510 for (std::vector<Range>::iterator it =
ranges.begin(); it !=
ranges.end();
513 indices.push_back(i);
// NOTE(review): fragment of IndexSet::make_trilinos_map(communicator,
// overlapping) — builds an Epetra_Map from this set. Visible: the sanity
// message for non-1:1 partitions, a contiguous fast path, and the general
// path passing an explicit GID list. The branch conditions, the
// fill_index_vector call, and the SerialComm fallbacks are missing.
520 #ifdef DEAL_II_WITH_TRILINOS 524 const bool overlapping)
const 535 ExcMessage(
"You are trying to create an Epetra_Map object " 536 "that partitions elements of an index set " 537 "between processors. However, the union of the " 538 "index sets on different processors does not " 539 "contain all indices exactly once: the sum of " 540 "the number of entries the various processors " 541 "want to store locally is " +
543 " whereas the total size of the object to be " 546 ". In other words, there are " 547 "either indices that are not spoken for " 548 "by any processor, or there are indices that are " 549 "claimed by multiple processors."));
// Fast path: contiguous local ranges — let Epetra compute the GID layout
// from global size + local element count alone.
559 return Epetra_Map(TrilinosWrappers::types::int_type(
size()),
560 TrilinosWrappers::types::int_type(
n_elements()),
562 # ifdef DEAL_II_WITH_MPI
563 Epetra_MpiComm(communicator)
// General path: enumerate this set's indices explicitly; -1 tells Epetra
// to deduce the global element count itself.
570 std::vector<size_type> indices;
573 TrilinosWrappers::types::int_type(-1),
574 TrilinosWrappers::types::int_type(
n_elements()),
// Reinterprets size_type* as Epetra's int type — assumes matching widths.
576 reinterpret_cast<TrilinosWrappers::types::int_type *>(
580 # ifdef DEAL_II_WITH_MPI 581 Epetra_MpiComm(communicator)
// NOTE(review): fragment of is_ascending_and_one_to_one(communicator) —
// checks that the union of all processors' sets covers [0, size()) exactly
// once in ascending processor order. Gathering of per-rank ranges and the
// early returns are among the missing lines.
599 if (n_global_elements !=
size())
602 #ifdef DEAL_II_WITH_MPI 604 const bool all_contiguous =
609 bool is_globally_ascending =
true;
// Presumably gathered on rank 0 (see the gather() declaration residue) —
// TODO confirm; the gather call itself is not visible.
616 const std::vector<types::global_dof_index> global_dofs =
626 for (; index < global_dofs.size(); ++index)
// Any non-increasing pair across rank boundaries disproves the property.
631 if (new_dof <= old_dof)
633 is_globally_ascending =
false;
// Rank 0 decided; broadcast the verdict so every rank returns the same
// answer. MPI_Bcast is collective — all ranks must reach this call.
643 int is_ascending = is_globally_ascending ? 1 : 0;
644 int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
647 return (is_ascending == 1);
650 #endif // DEAL_II_WITH_MPI 666 DEAL_II_NAMESPACE_CLOSE
Iterator lower_bound(Iterator first, Iterator last, const T &val)
static const unsigned int invalid_unsigned_int
IntervalIterator begin_intervals() const
static::ExceptionBase & ExcIO()
void block_read(std::istream &in)
bool is_ascending_and_one_to_one(const MPI_Comm &communicator) const
size_type n_elements() const
void add_indices(const ForwardIterator &begin, const ForwardIterator &end)
#define AssertThrow(cond, exc)
types::global_dof_index size_type
std::string to_string(const number value, const unsigned int digits=numbers::invalid_unsigned_int)
unsigned long long int global_dof_index
size_type index_space_size
ElementIterator begin() const
void block_write(std::ostream &out) const
bool is_contiguous() const
static::ExceptionBase & ExcMessage(std::string arg1)
void write(std::ostream &out) const
std::vector< Range > ranges
T sum(const T &t, const MPI_Comm &mpi_communicator)
void subtract_set(const IndexSet &other)
void fill_index_vector(std::vector< size_type > &indices) const
#define Assert(cond, exc)
static::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
IndexSet operator&(const IndexSet &is) const
std::vector< T > gather(const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process=0)
IndexSet get_view(const size_type begin, const size_type end) const
Epetra_Map make_trilinos_map(const MPI_Comm &communicator=MPI_COMM_WORLD, const bool overlapping=false) const
ElementIterator begin() const
void add_range(const size_type begin, const size_type end)
void set_size(const size_type size)
#define AssertThrowMPI(error_code)
Threads::Mutex compress_mutex
T min(const T &t, const MPI_Comm &mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
std::size_t memory_consumption() const
ElementIterator end() const
const types::global_dof_index invalid_dof_index
void read(std::istream &in)
std::enable_if< std::is_fundamental< T >::value, std::size_t >::type memory_consumption(const T &t)
static::ExceptionBase & ExcInternalError()