libMesh::Parallel::Communicator Class Reference

#include <parallel.h>

Public Types

enum  SendMode { DEFAULT =0, SYNCHRONOUS }
 

Public Member Functions

 Communicator ()
 
 Communicator (const communicator &comm)
 
 ~Communicator ()
 
void split (int color, int key, Communicator &target) const
 
void duplicate (const Communicator &comm)
 
void duplicate (const communicator &comm)
 
communicator & get ()
 
const communicator & get () const
 
MessageTag get_unique_tag (int tagvalue) const
 
void reference_unique_tag (int tagvalue) const
 
void dereference_unique_tag (int tagvalue) const
 
void clear ()
 
Communicator & operator= (const communicator &comm)
 
unsigned int rank () const
 
unsigned int size () const
 
void send_mode (const SendMode sm)
 
SendMode send_mode () const
 
void barrier () const
 
template<typename T >
bool verify (const T &r) const
 
template<typename T >
bool semiverify (const T *r) const
 
template<typename T >
void min (T &r) const
 
template<typename T >
void minloc (T &r, unsigned int &min_id) const
 
template<typename T >
void minloc (std::vector< T > &r, std::vector< unsigned int > &min_id) const
 
template<typename T >
void max (T &r) const
 
template<typename T >
void maxloc (T &r, unsigned int &max_id) const
 
template<typename T >
void maxloc (std::vector< T > &r, std::vector< unsigned int > &max_id) const
 
template<typename T >
void sum (T &r) const
 
template<typename T >
void set_union (T &data, const unsigned int root_id) const
 
template<typename T >
void set_union (T &data) const
 
status probe (const unsigned int src_processor_id, const MessageTag &tag=any_tag) const
 
template<typename T >
Status packed_range_probe (const unsigned int src_processor_id, const MessageTag &tag, bool &flag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const T &buf, Request &req, const MessageTag &tag=no_tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const T &buf, const DataType &type, const MessageTag &tag=no_tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const T &buf, const DataType &type, Request &req, const MessageTag &tag=no_tag) const
 
template<typename T >
Status receive (const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
 
template<typename T >
void receive (const unsigned int dest_processor_id, T &buf, Request &req, const MessageTag &tag=any_tag) const
 
template<typename T >
Status receive (const unsigned int dest_processor_id, T &buf, const DataType &type, const MessageTag &tag=any_tag) const
 
template<typename T >
void receive (const unsigned int dest_processor_id, T &buf, const DataType &type, Request &req, const MessageTag &tag=any_tag) const
 
template<typename Context , typename Iter >
void send_packed_range (const unsigned int dest_processor_id, const Context *context, Iter range_begin, const Iter range_end, const MessageTag &tag=no_tag) const
 
template<typename Context , typename Iter >
void send_packed_range (const unsigned int dest_processor_id, const Context *context, Iter range_begin, const Iter range_end, Request &req, const MessageTag &tag=no_tag) const
 
template<typename Context , typename Iter >
void nonblocking_send_packed_range (const unsigned int dest_processor_id, const Context *context, Iter range_begin, const Iter range_end, Request &req, const MessageTag &tag=no_tag) const
 
template<typename Context , typename OutputIter , typename T >
void receive_packed_range (const unsigned int dest_processor_id, Context *context, OutputIter out, const T *output_type, const MessageTag &tag=any_tag) const
 
template<typename Context , typename OutputIter , typename T >
void nonblocking_receive_packed_range (const unsigned int src_processor_id, Context *context, OutputIter out, const T *output_type, Request &req, Status &stat, const MessageTag &tag=any_tag) const
 
template<typename T1 , typename T2 >
void send_receive (const unsigned int dest_processor_id, const T1 &send, const unsigned int source_processor_id, T2 &recv, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
 
template<typename Context1 , typename RangeIter , typename Context2 , typename OutputIter , typename T >
void send_receive_packed_range (const unsigned int dest_processor_id, const Context1 *context1, RangeIter send_begin, const RangeIter send_end, const unsigned int source_processor_id, Context2 *context2, OutputIter out, const T *output_type, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
 
template<typename T1 , typename T2 >
void send_receive (const unsigned int dest_processor_id, const T1 &send, const DataType &type1, const unsigned int source_processor_id, T2 &recv, const DataType &type2, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
 
template<typename T >
void gather (const unsigned int root_id, const T &send, std::vector< T > &recv) const
 
template<typename T >
void gather (const unsigned int root_id, const std::basic_string< T > &send, std::vector< std::basic_string< T > > &recv, const bool identical_buffer_sizes=false) const
 
template<typename T >
void gather (const unsigned int root_id, std::vector< T > &r) const
 
template<typename T >
void allgather (const T &send, std::vector< T > &recv) const
 
template<typename T >
void allgather (const std::basic_string< T > &send, std::vector< std::basic_string< T > > &recv, const bool identical_buffer_sizes=false) const
 
template<typename T >
void allgather (std::vector< T > &r, const bool identical_buffer_sizes=false) const
 
template<typename T >
void scatter (const std::vector< T > &data, T &recv, const unsigned int root_id=0) const
 
template<typename T >
void scatter (const std::vector< T > &data, std::vector< T > &recv, const unsigned int root_id=0) const
 
template<typename T >
void scatter (const std::vector< T > &data, const std::vector< int > counts, std::vector< T > &recv, const unsigned int root_id=0) const
 
template<typename T >
void scatter (const std::vector< std::vector< T > > &data, std::vector< T > &recv, const unsigned int root_id=0, const bool identical_buffer_sizes=false) const
 
template<typename Context , typename Iter , typename OutputIter >
void gather_packed_range (const unsigned int root_id, Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
 
template<typename Context , typename Iter , typename OutputIter >
void allgather_packed_range (Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
 
template<typename T >
void alltoall (std::vector< T > &r) const
 
template<typename T >
void broadcast (T &data, const unsigned int root_id=0) const
 
template<typename Context , typename OutputContext , typename Iter , typename OutputIter >
void broadcast_packed_range (const Context *context1, Iter range_begin, const Iter range_end, OutputContext *context2, OutputIter out, const unsigned int root_id=0) const
 
template<>
bool verify (const bool &r) const
 
template<>
bool semiverify (const bool *r) const
 
template<typename T >
bool semiverify (const std::vector< T > *r) const
 
template<typename T >
void min (std::vector< T > &r) const
 
template<typename T >
void max (std::vector< T > &r) const
 
template<typename T >
void sum (std::vector< T > &r) const
 
template<typename T >
void sum (std::complex< T > &r) const
 
template<typename T >
void sum (std::vector< std::complex< T > > &r) const
 
template<typename T >
void set_union (std::set< T > &data, const unsigned int root_id) const
 
template<typename T >
void set_union (std::set< T > &data) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::basic_string< T > &buf, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::basic_string< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::set< T > &buf, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::set< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::set< T > &buf, const DataType &type, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::set< T > &buf, const DataType &type, Request &req, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::vector< T > &buf, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::vector< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::vector< T > &buf, const DataType &type, const MessageTag &tag) const
 
template<typename T >
void send (const unsigned int dest_processor_id, const std::vector< T > &buf, const DataType &type, Request &req, const MessageTag &tag) const
 
template<typename T >
Status receive (const unsigned int src_processor_id, std::basic_string< T > &buf, const MessageTag &tag) const
 
template<typename T >
void receive (const unsigned int src_processor_id, std::basic_string< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
Status receive (const unsigned int src_processor_id, std::set< T > &buf, const MessageTag &tag) const
 
template<typename T >
void receive (const unsigned int src_processor_id, std::set< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
Status receive (const unsigned int src_processor_id, std::set< T > &buf, const DataType &type, const MessageTag &tag) const
 
template<typename T >
void receive (const unsigned int src_processor_id, std::set< T > &buf, const DataType &type, Request &req, const MessageTag &tag) const
 
template<typename T >
Status receive (const unsigned int src_processor_id, std::vector< T > &buf, const MessageTag &tag) const
 
template<typename T >
void receive (const unsigned int src_processor_id, std::vector< T > &buf, Request &req, const MessageTag &tag) const
 
template<typename T >
Status receive (const unsigned int src_processor_id, std::vector< T > &buf, const DataType &type, const MessageTag &tag) const
 
template<typename T >
void receive (const unsigned int src_processor_id, std::vector< T > &buf, const DataType &type, Request &req, const MessageTag &tag) const
 
template<typename T1 , typename T2 >
void send_receive (const unsigned int dest_processor_id, const std::vector< T1 > &sendvec, const DataType &type1, const unsigned int source_processor_id, std::vector< T2 > &recv, const DataType &type2, const MessageTag &send_tag, const MessageTag &recv_tag) const
 
template<typename T >
void send_receive (const unsigned int dest_processor_id, const std::vector< T > &sendvec, const unsigned int source_processor_id, std::vector< T > &recv, const MessageTag &send_tag, const MessageTag &recv_tag) const
 
template<typename T1 , typename T2 >
void send_receive (const unsigned int dest_processor_id, const std::vector< T1 > &sendvec, const unsigned int source_processor_id, std::vector< T2 > &recv, const MessageTag &send_tag, const MessageTag &recv_tag) const
 
template<typename T1 , typename T2 >
void send_receive (const unsigned int dest_processor_id, const std::vector< std::vector< T1 > > &sendvec, const unsigned int source_processor_id, std::vector< std::vector< T2 > > &recv, const MessageTag &, const MessageTag &) const
 
template<typename T >
void send_receive (const unsigned int dest_processor_id, const std::vector< std::vector< T > > &sendvec, const unsigned int source_processor_id, std::vector< std::vector< T > > &recv, const MessageTag &, const MessageTag &) const
 
template<>
void broadcast (bool &data, const unsigned int root_id) const
 
template<typename T >
void broadcast (std::basic_string< T > &data, const unsigned int root_id) const
 
template<typename T >
void broadcast (std::vector< T > &data, const unsigned int root_id) const
 
template<typename T >
void broadcast (std::vector< std::basic_string< T > > &data, const unsigned int root_id) const
 
template<typename T >
void broadcast (std::set< T > &data, const unsigned int root_id) const
 

Private Member Functions

 Communicator (const Communicator &)
 
void assign (const communicator &comm)
 

Private Attributes

communicator _communicator
 
unsigned int _rank
 
unsigned int _size
 
SendMode _send_mode
 
std::map< int, unsigned int > used_tag_values
 
bool _I_duped_it
 

Detailed Description

Encapsulates the MPI_Comm object. Allows the size of the group and this process's position in the group to be determined.

Methods of this object are the preferred way to perform distributed-memory parallel operations.

Definition at line 611 of file parallel.h.
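
A minimal usage sketch (the Communicator reference comm, the function name, and the values below are illustrative assumptions, not part of the library documentation):

#include <parallel.h>

void report_rank (const libMesh::Parallel::Communicator & comm)
{
  // Every processor learns the group size and its own position in it.
  const unsigned int my_rank = comm.rank();
  const unsigned int n_procs = comm.size();

  // A simple collective: replace the local value with the global maximum.
  unsigned int max_rank = my_rank;
  comm.max(max_rank);

  // Every processor now agrees: max_rank == n_procs - 1.
  libmesh_assert_equal_to (max_rank, n_procs - 1);
}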

Member Enumeration Documentation

enum libMesh::Parallel::Communicator::SendMode

Whether to use default or synchronous sends.

Enumerator
DEFAULT 
SYNCHRONOUS 

Definition at line 684 of file parallel.h.
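
For example (a hedged sketch; comm is an assumed Communicator instance), point-to-point traffic can be switched to synchronous sends and back:

// Request synchronous sends for subsequent point-to-point communication.
comm.send_mode (libMesh::Parallel::Communicator::SYNCHRONOUS);

// ... sends issued here use the synchronous mode ...

// Restore the default behavior.
comm.send_mode (libMesh::Parallel::Communicator::DEFAULT);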

Constructor & Destructor Documentation

libMesh::Parallel::Communicator::Communicator ( )
inline

Default Constructor.

Definition at line 567 of file parallel_implementation.h.

567  :
568 #ifdef LIBMESH_HAVE_MPI
569  _communicator(MPI_COMM_SELF),
570 #endif
571  _rank(0),
572  _size(1),
573  _send_mode(DEFAULT),
574  used_tag_values(),
575  _I_duped_it(false) {}
libMesh::Parallel::Communicator::Communicator ( const communicator &  comm)
inline explicit

Definition at line 577 of file parallel_implementation.h.

References assign().

577  :
578 #ifdef LIBMESH_HAVE_MPI
579  _communicator(MPI_COMM_SELF),
580 #endif
581  _rank(0),
582  _size(1),
583  _send_mode(DEFAULT),
584  used_tag_values(),
585  _I_duped_it(false)
586 {
587  this->assign(comm);
588 }
libMesh::Parallel::Communicator::~Communicator ( )
inline

Definition at line 590 of file parallel_implementation.h.

References clear().

591 {
592  this->clear();
593 }
libMesh::Parallel::Communicator::Communicator ( const Communicator & )
inline explicit private

Definition at line 658 of file parallel_implementation.h.

658  :
659 #ifdef LIBMESH_HAVE_MPI
660  _communicator(MPI_COMM_NULL),
661 #endif
662  _rank(0),
663  _size(1),
664  _send_mode(DEFAULT),
665  used_tag_values(),
666  _I_duped_it(false)
667 {
668  libmesh_not_implemented();
669 }

Member Function Documentation

template<typename T >
void libMesh::Parallel::Communicator::allgather ( const T &  send,
std::vector< T > &  recv 
) const
inline

Fill a vector of length this->size() such that recv[processor_id] holds the value of send on that processor.

Definition at line 3093 of file parallel_implementation.h.

References libMesh::libmesh_assert().

Referenced by libMesh::LaplaceMeshSmoother::allgather_graph(), libMesh::Parallel::Sort< KeyType, IdxType >::communicate_bins(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::DofMap::distribute_dofs(), libMesh::MeshCommunication::find_global_indices(), libMesh::MeshRefinement::flag_elements_by_elem_fraction(), libMesh::MeshRefinement::flag_elements_by_nelem_target(), libMesh::ParmetisPartitioner::initialize(), libMesh::Nemesis_IO::read(), receive(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::DofMap::set_nonlocal_dof_objects(), and libMesh::Parallel::Sort< KeyType, IdxType >::sort().

3095 {
3096  LOG_SCOPE ("allgather()","Parallel");
3097 
3098  libmesh_assert(this->size());
3099  recv.resize(this->size());
3100 
3101  unsigned int comm_size = this->size();
3102  if (comm_size > 1)
3103  {
3104  StandardType<T> send_type(&sendval);
3105 
3106  libmesh_call_mpi
3107  (MPI_Allgather (const_cast<T*>(&sendval), 1, send_type, &recv[0], 1,
3108  send_type, this->get()));
3109  }
3110  else if (comm_size > 0)
3111  recv[0] = sendval;
3112 }
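
A small usage sketch (assuming a Communicator comm; the variable names are illustrative only):

// Each processor contributes one value ...
unsigned int my_value = 10 * comm.rank();

// ... and afterwards every processor holds all contributions.
std::vector<unsigned int> all_values;
comm.allgather(my_value, all_values);

// all_values.size() == comm.size() and all_values[p] == 10*p everywhere.
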
template<typename T >
void libMesh::Parallel::Communicator::allgather ( const std::basic_string< T > &  send,
std::vector< std::basic_string< T > > &  recv,
const bool  identical_buffer_sizes = false 
) const
inline

AllGather overload for string types

Definition at line 3117 of file parallel_implementation.h.

References libMesh::Parallel::allgather(), libMesh::libmesh_assert(), and libmesh_nullptr.

3120 {
3121  LOG_SCOPE ("allgather()","Parallel");
3122 
3123  libmesh_assert(this->size());
3124  recv.assign(this->size(), "");
3125 
3126  // serial case
3127  if (this->size() < 2)
3128  {
3129  recv.resize(1);
3130  recv[0] = sendval;
3131  return;
3132  }
3133 
3134  std::vector<int>
3135  sendlengths (this->size(), 0),
3136  displacements(this->size(), 0);
3137 
3138  const int mysize = static_cast<int>(sendval.size());
3139 
3140  if (identical_buffer_sizes)
3141  sendlengths.assign(this->size(), mysize);
3142  else
3143  // first comm step to determine buffer sizes from all processors
3144  this->allgather(mysize, sendlengths);
3145 
3146  // Find the total size of the final array and
3147  // set up the displacement offsets for each processor
3148  unsigned int globalsize = 0;
3149  for (unsigned int i=0; i != this->size(); ++i)
3150  {
3151  displacements[i] = globalsize;
3152  globalsize += sendlengths[i];
3153  }
3154 
3155  // Check for quick return
3156  if (globalsize == 0)
3157  return;
3158 
3159  // monolithic receive buffer
3160  std::string r(globalsize, 0);
3161 
3162  // and get the data from the remote processors.
3163  libmesh_call_mpi
3164  (MPI_Allgatherv (const_cast<T*>(mysize ? &sendval[0] : libmesh_nullptr),
3165  mysize, StandardType<T>(),
3166  &r[0], &sendlengths[0], &displacements[0],
3167  StandardType<T>(), this->get()));
3168 
3169  // slice receive buffer up
3170  for (unsigned int i=0; i != this->size(); ++i)
3171  recv[i] = r.substr(displacements[i], sendlengths[i]);
3172 }
template<typename T >
void libMesh::Parallel::Communicator::allgather ( std::vector< T > &  r,
const bool  identical_buffer_sizes = false 
) const
inline

Take a vector of local variables and expand it to include values from all processors. By default, each processor is allowed to have its own unique input buffer length. If it is known that all processors have the same input sizes, additional communication can be avoided.

Specifically, this function transforms this:

* Processor 0: [ ... N_0 ]
* Processor 1: [ ....... N_1 ]
* ...
* Processor M: [ .. N_M]
* 

into this:

* [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]
* 

on each processor. This function is collective and therefore must be called by all processors in the Communicator.

Definition at line 3177 of file parallel_implementation.h.

References libMesh::Parallel::allgather(), libMesh::libmesh_assert(), libmesh_nullptr, and libMesh::Parallel::verify().

3179 {
3180  if (this->size() < 2)
3181  return;
3182 
3183  LOG_SCOPE("allgather()", "Parallel");
3184 
3185  if (identical_buffer_sizes)
3186  {
3187  if (r.empty())
3188  return;
3189 
3190  libmesh_assert(this->verify(r.size()));
3191 
3192  std::vector<T> r_src(r.size()*this->size());
3193  r_src.swap(r);
3194  StandardType<T> send_type(&r_src[0]);
3195 
3196  libmesh_call_mpi
3197  (MPI_Allgather (&r_src[0], cast_int<int>(r_src.size()),
3198  send_type, &r[0], cast_int<int>(r_src.size()),
3199  send_type, this->get()));
3200  // libmesh_assert(this->verify(r));
3201  return;
3202  }
3203 
3204  std::vector<int>
3205  sendlengths (this->size(), 0),
3206  displacements(this->size(), 0);
3207 
3208  const int mysize = static_cast<int>(r.size());
3209  this->allgather(mysize, sendlengths);
3210 
3211  // Find the total size of the final array and
3212  // set up the displacement offsets for each processor.
3213  unsigned int globalsize = 0;
3214  for (unsigned int i=0; i != this->size(); ++i)
3215  {
3216  displacements[i] = globalsize;
3217  globalsize += sendlengths[i];
3218  }
3219 
3220  // Check for quick return
3221  if (globalsize == 0)
3222  return;
3223 
3224  // copy the input buffer
3225  std::vector<T> r_src(globalsize);
3226  r_src.swap(r);
3227 
3228  StandardType<T> send_type(&r[0]);
3229 
3230  // and get the data from the remote processors.
3231  // Pass NULL if our vector is empty.
3232  libmesh_call_mpi
3233  (MPI_Allgatherv (r_src.empty() ? libmesh_nullptr : &r_src[0], mysize,
3234  send_type, &r[0], &sendlengths[0],
3235  &displacements[0], send_type, this->get()));
3236 }
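
A sketch of the variable-length case (assuming a Communicator comm); each processor starts with a different number of entries:

// Processor p contributes p+1 copies of its own rank.
std::vector<unsigned int> ids (comm.rank() + 1, comm.rank());

// Concatenate everyone's entries; buffer sizes differ, so the default
// (identical_buffer_sizes == false) extra sizing step is required.
comm.allgather(ids, false);

// ids now holds processor 0's entries, then processor 1's, and so on,
// identically on every processor.
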
template<typename Context , typename Iter , typename OutputIter >
void libMesh::Parallel::Communicator::allgather_packed_range ( Context *  context,
Iter  range_begin,
const Iter  range_end,
OutputIter  out 
) const
inline

Take a range of local variables, combine it with ranges from all processors, and write the output to the output iterator.

Definition at line 4075 of file parallel_implementation.h.

References libMesh::Parallel::allgather(), libMesh::libmesh_assert(), libmesh_nullptr, libMesh::Parallel::max(), libMesh::Parallel::pack_range(), and libMesh::Parallel::unpack_range().

Referenced by libMesh::MeshCommunication::gather().

4079 {
4080  typedef typename std::iterator_traits<Iter>::value_type T;
4081  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
4082 
4083  bool nonempty_range = (range_begin != range_end);
4084  this->max(nonempty_range);
4085 
4086  while (nonempty_range)
4087  {
4088  // We will serialize variable size objects from *range_begin to
4089  // *range_end as a sequence of ints in this buffer
4090  std::vector<buffer_t> buffer;
4091 
4092  range_begin = Parallel::pack_range
4093  (context, range_begin, range_end, buffer);
4094 
4095  this->allgather(buffer, false);
4096 
4097  libmesh_assert(buffer.size());
4098 
4099  Parallel::unpack_range
4100  (buffer, context, out_iter, (T*)libmesh_nullptr);
4101 
4102  nonempty_range = (range_begin != range_end);
4103  this->max(nonempty_range);
4104  }
4105 }
template<typename T >
void libMesh::Parallel::Communicator::alltoall ( std::vector< T > &  r) const
inline

Effectively transposes the input vector across all processors. The jth entry on processor i is replaced with the ith entry from processor j.

Definition at line 3408 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libMesh::Parallel::verify().

Referenced by libMesh::Nemesis_IO::read(), receive(), libMesh::MeshCommunication::redistribute(), and libMesh::MeshCommunication::send_coarse_ghosts().

3409 {
3410  if (this->size() < 2 || buf.empty())
3411  return;
3412 
3413  LOG_SCOPE("alltoall()", "Parallel");
3414 
3415  // the per-processor size. this is the same for all
3416  // processors using MPI_Alltoall, could be variable
3417  // using MPI_Alltoallv
3418  const int size_per_proc =
3419  cast_int<int>(buf.size()/this->size());
3420 
3421  libmesh_assert_equal_to (buf.size()%this->size(), 0);
3422 
3423  libmesh_assert(this->verify(size_per_proc));
3424 
3425  std::vector<T> tmp(buf);
3426 
3427  StandardType<T> send_type(&tmp[0]);
3428 
3429  libmesh_call_mpi
3430  (MPI_Alltoall (&tmp[0], size_per_proc, send_type, &buf[0],
3431  size_per_proc, send_type, this->get()));
3432 }
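
A sketch with one entry per destination (assuming a Communicator comm):

// Processor i fills every slot with its own rank i.
std::vector<unsigned int> buf (comm.size(), comm.rank());

// Transpose: slot j now receives the entry processor j had stored for us.
comm.alltoall(buf);

// After the call, buf[j] == j on every processor.
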
void libMesh::Parallel::Communicator::assign ( const communicator &  comm)
inline private

Utility function for setting our member variables from an MPI communicator

Definition at line 671 of file parallel_implementation.h.

References _communicator, _rank, _send_mode, _size, and DEFAULT.

Referenced by Communicator(), duplicate(), operator=(), and split().

672 {
673  _communicator = comm;
674 #ifdef LIBMESH_HAVE_MPI
675  if (_communicator != MPI_COMM_NULL)
676  {
677  int i;
678  libmesh_call_mpi
679  (MPI_Comm_size(_communicator, &i));
680 
681  libmesh_assert_greater_equal (i, 0);
682  _size = static_cast<unsigned int>(i);
683 
684  libmesh_call_mpi
685  (MPI_Comm_rank(_communicator, &i));
686 
687  libmesh_assert_greater_equal (i, 0);
688  _rank = static_cast<unsigned int>(i);
689  }
690  else
691  {
692  _rank = 0;
693  _size = 1;
694  }
695 #endif
696  _send_mode = DEFAULT;
697 }
void libMesh::Parallel::Communicator::barrier ( ) const
inline

Pause execution until all processors reach a certain point.

Definition at line 958 of file parallel_implementation.h.

Referenced by libMesh::NameBasedIO::write(), and libMesh::XdrIO::write().

959 {
960  if (this->size() > 1)
961  {
962  LOG_SCOPE("barrier()", "Parallel");
963  libmesh_call_mpi(MPI_Barrier (this->get()));
964  }
965 }
template<typename T >
void libMesh::Parallel::Communicator::broadcast ( T &  data,
const unsigned int  root_id = 0 
) const
inline

Take a local value and broadcast it to all processors. Optionally takes the root_id processor, which specifies the processor initiating the broadcast. If data is a vector, the user is responsible for resizing it on all processors, except in the case when data is a vector of strings.

Definition at line 3437 of file parallel_implementation.h.

References libMesh::libmesh_assert().

Referenced by libMesh::EquationSystems::_read_impl(), libMesh::MeshCommunication::broadcast(), broadcast(), libMesh::MetisPartitioner::partition_range(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::XdrIO::read(), libMesh::XdrIO::read_serialized_bc_names(), libMesh::XdrIO::read_serialized_bcs_helper(), libMesh::XdrIO::read_serialized_connectivity(), libMesh::XdrIO::read_serialized_nodes(), libMesh::XdrIO::read_serialized_nodesets(), libMesh::XdrIO::read_serialized_subdomain_names(), receive(), and libMesh::NameBasedIO::write().

3438 {
3439  if (this->size() == 1)
3440  {
3441  libmesh_assert (!this->rank());
3442  libmesh_assert (!root_id);
3443  return;
3444  }
3445 
3446  libmesh_assert_less (root_id, this->size());
3447 
3448  LOG_SCOPE("broadcast()", "Parallel");
3449 
3450  // Spread data to remote processors.
3451  libmesh_call_mpi
3452  (MPI_Bcast (&data, 1, StandardType<T>(&data), root_id,
3453  this->get()));
3454 }
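
A sketch of a root-to-all broadcast (assuming a Communicator comm):

// Only the root knows the value initially.
unsigned int n_points = 0;
if (comm.rank() == 0)
  n_points = 42;

// Spread it to everyone; root_id defaults to processor 0.
comm.broadcast(n_points);

// n_points == 42 on every processor now.
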
template<>
void libMesh::Parallel::Communicator::broadcast ( bool &  data,
const unsigned int  root_id 
) const
inline

Definition at line 3458 of file parallel_implementation.h.

References data, and libMesh::libmesh_assert().

3459 {
3460  if (this->size() == 1)
3461  {
3462  libmesh_assert (!this->rank());
3463  libmesh_assert (!root_id);
3464  return;
3465  }
3466 
3467  libmesh_assert_less (root_id, this->size());
3468 
3469  LOG_SCOPE("broadcast()", "Parallel");
3470 
3471  // We don't want to depend on MPI-2 or C++ MPI, so we don't have
3472  // MPI::BOOL available
3473  char char_data = data;
3474 
3475  // Spread data to remote processors.
3476  libmesh_call_mpi
3477  (MPI_Bcast (&char_data, 1, StandardType<char>(&char_data),
3478  root_id, this->get()));
3479 
3480  data = char_data;
3481 }
template<typename T >
void libMesh::Parallel::Communicator::broadcast ( std::basic_string< T > &  data,
const unsigned int  root_id 
) const
inline

Definition at line 3485 of file parallel_implementation.h.

References libMesh::Parallel::broadcast(), and libMesh::libmesh_assert().

3487 {
3488  if (this->size() == 1)
3489  {
3490  libmesh_assert (!this->rank());
3491  libmesh_assert (!root_id);
3492  return;
3493  }
3494 
3495  libmesh_assert_less (root_id, this->size());
3496 
3497  LOG_SCOPE("broadcast()", "Parallel");
3498 
3499  std::size_t data_size = data.size();
3500  this->broadcast(data_size, root_id);
3501 
3502  std::vector<T> data_c(data_size);
3503 #ifndef NDEBUG
3504  std::string orig(data);
3505 #endif
3506 
3507  if (this->rank() == root_id)
3508  for (std::size_t i=0; i<data.size(); i++)
3509  data_c[i] = data[i];
3510 
3511  this->broadcast (data_c, root_id);
3512 
3513  data.assign(data_c.begin(), data_c.end());
3514 
3515 #ifndef NDEBUG
3516  if (this->rank() == root_id)
3517  libmesh_assert_equal_to (data, orig);
3518 #endif
3519 }
template<typename T >
void libMesh::Parallel::Communicator::broadcast ( std::vector< T > &  data,
const unsigned int  root_id 
) const
inline

Definition at line 3524 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libmesh_nullptr.

3526 {
3527  if (this->size() == 1)
3528  {
3529  libmesh_assert (!this->rank());
3530  libmesh_assert (!root_id);
3531  return;
3532  }
3533 
3534  libmesh_assert_less (root_id, this->size());
3535 
3536  LOG_SCOPE("broadcast()", "Parallel");
3537 
3538  // and get the data from the remote processors.
3539  // Pass NULL if our vector is empty.
3540  T * data_ptr = data.empty() ? libmesh_nullptr : &data[0];
3541 
3542  libmesh_call_mpi
3543  (MPI_Bcast (data_ptr, cast_int<int>(data.size()),
3544  StandardType<T>(data_ptr), root_id, this->get()));
3545 }
template<typename T >
void libMesh::Parallel::Communicator::broadcast ( std::vector< std::basic_string< T > > &  data,
const unsigned int  root_id 
) const
inline

The strings will be packed in one long array with the size of each string preceding the actual characters

Definition at line 3549 of file parallel_implementation.h.

References libMesh::Parallel::broadcast(), and libMesh::libmesh_assert().

3551 {
3552  if (this->size() == 1)
3553  {
3554  libmesh_assert (!this->rank());
3555  libmesh_assert (!root_id);
3556  return;
3557  }
3558 
3559  libmesh_assert_less (root_id, this->size());
3560 
3561  LOG_SCOPE("broadcast()", "Parallel");
3562 
3563  std::size_t bufsize=0;
3564  if (root_id == this->rank())
3565  {
3566  for (std::size_t i=0; i<data.size(); ++i)
3567  bufsize += data[i].size() + 1; // Add one for the string length word
3568  }
3569  this->broadcast(bufsize, root_id);
3570 
3571  // Here we use unsigned int to store up to 32-bit characters
3572  std::vector<unsigned int> temp; temp.reserve(bufsize);
3573  // Pack the strings
3574  if (root_id == this->rank())
3575  {
3576  for (std::size_t i=0; i<data.size(); ++i)
3577  {
3578  temp.push_back(cast_int<unsigned int>(data[i].size()));
3579  for (std::size_t j=0; j != data[i].size(); ++j)
3584  temp.push_back(data[i][j]);
3585  }
3586  }
3587  else
3588  temp.resize(bufsize);
3589 
3590  // broad cast the packed strings
3591  this->broadcast(temp, root_id);
3592 
3593  // Unpack the strings
3594  if (root_id != this->rank())
3595  {
3596  data.clear();
3597  std::vector<unsigned int>::const_iterator iter = temp.begin();
3598  while (iter != temp.end())
3599  {
3600  std::size_t curr_len = *iter++;
3601  data.push_back(std::string(iter, iter+curr_len));
3602  iter += curr_len;
3603  }
3604  }
3605 }
template<typename T >
void libMesh::Parallel::Communicator::broadcast ( std::set< T > &  data,
const unsigned int  root_id 
) const
inline

Definition at line 3611 of file parallel_implementation.h.

References libMesh::Parallel::broadcast(), broadcast(), and libMesh::libmesh_assert().

3613 {
3614  if (this->size() == 1)
3615  {
3616  libmesh_assert (!this->rank());
3617  libmesh_assert (!root_id);
3618  return;
3619  }
3620 
3621  libmesh_assert_less (root_id, this->size());
3622 
3623  LOG_SCOPE("broadcast()", "Parallel");
3624 
3625  std::vector<T> vecdata;
3626  if (this->rank() == root_id)
3627  vecdata.assign(data.begin(), data.end());
3628 
3629  std::size_t vecsize = vecdata.size();
3630  this->broadcast(vecsize, root_id);
3631  if (this->rank() != root_id)
3632  vecdata.resize(vecsize);
3633 
3634  this->broadcast(vecdata, root_id);
3635  if (this->rank() != root_id)
3636  {
3637  data.clear();
3638  data.insert(vecdata.begin(), vecdata.end());
3639  }
3640 }
template<typename Context , typename OutputContext , typename Iter , typename OutputIter >
void libMesh::Parallel::Communicator::broadcast_packed_range ( const Context *  context1,
Iter  range_begin,
const Iter  range_end,
OutputContext *  context2,
OutputIter  out,
const unsigned int  root_id = 0 
) const
inline

Blocking-broadcast range-of-pointers from one processor. This function does not send the raw pointers, but rather constructs new objects on each receiving processor whose contents match the objects pointed to by the sender.

void Parallel::pack(const T *, vector<int> & data, const Context *) is used to serialize type T onto the end of a data vector.

unsigned int Parallel::packable_size(const T *, const Context *) is used to allow data vectors to reserve memory, and for additional error checking

unsigned int Parallel::packed_size(const T *, vector<int>::const_iterator) is used to advance to the beginning of the next object's data.

Definition at line 3697 of file parallel_implementation.h.

References libMesh::Parallel::broadcast(), libmesh_nullptr, max(), maxloc(), min(), minloc(), libMesh::Parallel::pack_range(), semiverify(), sum(), libMesh::Parallel::unpack_range(), and verify().

Referenced by libMesh::MeshCommunication::broadcast().

3703 {
3704  typedef typename std::iterator_traits<Iter>::value_type T;
3705  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
3706 
3707  do
3708  {
3709  // We will serialize variable size objects from *range_begin to
3710  // *range_end as a sequence of ints in this buffer
3711  std::vector<buffer_t> buffer;
3712 
3713  if (this->rank() == root_id)
3714  range_begin = Parallel::pack_range
3715  (context1, range_begin, range_end, buffer);
3716 
3717  // this->broadcast(vector) requires the receiving vectors to
3718  // already be the appropriate size
3719  std::size_t buffer_size = buffer.size();
3720  this->broadcast (buffer_size, root_id);
3721 
3722  // We continue until there's nothing left to broadcast
3723  if (!buffer_size)
3724  break;
3725 
3726  buffer.resize(buffer_size);
3727 
3728  // Broadcast the packed data
3729  this->broadcast (buffer, root_id);
3730 
3731  if (this->rank() != root_id)
3732  Parallel::unpack_range
3733  (buffer, context2, out_iter, (T*)libmesh_nullptr);
3734  } while (true); // break above when we reach buffer_size==0
3735 }
void libMesh::Parallel::Communicator::clear ( )
inline

Free and reset this communicator

Definition at line 636 of file parallel_implementation.h.

References _communicator, _I_duped_it, and libMesh::libmesh_assert().

Referenced by libMesh::LibMeshInit::LibMeshInit(), operator=(), split(), and ~Communicator().

636  {
637 #ifdef LIBMESH_HAVE_MPI
638  if (_I_duped_it)
639  {
640  libmesh_assert (_communicator != MPI_COMM_NULL);
641  libmesh_call_mpi
642  (MPI_Comm_free(&_communicator));
643 
644  _communicator = MPI_COMM_NULL;
645  }
646  _I_duped_it = false;
647 #endif
648 }
void libMesh::Parallel::Communicator::dereference_unique_tag ( int  tagvalue) const
inline

Dereference an already-acquired tag, and see if we can re-release it.

Definition at line 1332 of file parallel_implementation.h.

References libMesh::libmesh_assert().

1333 {
1334  // This had better be an already-acquired tag.
1335  libmesh_assert(used_tag_values.count(tagvalue));
1336 
1337  used_tag_values[tagvalue]--;
1338  // If we don't have any more outstanding references, we
1339  // don't even need to keep this tag in our "used" set.
1340  if (!used_tag_values[tagvalue])
1341  used_tag_values.erase(tagvalue);
1342 }
void libMesh::Parallel::Communicator::duplicate ( const Communicator &  comm)
inline

Definition at line 614 of file parallel_implementation.h.

References _communicator, and send_mode().

Referenced by duplicate().

615 {
616  this->duplicate(comm._communicator);
617  this->send_mode(comm.send_mode());
618 }
void libMesh::Parallel::Communicator::duplicate ( const communicator &  comm)
inline

Definition at line 621 of file parallel_implementation.h.

References _communicator, _I_duped_it, assign(), and duplicate().

622 {
623  if (_communicator != MPI_COMM_NULL)
624  {
625  libmesh_call_mpi
626  (MPI_Comm_dup(comm, &_communicator));
627 
628  _I_duped_it = true;
629  }
630  this->assign(_communicator);
631 }
template<typename T >
void libMesh::Parallel::Communicator::gather ( const unsigned int  root_id,
const T &  send_val,
std::vector< T > &  recv_val 
) const
inline

On processor root_id, fill a vector of length comm.size() such that recv[processor_id] holds the value of send on processor processor_id.

Gather-to-root on one processor.

Definition at line 2954 of file parallel_implementation.h.

References libmesh_nullptr.

Referenced by receive(), libMesh::XdrIO::write_serialized_bcs_helper(), libMesh::XdrIO::write_serialized_connectivity(), libMesh::XdrIO::write_serialized_nodes(), and libMesh::XdrIO::write_serialized_nodesets().

2957 {
2958  libmesh_assert_less (root_id, this->size());
2959 
2960  if (this->rank() == root_id)
2961  recv.resize(this->size());
2962 
2963  if (this->size() > 1)
2964  {
2965  LOG_SCOPE("gather()", "Parallel");
2966 
2967  StandardType<T> send_type(&sendval);
2968 
2969  libmesh_call_mpi
2970  (MPI_Gather(const_cast<T*>(&sendval), 1, send_type,
2971  recv.empty() ? libmesh_nullptr : &recv[0], 1, send_type,
2972  root_id, this->get()));
2973  }
2974  else
2975  recv[0] = sendval;
2976 }
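
A sketch of gathering one value per processor onto the root (assuming a Communicator comm):

// Each processor has a local count ...
unsigned int local_count = comm.rank() + 1;

// ... which processor 0 collects into a vector indexed by rank.
std::vector<unsigned int> counts;
comm.gather(0, local_count, counts);

// Only on processor 0: counts.size() == comm.size() and counts[p] == p+1;
// on the other processors counts is only resized and filled on the root.
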
template<typename T >
void libMesh::Parallel::Communicator::gather ( const unsigned int  root_id,
const std::basic_string< T > &  send,
std::vector< std::basic_string< T > > &  recv,
const bool  identical_buffer_sizes = false 
) const
inline

Gather overload for string types

Definition at line 2981 of file parallel_implementation.h.

References libMesh::Parallel::gather(), and libmesh_nullptr.

2985 {
2986  libmesh_assert_less (root_id, this->size());
2987 
2988  if (this->rank() == root_id)
2989  recv.resize(this->size());
2990 
2991  if (this->size() > 1)
2992  {
2993  LOG_SCOPE ("gather()","Parallel");
2994 
2995  std::vector<int>
2996  sendlengths (this->size(), 0),
2997  displacements(this->size(), 0);
2998 
2999  const int mysize = static_cast<int>(sendval.size());
3000 
3001  if (identical_buffer_sizes)
3002  sendlengths.assign(this->size(), mysize);
3003  else
3004  // first comm step to determine buffer sizes from all processors
3005  this->gather(root_id, mysize, sendlengths);
3006 
3007  // Find the total size of the final array and
3008  // set up the displacement offsets for each processor
3009  unsigned int globalsize = 0;
3010  for (unsigned int i=0; i < this->size(); ++i)
3011  {
3012  displacements[i] = globalsize;
3013  globalsize += sendlengths[i];
3014  }
3015 
3016  // monolithic receive buffer
3017  std::string r;
3018  if (this->rank() == root_id)
3019  r.resize(globalsize, 0);
3020 
3021  // and get the data from the remote processors.
3022  libmesh_call_mpi
3023  (MPI_Gatherv (const_cast<T*>(&sendval[0]),
3024  mysize, StandardType<T>(),
3025  this->rank() == root_id ? &r[0] : libmesh_nullptr,
3026  &sendlengths[0], &displacements[0],
3027  StandardType<T>(), root_id, this->get()));
3028 
3029  // slice receive buffer up
3030  if (this->rank() == root_id)
3031  for (unsigned int i=0; i != this->size(); ++i)
3032  recv[i] = r.substr(displacements[i], sendlengths[i]);
3033  }
3034  else
3035  recv[0] = sendval;
3036 }
template<typename T >
void libMesh::Parallel::Communicator::gather ( const unsigned int  root_id,
std::vector< T > &  r 
) const
inline

Take a vector of local variables and expand it on processor root_id to include values from all processors

This handles the case where the lengths of the vectors may vary. Specifically, this function transforms this:

* Processor 0: [ ... N_0 ]
* Processor 1: [ ....... N_1 ]
* ...
* Processor M: [ .. N_M]
* 

into this:

* [ [ ... N_0 ] [ ....... N_1 ] ... [ .. N_M] ]
* 

on processor root_id. This function is collective and therefore must be called by all processors in the Communicator.

Definition at line 3041 of file parallel_implementation.h.

References libMesh::Parallel::allgather(), libMesh::libmesh_assert(), and libmesh_nullptr.

3043 {
3044  if (this->size() == 1)
3045  {
3046  libmesh_assert (!this->rank());
3047  libmesh_assert (!root_id);
3048  return;
3049  }
3050 
3051  libmesh_assert_less (root_id, this->size());
3052 
3053  std::vector<int>
3054  sendlengths (this->size(), 0),
3055  displacements(this->size(), 0);
3056 
3057  const int mysize = static_cast<int>(r.size());
3058  this->allgather(mysize, sendlengths);
3059 
3060  LOG_SCOPE("gather()", "Parallel");
3061 
3062  // Find the total size of the final array and
3063  // set up the displacement offsets for each processor.
3064  unsigned int globalsize = 0;
3065  for (unsigned int i=0; i != this->size(); ++i)
3066  {
3067  displacements[i] = globalsize;
3068  globalsize += sendlengths[i];
3069  }
3070 
3071  // Check for quick return
3072  if (globalsize == 0)
3073  return;
3074 
3075  // copy the input buffer
3076  std::vector<T> r_src(r);
3077 
3078  // now resize it to hold the global data
3079  // on the receiving processor
3080  if (root_id == this->rank())
3081  r.resize(globalsize);
3082 
3083  // and get the data from the remote processors
3084  libmesh_call_mpi
3085  (MPI_Gatherv (r_src.empty() ? libmesh_nullptr : &r_src[0], mysize,
3086  StandardType<T>(), r.empty() ? libmesh_nullptr : &r[0],
3087  &sendlengths[0], &displacements[0],
3088  StandardType<T>(), root_id, this->get()));
3089 }
template<typename Context , typename Iter , typename OutputIter >
void libMesh::Parallel::Communicator::gather_packed_range ( const unsigned int  root_id,
Context *  context,
Iter  range_begin,
const Iter  range_end,
OutputIter  out 
) const
inline

Take a range of local variables, combine it with ranges from all processors, and write the output to the output iterator on rank root.

Definition at line 4042 of file parallel_implementation.h.

References libMesh::Parallel::gather(), libmesh_nullptr, libMesh::Parallel::max(), libMesh::Parallel::pack_range(), and libMesh::Parallel::unpack_range().

Referenced by libMesh::MeshCommunication::gather().

4047 {
4048  typedef typename std::iterator_traits<Iter>::value_type T;
4049  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
4050 
4051  bool nonempty_range = (range_begin != range_end);
4052  this->max(nonempty_range);
4053 
4054  while (nonempty_range)
4055  {
4056  // We will serialize variable size objects from *range_begin to
4057  // *range_end as a sequence of ints in this buffer
4058  std::vector<buffer_t> buffer;
4059 
4060  range_begin = Parallel::pack_range
4061  (context, range_begin, range_end, buffer);
4062 
4063  this->gather(root_id, buffer);
4064 
4065  Parallel::unpack_range
4066  (buffer, context, out_iter, (T*)(libmesh_nullptr));
4067 
4068  nonempty_range = (range_begin != range_end);
4069  this->max(nonempty_range);
4070  }
4071 }
const communicator& libMesh::Parallel::Communicator::get ( ) const
inline

Definition at line 648 of file parallel.h.

References operator=().

648 { return _communicator; }
MessageTag libMesh::Parallel::Communicator::get_unique_tag ( int  tagvalue) const
inline

Get a tag that is unique to this Communicator. Note that if people are also using magic numbers or copying communicators around then we can't guarantee the tag is unique to this MPI_Comm.

Definition at line 1298 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libMesh::Parallel::MessageTag::MessageTag().

Referenced by libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::Nemesis_IO::read(), libMesh::MeshCommunication::redistribute(), libMesh::MeshCommunication::send_coarse_ghosts(), and libMesh::XdrIO::write_serialized_nodes().

1299 {
1300  if (used_tag_values.count(tagvalue))
1301  {
1302  // Get the largest value in the used values, and pick one
1303  // larger
1304  tagvalue = used_tag_values.rbegin()->first+1;
1305  libmesh_assert(!used_tag_values.count(tagvalue));
1306  }
1307  used_tag_values[tagvalue] = 1;
1308 
1309  // #ifndef NDEBUG
1310  // // Make sure everyone called get_unique_tag and make sure
1311  // // everyone got the same value
1312  // int maxval = tagvalue;
1313  // this->max(maxval);
1314  // libmesh_assert_equal_to (tagvalue, maxval);
1315  // #endif
1316 
1317  return MessageTag(tagvalue, this);
1318 }
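
A sketch of pairing a unique tag with a point-to-point exchange (assuming a Communicator comm spanning at least two processors and a std::vector<unsigned int> v already defined on both sides):

// Ask the Communicator for a tag value it is not already using.
libMesh::Parallel::MessageTag tag = comm.get_unique_tag(1234);

// Use the same tag on both ends so this message cannot be confused
// with other traffic between the same pair of processors.
if (comm.rank() == 0)
  comm.send (1, v, tag);
else if (comm.rank() == 1)
  comm.receive (0, v, tag);
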
template<typename T >
void libMesh::Parallel::Communicator::max ( T &  r) const
inline

Take a local variable and replace it with the maximum of its values on all processors. Containers are replaced element-wise.

Definition at line 1723 of file parallel_implementation.h.

Referenced by libMesh::MeshRefinement::_coarsen_elements(), libMesh::MeshRefinement::_refine_elements(), libMesh::MeshRefinement::_smooth_flags(), libMesh::UnstructuredMesh::all_second_order(), libMesh::MeshTools::Modification::all_tri(), libMesh::DofMap::attach_matrix(), libMesh::Parallel::Sort< KeyType, IdxType >::binsort(), libMesh::MeshTools::bounding_box(), broadcast_packed_range(), libMesh::MeshTools::Generation::build_extrusion(), libMesh::System::calculate_norm(), libMesh::DofMap::check_dirichlet_bcid_consistency(), libMesh::EpetraVector< T >::close(), libMesh::MeshRefinement::eliminate_unrefined_patches(), libMesh::MeshRefinement::flag_elements_by_error_fraction(), libMesh::MeshRefinement::flag_elements_by_nelem_target(), libMesh::DofMap::get_info(), libMesh::LocationMap< T >::init(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_dof_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::MeshTools::libmesh_assert_valid_unique_ids(), libMesh::MeshRefinement::limit_level_mismatch_at_edge(), libMesh::MeshRefinement::limit_level_mismatch_at_node(), libMesh::MeshRefinement::limit_overrefined_boundary(), libMesh::MeshRefinement::limit_underrefined_boundary(), libMesh::DistributedVector< T >::max(), max(), libMesh::MeshTools::n_active_levels(), libMesh::MeshTools::n_levels(), libMesh::MeshTools::n_p_levels(), libMesh::DistributedMesh::parallel_max_elem_id(), libMesh::DistributedMesh::parallel_max_node_id(), libMesh::ReplicatedMesh::parallel_max_unique_id(), libMesh::DistributedMesh::parallel_max_unique_id(), libMesh::MeshTools::paranoid_n_levels(), libMesh::MeshTools::processor_bounding_box(), libMesh::Nemesis_IO::read(), libMesh::MeshBase::recalculate_n_partitions(), libMesh::MeshTools::subdomain_bounding_box(), libMesh::Parallel::sync_dofobject_data_by_xyz(), libMesh::MeshRefinement::test_level_one(), and libMesh::MeshRefinement::test_unflagged().

1724 {
1725  if (this->size() > 1)
1726  {
1727  LOG_SCOPE("max(scalar)", "Parallel");
1728 
1729  T temp;
1730  libmesh_call_mpi
1731  (MPI_Allreduce (&r, &temp, 1, StandardType<T>(&r), MPI_MAX,
1732  this->get()));
1733  r = temp;
1734  }
1735 }
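
A sketch (assuming a Communicator comm):

// Each processor proposes a candidate value ...
double value = 1.0 + comm.rank();

// ... and every processor ends up with the global maximum.
comm.max(value);

// value == comm.size() on every processor.
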
template<typename T >
void libMesh::Parallel::Communicator::max ( std::vector< T > &  r) const
inline

Definition at line 1756 of file parallel_implementation.h.

References libMesh::libmesh_assert(), max(), and libMesh::Parallel::verify().

1757 {
1758  if (this->size() > 1 && !r.empty())
1759  {
1760  LOG_SCOPE("max(vector)", "Parallel");
1761 
1762  libmesh_assert(this->verify(r.size()));
1763 
1764  std::vector<T> temp(r);
1765  libmesh_call_mpi
1766  (MPI_Allreduce (&temp[0], &r[0], cast_int<int>(r.size()),
1767  StandardType<T>(&temp[0]), MPI_MAX,
1768  this->get()));
1769  }
1770 }
template<typename T >
void libMesh::Parallel::Communicator::maxloc ( T &  r,
unsigned int &  max_id 
) const
inline

Take a local variable and replace it with the maximum of its values on all processors, returning the minimum rank of a processor which originally held the maximum value.

Definition at line 1795 of file parallel_implementation.h.

References libMesh::Parallel::dataplusint_type< int >(), libMesh::Parallel::DataPlusInt< T >::rank, and libMesh::Parallel::DataPlusInt< T >::val.

Referenced by broadcast_packed_range(), and maxloc().

1797 {
1798  if (this->size() > 1)
1799  {
1800  LOG_SCOPE("maxloc(scalar)", "Parallel");
1801 
1802  DataPlusInt<T> data_in;
1803  data_in.val = r;
1804  data_in.rank = this->rank();
1805  DataPlusInt<T> data_out;
1806  libmesh_call_mpi
1807  (MPI_Allreduce (&data_in, &data_out, 1,
1808  dataplusint_type<T>(),
1809  MPI_MAXLOC, this->get()));
1810  r = data_out.val;
1811  max_id = data_out.rank;
1812  }
1813  else
1814  max_id = this->rank();
1815 }
template<typename T >
void libMesh::Parallel::Communicator::maxloc ( std::vector< T > &  r,
std::vector< unsigned int > &  max_id 
) const
inline

Take a vector of local variables and replace each entry with the maximum of its values on all processors. Set each max_id entry to the minimum rank where a corresponding maximum was found.

Definition at line 1842 of file parallel_implementation.h.

References libMesh::libmesh_assert(), maxloc(), and libMesh::Parallel::verify().

1844 {
1845  if (this->size() > 1 && !r.empty())
1846  {
1847  LOG_SCOPE("maxloc(vector)", "Parallel");
1848 
1849  libmesh_assert(this->verify(r.size()));
1850 
1851  std::vector<DataPlusInt<T> > data_in(r.size());
1852  for (std::size_t i=0; i != r.size(); ++i)
1853  {
1854  data_in[i].val = r[i];
1855  data_in[i].rank = this->rank();
1856  }
1857  std::vector<DataPlusInt<T> > data_out(r.size());
1858  libmesh_call_mpi
1859  (MPI_Allreduce (&data_in[0], &data_out[0],
1860  cast_int<int>(r.size()),
1861  dataplusint_type<T>(), MPI_MAXLOC,
1862  this->get()));
1863  for (std::size_t i=0; i != r.size(); ++i)
1864  {
1865  r[i] = data_out[i].val;
1866  max_id[i] = data_out[i].rank;
1867  }
1868  }
1869  else if (!r.empty())
1870  {
1871  for (std::size_t i=0; i != r.size(); ++i)
1872  max_id[i] = this->rank();
1873  }
1874 }
template<typename T >
void libMesh::Parallel::Communicator::min ( T &  r) const
inline

Take a local variable and replace it with the minimum of its values on all processors. Containers are replaced element-wise.

Definition at line 1534 of file parallel_implementation.h.

Referenced by libMesh::MeshRefinement::_smooth_flags(), libMesh::MeshTools::bounding_box(), broadcast_packed_range(), libMesh::MeshRefinement::create_parent_error_vector(), libMesh::MeshRefinement::flag_elements_by_error_fraction(), libMesh::LocationMap< T >::init(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_object_ids(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshRefinement::make_flags_parallel_consistent(), libMesh::MeshRefinement::make_refinement_compatible(), libMesh::DistributedVector< T >::min(), min(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::MeshTools::processor_bounding_box(), libMesh::Partitioner::set_parent_processor_ids(), and libMesh::MeshTools::subdomain_bounding_box().

1535 {
1536  if (this->size() > 1)
1537  {
1538  LOG_SCOPE("min(scalar)", "Parallel");
1539 
1540  T temp = r;
1541  libmesh_call_mpi
1542  (MPI_Allreduce (&temp, &r, 1, StandardType<T>(&temp), MPI_MIN,
1543  this->get()));
1544  }
1545 }
template<typename T >
void libMesh::Parallel::Communicator::min ( std::vector< T > &  r) const
inline

Definition at line 1567 of file parallel_implementation.h.

References libMesh::libmesh_assert(), min(), and libMesh::Parallel::verify().

1568 {
1569  if (this->size() > 1 && !r.empty())
1570  {
1571  LOG_SCOPE("min(vector)", "Parallel");
1572 
1573  libmesh_assert(this->verify(r.size()));
1574 
1575  std::vector<T> temp(r);
1576  libmesh_call_mpi
1577  (MPI_Allreduce (&temp[0], &r[0], cast_int<int>(r.size()),
1578  StandardType<T>(&temp[0]), MPI_MIN,
1579  this->get()));
1580  }
1581 }
template<typename T >
void libMesh::Parallel::Communicator::minloc ( T &  r,
unsigned int &  min_id 
) const
inline

Take a local variable and replace it with the minimum of its values on all processors, returning the minimum rank of a processor which originally held the minimum value.

Definition at line 1606 of file parallel_implementation.h.

References libMesh::Parallel::dataplusint_type< int >(), libMesh::Parallel::DataPlusInt< T >::rank, and libMesh::Parallel::DataPlusInt< T >::val.

Referenced by broadcast_packed_range(), and minloc().

1608 {
1609  if (this->size() > 1)
1610  {
1611  LOG_SCOPE("minloc(scalar)", "Parallel");
1612 
1613  DataPlusInt<T> data_in;
1614  data_in.val = r;
1615  data_in.rank = this->rank();
1616  DataPlusInt<T> data_out;
1617  libmesh_call_mpi
1618  (MPI_Allreduce (&data_in, &data_out, 1, dataplusint_type<T>(),
1619  MPI_MINLOC, this->get()));
1620  r = data_out.val;
1621  min_id = data_out.rank;
1622  }
1623  else
1624  min_id = this->rank();
1625 }
unsigned int size() const
Definition: parallel.h:679
unsigned int rank() const
Definition: parallel.h:677
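
A brief sketch of the scalar overload (variable names and values are illustrative):

  double local_best = double(comm.rank() + 1);  // candidate value on this rank
  unsigned int owner = 0;
  comm.minloc(local_best, owner);
  // local_best is now the global minimum; owner is the lowest rank that held it.
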
template<typename T >
void libMesh::Parallel::Communicator::minloc ( std::vector< T > &  r,
std::vector< unsigned int > &  min_id 
) const
inline

Take a vector of local variables and replace each entry with the minimum of its values on all processors. Set each min_id entry to the minimum rank where a corresponding minimum was found.

Definition at line 1652 of file parallel_implementation.h.

References libMesh::libmesh_assert(), minloc(), and libMesh::Parallel::verify().

1654 {
1655  if (this->size() > 1 && !r.empty())
1656  {
1657  LOG_SCOPE("minloc(vector)", "Parallel");
1658 
1659  libmesh_assert(this->verify(r.size()));
1660 
1661  std::vector<DataPlusInt<T> > data_in(r.size());
1662  for (std::size_t i=0; i != r.size(); ++i)
1663  {
1664  data_in[i].val = r[i];
1665  data_in[i].rank = this->rank();
1666  }
1667  std::vector<DataPlusInt<T> > data_out(r.size());
1668  libmesh_call_mpi
1669  (MPI_Allreduce (&data_in[0], &data_out[0],
1670  cast_int<int>(r.size()),
1671  dataplusint_type<T>(), MPI_MINLOC,
1672  this->get()));
1673  for (std::size_t i=0; i != r.size(); ++i)
1674  {
1675  r[i] = data_out[i].val;
1676  min_id[i] = data_out[i].rank;
1677  }
1678  }
1679  else if (!r.empty())
1680  {
1681  for (std::size_t i=0; i != r.size(); ++i)
1682  min_id[i] = this->rank();
1683  }
1684 }
unsigned int size() const
Definition: parallel.h:679
libmesh_assert(j)
unsigned int rank() const
Definition: parallel.h:677
template<typename Context , typename OutputIter , typename T >
void libMesh::Parallel::Communicator::nonblocking_receive_packed_range ( const unsigned int  src_processor_id,
Context *  context,
OutputIter  out,
const T *  output_type,
Request req,
Status stat,
const MessageTag tag = any_tag 
) const
inline

Nonblocking-receive range-of-pointers from one processor.

This is meant to receive messages from nonblocking_send_packed_range.

Similar in design to the above receive_packed_range. However, this version requires a Request and a Status.

The Status must be a positively tested Status for a message of this type (i.e. a message does exist). It should most likely be generated by Communicator::packed_range_probe.

Definition at line 2733 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), libMesh::Parallel::receive(), and libMesh::Parallel::Status::size().

2740 {
2741  libmesh_experimental();
2742 
2743  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
2744 
2745  // Receive serialized variable size objects as a sequence of
2746  // buffer_t.
2747  // Allocate a buffer on the heap so we don't have to free it until
2748  // after the Request::wait()
2749  std::vector<buffer_t> * buffer = new std::vector<buffer_t>(stat.size());
2750  this->receive(src_processor_id, *buffer, req, tag);
2751 
2752  // Make the Request::wait() handle unpacking the buffer
2753  req.add_post_wait_work
2754  (new Parallel::PostWaitUnpackBuffer<std::vector<buffer_t>, Context, OutputIter, T>(*buffer, context, out));
2755 
2756  // Make the Request::wait() then handle deleting the buffer
2757  req.add_post_wait_work
2758  (new Parallel::PostWaitDeleteBuffer<std::vector<buffer_t> >(buffer));
2759 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
OStreamProxy out(std::cout)
template<typename Context , typename Iter >
void libMesh::Parallel::Communicator::nonblocking_send_packed_range ( const unsigned int  dest_processor_id,
const Context *  context,
Iter  range_begin,
const Iter  range_end,
Request req,
const MessageTag tag = no_tag 
) const
inline

Similar to the above nonblocking send_packed_range, with a few important differences:

  1. The total size of the packed buffer MUST be less than std::numeric_limits<int>::max()
  2. Only one message is generated
  3. On the receiving end the message should be tested for using Communicator::packed_range_probe()
  4. The message must be received by Communicator::nonblocking_receive_packed_range()

Definition at line 2402 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), libMesh::Parallel::max(), libMesh::Parallel::pack_range(), and libMesh::Parallel::send().

2408 {
2409  libmesh_experimental();
2410 
2411  // Allocate a buffer on the heap so we don't have to free it until
2412  // after the Request::wait()
2413  typedef typename std::iterator_traits<Iter>::value_type T;
2414  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
2415 
2416  if (range_begin != range_end)
2417  {
2418  std::vector<buffer_t> * buffer = new std::vector<buffer_t>();
2419 
2420  range_begin =
2421  Parallel::pack_range(context,
2422  range_begin,
2423  range_end,
2424  *buffer,
2425  // MPI-2 can only use integers for size
2426  std::numeric_limits<int>::max());
2427 
2428  if (range_begin != range_end)
2429  libmesh_error_msg("Non-blocking packed range sends cannot exceed " << std::numeric_limits<int>::max() << " in size");
2430 
2431  // Make the Request::wait() handle deleting the buffer
2432  req.add_post_wait_work
2433  (new Parallel::PostWaitDeleteBuffer<std::vector<buffer_t> >
2434  (buffer));
2435 
2436  // Non-blocking send of the buffer
2437  this->send(dest_processor_id, *buffer, req, tag);
2438  }
2439 }
Iter pack_range(const Context *context, Iter range_begin, const Iter range_end, typename std::vector< buffertype > &buffer, std::size_t approx_buffer_size=1000000)
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
void max(T &r, const Communicator &comm=Communicator_World)
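
A sender-side sketch is shown below; Widget, WidgetContext, dest and the tag value are hypothetical names, and a user-provided Parallel::Packing<Widget *> specialization is assumed for the type being sent:

  const unsigned int dest = 1;                       // destination rank (illustrative)
  std::vector<Widget *> widgets;                     // objects to serialize and send
  WidgetContext context;
  Parallel::MessageTag tag = comm.get_unique_tag(101);
  Parallel::Request req;

  comm.nonblocking_send_packed_range(dest, &context,
                                     widgets.begin(), widgets.end(),
                                     req, tag);
  // ... overlap unrelated work here ...
  req.wait();   // the heap-allocated send buffer is freed as post-wait work
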
Communicator & libMesh::Parallel::Communicator::operator= ( const communicator comm)
inline

Definition at line 650 of file parallel_implementation.h.

References assign(), and clear().

651 {
652  this->clear();
653  this->assign(comm);
654  return *this;
655 }
void assign(const communicator &comm)
template<typename T >
Status libMesh::Parallel::Communicator::packed_range_probe ( const unsigned int  src_processor_id,
const MessageTag tag,
bool &  flag 
) const
inline

Non-blocking message probe for a packed range message. Allows information about a message to be examined before the message is actually received.

The template type must match the object type that will be in the packed range.

Parameters
src_processor_id    The processor the message is expected from, or Parallel::any_source
tag    The message tag, or Parallel::any_tag
flag    Output. True if a message exists, false otherwise.

Definition at line 2037 of file parallel_implementation.h.

References libMesh::Parallel::Status::get(), and libMesh::Parallel::MessageTag::value().

2040 {
2041  START_LOG("packed_range_probe()", "Parallel");
2042 
2043  libmesh_experimental();
2044 
2045  Status stat((StandardType<typename Packing<T>::buffer_type>()));
2046 
2047  int int_flag;
2048 
2049  libmesh_call_mpi(MPI_Iprobe(src_processor_id,
2050  tag.value(),
2051  this->get(),
2052  &int_flag,
2053  stat.get()));
2054 
2055  flag = int_flag;
2056 
2057  STOP_LOG("packed_range_probe()", "Parallel");
2058 
2059  return stat;
2060 }
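
The matching receiver-side sketch pairs packed_range_probe() with nonblocking_receive_packed_range(); Widget, WidgetContext and the tag value are hypothetical, with Parallel::Packing<Widget *> assumed to be specialized, and the tag must match the sender's:

  const unsigned int src = 0;                          // source rank (illustrative)
  std::vector<Widget *> received;
  WidgetContext context;
  Parallel::MessageTag tag = comm.get_unique_tag(101); // must match the sender's tag
  bool flag = false;

  Parallel::Status stat = comm.packed_range_probe<Widget *>(src, tag, flag);
  if (flag)
    {
      Parallel::Request req;
      comm.nonblocking_receive_packed_range(src, &context,
                                            std::back_inserter(received),
                                            (Widget **)libmesh_nullptr,
                                            req, stat, tag);
      req.wait();   // unpacking into 'received' happens as post-wait work
    }
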
status libMesh::Parallel::Communicator::probe ( const unsigned int  src_processor_id,
const MessageTag tag = any_tag 
) const
inline

Blocking message probe. Allows information about a message to be examined before the message is actually received.

We do not currently support probes on one processor without MPI.

Definition at line 2023 of file parallel_implementation.h.

References libMesh::Parallel::MessageTag::value().

Referenced by libMesh::MeshCommunication::gather_neighboring_elements(), and set_union().

2025 {
2026  LOG_SCOPE("probe()", "Parallel");
2027 
2028  status stat;
2029 
2030  libmesh_call_mpi
2031  (MPI_Probe (src_processor_id, tag.value(), this->get(), &stat));
2032 
2033  return stat;
2034 }
MPI_Status status
Definition: parallel.h:172
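
For example, probe() can be used to size a buffer before a matching blocking receive (a sketch; the source rank is illustrative):

  const unsigned int src = 0;                              // source rank (illustrative)
  Parallel::Status stat(comm.probe(src), Parallel::StandardType<double>());
  std::vector<double> incoming(stat.size());               // message length, counted in doubles
  comm.receive(src, incoming, Parallel::any_tag);          // the vector overload re-probes and resizes anyway
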
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  dest_processor_id,
T &  buf,
const MessageTag tag = any_tag 
) const
inline

Blocking-receive from one processor with data-defined type.

We do not currently support receives on one processor without MPI.

Definition at line 2487 of file parallel_implementation.h.

References libMesh::Parallel::Status::get(), libMesh::Parallel::probe(), and libMesh::Parallel::MessageTag::value().

Referenced by libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::SystemSubsetBySubdomain::init(), libMesh::SparseMatrix< T >::print(), libMesh::Nemesis_IO::read(), send(), libMesh::XdrIO::write_serialized_bcs_helper(), libMesh::XdrIO::write_serialized_connectivity(), libMesh::XdrIO::write_serialized_nodes(), libMesh::XdrIO::write_serialized_nodesets(), and libMesh::Parallel::StandardType< std::complex< T > >::~StandardType().

2490 {
2491  LOG_SCOPE("receive()", "Parallel");
2492 
2493  // Get the status of the message, explicitly provide the
2494  // datatype so we can later query the size
2495  Status stat(this->probe(src_processor_id, tag), StandardType<T>(&buf));
2496 
2497  libmesh_call_mpi
2498  (MPI_Recv (&buf, 1, StandardType<T>(&buf), src_processor_id,
2499  tag.value(), this->get(), stat.get()));
2500 
2501  return stat;
2502 }
status probe(const unsigned int src_processor_id, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  dest_processor_id,
T &  buf,
Request req,
const MessageTag tag = any_tag 
) const
inline

Nonblocking-receive from one processor with data-defined type.

Definition at line 2507 of file parallel_implementation.h.

References libMesh::Parallel::Request::get(), and libMesh::Parallel::MessageTag::value().

2511 {
2512  LOG_SCOPE("receive()", "Parallel");
2513 
2514  libmesh_call_mpi
2515  (MPI_Irecv (&buf, 1, StandardType<T>(&buf), src_processor_id,
2516  tag.value(), this->get(), req.get()));
2517 }
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  dest_processor_id,
T &  buf,
const DataType type,
const MessageTag tag = any_tag 
) const
inline

Blocking-receive from one processor with user-defined type.

Definition at line 3848 of file parallel_implementation.h.

3852 { libmesh_not_implemented(); return Status(); }
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  dest_processor_id,
T &  buf,
const DataType type,
Request req,
const MessageTag tag = any_tag 
) const
inline

Nonblocking-receive from one processor with user-defined type.

Definition at line 3855 of file parallel_implementation.h.

References allgather(), alltoall(), broadcast(), end, gather(), libMesh::Parallel::pack_range(), receive_packed_range(), scatter(), send_receive(), send_receive_packed_range(), and libMesh::Parallel::unpack_range().

3860 { libmesh_not_implemented(); }
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::basic_string< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2443 of file parallel_implementation.h.

References libMesh::Parallel::receive().

2446 {
2447  std::vector<T> tempbuf; // Officially C++ won't let us get a
2448  // modifiable array from a string
2449 
2450  Status stat = this->receive(src_processor_id, tempbuf, tag);
2451  buf.assign(tempbuf.begin(), tempbuf.end());
2452  return stat;
2453 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::basic_string< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2458 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), and libMesh::Parallel::receive().

2462 {
2463  // Officially C++ won't let us get a modifiable array from a
2464  // string, and we can't even put one on the stack for the
2465  // non-blocking case.
2466  std::vector<T> * tempbuf = new std::vector<T>();
2467 
2468  // We can clear the string, but the Request::wait() will need to
2469  // handle copying our temporary buffer to it
2470  buf.clear();
2471 
2472  req.add_post_wait_work
2473  (new Parallel::PostWaitCopyBuffer<std::vector<T>,
2474  std::back_insert_iterator<std::basic_string<T> > >
2475  (*tempbuf, std::back_inserter(buf)));
2476 
2477  // Make the Request::wait() then handle deleting the buffer
2478  req.add_post_wait_work
2479  (new Parallel::PostWaitDeleteBuffer<std::vector<T> >(tempbuf));
2480 
2481  this->receive(src_processor_id, *tempbuf, req, tag);
2482 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::set< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2522 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::receive().

2525 {
2526  return this->receive
2527  (src_processor_id, buf,
2528  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), tag);
2529 }
const class libmesh_nullptr_t libmesh_nullptr
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::set< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2539 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::receive().

2543 {
2544  this->receive (src_processor_id, buf,
2545  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), req, tag);
2546 }
const class libmesh_nullptr_t libmesh_nullptr
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::set< T > &  buf,
const DataType type,
const MessageTag tag 
) const
inline

Definition at line 2552 of file parallel_implementation.h.

References libMesh::Parallel::receive().

2556 {
2557  LOG_SCOPE("receive()", "Parallel");
2558 
2559  std::vector<T> vecbuf;
2560  Status stat = this->receive(src_processor_id, vecbuf, type, tag);
2561  buf.clear();
2562  buf.insert(vecbuf.begin(), vecbuf.end());
2563 
2564  return stat;
2565 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::set< T > &  buf,
const DataType type,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2575 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), and libMesh::Parallel::receive().

2580 {
2581  LOG_SCOPE("receive()", "Parallel");
2582 
2583  // Allocate temporary buffer on the heap so it lives until after
2584  // the non-blocking send completes
2585  std::vector<T> * vecbuf = new std::vector<T>();
2586 
2587  // We can clear the set, but the Request::wait() will need to
2588  // handle copying our temporary buffer to it
2589  buf.clear();
2590 
2591  req.add_post_wait_work
2592  (new Parallel::PostWaitCopyBuffer<std::vector<T>,
2593  std::insert_iterator<std::set<T> > >
2594  (*vecbuf, std::inserter(buf,buf.end())));
2595 
2596  // Make the Request::wait() then handle deleting the buffer
2597  req.add_post_wait_work
2598  (new Parallel::PostWaitDeleteBuffer<std::vector<T> >(vecbuf));
2599 
2600  this->receive(src_processor_id, *vecbuf, type, req, tag);
2601 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::vector< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2607 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::receive().

2610 {
2611  return this->receive
2612  (src_processor_id, buf,
2613  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), tag);
2614 }
const class libmesh_nullptr_t libmesh_nullptr
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::vector< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2619 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::receive().

2623 {
2624  this->receive (src_processor_id, buf,
2625  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), req, tag);
2626 }
const class libmesh_nullptr_t libmesh_nullptr
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
template<typename T >
Status libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::vector< T > &  buf,
const DataType type,
const MessageTag tag 
) const
inline

Definition at line 2631 of file parallel_implementation.h.

References libMesh::Parallel::Status::get(), libmesh_nullptr, libMesh::Parallel::probe(), libMesh::Parallel::Status::size(), libMesh::Parallel::Status::source(), and libMesh::Parallel::Status::tag().

2635 {
2636  LOG_SCOPE("receive()", "Parallel");
2637 
2638  // Get the status of the message, explicitly provide the
2639  // datatype so we can later query the size
2640  Status stat(this->probe(src_processor_id, tag), type);
2641 
2642  buf.resize(stat.size());
2643 
2644  // Use stat.source() and stat.tag() in the receive - if
2645  // src_processor_id or tag is "any" then we want to be sure we
2646  // try to receive the same message we just probed.
2647  libmesh_call_mpi
2648  (MPI_Recv (buf.empty() ? libmesh_nullptr : &buf[0],
2649  cast_int<int>(buf.size()), type, stat.source(),
2650  stat.tag(), this->get(), stat.get()));
2651 
2652  libmesh_assert_equal_to (stat.size(), buf.size());
2653 
2654  return stat;
2655 }
const class libmesh_nullptr_t libmesh_nullptr
status probe(const unsigned int src_processor_id, const MessageTag &tag=any_tag) const
template<typename T >
void libMesh::Parallel::Communicator::receive ( const unsigned int  src_processor_id,
std::vector< T > &  buf,
const DataType type,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2660 of file parallel_implementation.h.

References libMesh::Parallel::Request::get(), libmesh_nullptr, and libMesh::Parallel::MessageTag::value().

2665 {
2666  LOG_SCOPE("receive()", "Parallel");
2667 
2668  libmesh_call_mpi
2669  (MPI_Irecv (buf.empty() ? libmesh_nullptr : &buf[0],
2670  cast_int<int>(buf.size()), type, src_processor_id,
2671  tag.value(), this->get(), req.get()));
2672 }
const class libmesh_nullptr_t libmesh_nullptr
template<typename Context , typename OutputIter , typename T >
void libMesh::Parallel::Communicator::receive_packed_range ( const unsigned int  dest_processor_id,
Context *  context,
OutputIter  out,
const T *  output_type,
const MessageTag tag = any_tag 
) const
inline

Blocking-receive range-of-pointers from one processor. This function does not receive raw pointers, but rather constructs new objects whose contents match the objects pointed to by the sender.

The objects will be of type T = iterator_traits<OutputIter>::value_type.

Using std::back_inserter as the output iterator allows receive to fill any container type. Using libMesh::null_output_iterator allows the receive to be dealt with solely by Parallel::unpack(), for objects whose unpack() is written so as to not leak memory when used in this fashion.

A future version of this method should be created to preallocate memory when receiving vectors...

void Parallel::unpack(vector<int>::iterator in, T ** out, Context *) is used to unserialize type T, typically into a new heap-allocated object whose pointer is returned as *out.

unsigned int Parallel::packed_size(const T *, vector<int>::const_iterator) is used to advance to the beginning of the next object's data.

Definition at line 2676 of file parallel_implementation.h.

References libMesh::Parallel::MessageTag::MessageTag(), libMesh::Parallel::receive(), libMesh::Parallel::Status::source(), libMesh::Parallel::Status::tag(), and libMesh::Parallel::unpack_range().

Referenced by libMesh::MeshCommunication::gather_neighboring_elements(), receive(), libMesh::MeshCommunication::redistribute(), and libMesh::MeshCommunication::send_coarse_ghosts().

2681 {
2682  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
2683 
2684  // Receive serialized variable size objects as sequences of buffer_t
2685  std::size_t total_buffer_size = 0;
2686  Status stat = this->receive(src_processor_id, total_buffer_size, tag);
2687 
2688  // Use stat.source() and stat.tag() in subsequent receives - if
2689  // src_processor_id or tag is "any" then we want to be sure we
2690  // try to receive messages all corresponding to the same send.
2691 
2692  std::size_t received_buffer_size = 0;
2693  while (received_buffer_size < total_buffer_size)
2694  {
2695  std::vector<buffer_t> buffer;
2696  this->receive(stat.source(), buffer, MessageTag(stat.tag()));
2697  received_buffer_size += buffer.size();
2698  Parallel::unpack_range
2699  (buffer, context, out_iter, output_type);
2700  }
2701 }
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
void unpack_range(const typename std::vector< buffertype > &buffer, Context *context, OutputIter out, const T *output_type)
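
A pairing sketch with the blocking send_packed_range() documented below; Widget, WidgetContext and the tag value are hypothetical names, and a user-provided Parallel::Packing<Widget *> specialization is assumed for the type being communicated:

  std::vector<Widget *> widgets, received;             // pointers to heap-allocated objects
  WidgetContext context;
  Parallel::MessageTag tag = comm.get_unique_tag(7);

  if (comm.rank() == 0)
    comm.send_packed_range(1, &context, widgets.begin(), widgets.end(), tag);
  else if (comm.rank() == 1)
    comm.receive_packed_range(0, &context,
                              std::back_inserter(received),
                              (Widget **)libmesh_nullptr, tag);
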
void libMesh::Parallel::Communicator::reference_unique_tag ( int  tagvalue) const
inline

Reference an already-acquired tag, so that we know it will be dereferenced multiple times before we can re-release it.

Definition at line 1322 of file parallel_implementation.h.

References libMesh::libmesh_assert().

Referenced by libMesh::Parallel::MessageTag::MessageTag().

1323 {
1324  // This had better be an already-acquired tag.
1325  libmesh_assert(used_tag_values.count(tagvalue));
1326 
1327  used_tag_values[tagvalue]++;
1328 }
libmesh_assert(j)
std::map< int, unsigned int > used_tag_values
Definition: parallel.h:705
template<typename T >
void libMesh::Parallel::Communicator::scatter ( const std::vector< T > &  data,
T &  recv,
const unsigned int  root_id = 0 
) const
inline

Take a vector of local variables and scatter the ith item to the ith processor in the communicator. The result is saved into recv.

Definition at line 3241 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libmesh_nullptr.

Referenced by receive().

3244 {
3245  libmesh_assert_less (root_id, this->size());
3246 
3247  // Do not allow the root_id to scatter a NULL vector.
3248  // That would leave recv in an indeterminate state.
3249  libmesh_assert (this->rank() != root_id || this->size() == data.size());
3250 
3251  if (this->size() == 1)
3252  {
3253  libmesh_assert (!this->rank());
3254  libmesh_assert (!root_id);
3255  recv = data[0];
3256  return;
3257  }
3258 
3259  LOG_SCOPE("scatter()", "Parallel");
3260 
3261  T * data_ptr = const_cast<T*>(data.empty() ? libmesh_nullptr : &data[0]);
3262 
3263  libmesh_call_mpi
3264  (MPI_Scatter (data_ptr, 1, StandardType<T>(data_ptr),
3265  &recv, 1, StandardType<T>(&recv), root_id, this->get()));
3266 }
unsigned int size() const
Definition: parallel.h:679
const class libmesh_nullptr_t libmesh_nullptr
libmesh_assert(j)
unsigned int rank() const
Definition: parallel.h:677
IterBase * data
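
A usage sketch (all names and values are illustrative):

  std::vector<int> all_ids;
  if (comm.rank() == 0)
    for (unsigned int p = 0; p != comm.size(); ++p)
      all_ids.push_back(int(100 + p));   // one entry per processor, filled on the root only
  int my_id = 0;
  comm.scatter(all_ids, my_id, 0);       // rank p now holds 100 + p
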
template<typename T >
void libMesh::Parallel::Communicator::scatter ( const std::vector< T > &  data,
std::vector< T > &  recv,
const unsigned int  root_id = 0 
) const
inline

Take a vector of local variables and scatter the ith equal-sized chunk to the ith processor in the communicator. The data size must be a multiple of the communicator size. The result is saved into the recv buffer, which does not have to be sized prior to this operation.

Definition at line 3271 of file parallel_implementation.h.

References libMesh::Parallel::broadcast(), libMesh::libmesh_assert(), and libmesh_nullptr.

3274 {
3275  libmesh_assert_less (root_id, this->size());
3276 
3277  if (this->size() == 1)
3278  {
3279  libmesh_assert (!this->rank());
3280  libmesh_assert (!root_id);
3281  recv.assign(data.begin(), data.end());
3282  return;
3283  }
3284 
3285  LOG_SCOPE("scatter()", "Parallel");
3286 
3287  int recv_buffer_size;
3288  if (this->rank() == root_id)
3289  {
3290  libmesh_assert(data.size() % this->size() == 0);
3291  recv_buffer_size = data.size() / this->size();
3292  }
3293 
3294  this->broadcast(recv_buffer_size);
3295  recv.resize(recv_buffer_size);
3296 
3297  T * data_ptr = const_cast<T*>(data.empty() ? libmesh_nullptr : &data[0]);
3298  T * recv_ptr = recv.empty() ? libmesh_nullptr : &recv[0];
3299 
3300  libmesh_call_mpi
3301  (MPI_Scatter (data_ptr, recv_buffer_size, StandardType<T>(data_ptr),
3302  recv_ptr, recv_buffer_size, StandardType<T>(recv_ptr), root_id, this->get()));
3303 }
unsigned int size() const
Definition: parallel.h:679
const class libmesh_nullptr_t libmesh_nullptr
libmesh_assert(j)
void broadcast(T &data, const unsigned int root_id=0) const
unsigned int rank() const
Definition: parallel.h:677
IterBase * data
template<typename T >
void libMesh::Parallel::Communicator::scatter ( const std::vector< T > &  data,
const std::vector< int >  counts,
std::vector< T > &  recv,
const unsigned int  root_id = 0 
) const
inline

Take a vector of local variables and scatter the ith variable-sized chunk to the ith processor in the communicator. The counts vector should contain the number of items for each processor. The result is saved into the recv buffer, which does not have to be sized prior to this operation.

Definition at line 3308 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libmesh_nullptr.

3312 {
3313  libmesh_assert_less (root_id, this->size());
3314 
3315  if (this->size() == 1)
3316  {
3317  libmesh_assert (!this->rank());
3318  libmesh_assert (!root_id);
3319  libmesh_assert (counts.size() == this->size());
3320  recv.assign(data.begin(), data.begin() + counts[0]);
3321  return;
3322  }
3323 
3324  std::vector<int> displacements(this->size(), 0);
3325  if (root_id == this->rank())
3326  {
3327  libmesh_assert(counts.size() == this->size());
3328 
3329  // Create a displacements vector from the incoming counts vector
3330  unsigned int globalsize = 0;
3331  for (unsigned int i=0; i < this->size(); ++i)
3332  {
3333  displacements[i] = globalsize;
3334  globalsize += counts[i];
3335  }
3336 
3337  libmesh_assert(data.size() == globalsize);
3338  }
3339 
3340  LOG_SCOPE("scatter()", "Parallel");
3341 
3342  // Scatter the buffer sizes to size remote buffers
3343  int recv_buffer_size;
3344  this->scatter(counts, recv_buffer_size, root_id);
3345  recv.resize(recv_buffer_size);
3346 
3347  T * data_ptr = const_cast<T*>(data.empty() ? libmesh_nullptr : &data[0]);
3348  int * count_ptr = const_cast<int*>(counts.empty() ? libmesh_nullptr : &counts[0]);
3349  T * recv_ptr = recv.empty() ? libmesh_nullptr : &recv[0];
3350 
3351  // Scatter the non-uniform chunks
3352  libmesh_call_mpi
3353  (MPI_Scatterv (data_ptr, count_ptr, &displacements[0], StandardType<T>(data_ptr),
3354  recv_ptr, recv_buffer_size, StandardType<T>(recv_ptr), root_id, this->get()));
3355 }
void scatter(const std::vector< T > &data, T &recv, const unsigned int root_id=0) const
unsigned int size() const
Definition: parallel.h:679
const class libmesh_nullptr_t libmesh_nullptr
libmesh_assert(j)
unsigned int rank() const
Definition: parallel.h:677
IterBase * data
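
A variable-chunk sketch (illustrative names and sizes; the root builds both the concatenated data and the per-processor counts):

  std::vector<double> flat;     // all chunks concatenated, meaningful on the root only
  std::vector<int> counts;      // counts[p] = number of entries destined for rank p
  if (comm.rank() == 0)
    {
      counts.resize(comm.size());
      for (unsigned int p = 0; p != comm.size(); ++p)
        {
          counts[p] = int(p) + 1;              // rank p receives p+1 entries
          for (int i = 0; i != counts[p]; ++i)
            flat.push_back(double(p));
        }
    }
  std::vector<double> mine;
  comm.scatter(flat, counts, mine, 0);         // on rank p, mine.size() == p+1
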
template<typename T >
void libMesh::Parallel::Communicator::scatter ( const std::vector< std::vector< T > > &  data,
std::vector< T > &  recv,
const unsigned int  root_id = 0,
const bool  identical_buffer_sizes = false 
) const
inline

Take a vector of vectors and scatter the ith inner vector to the ith processor in the communicator. The result is saved into the recv buffer, which does not have to be sized prior to this operation.

Definition at line 3360 of file parallel_implementation.h.

References end, and libMesh::libmesh_assert().

3364 {
3365  libmesh_assert_less (root_id, this->size());
3366 
3367  if (this->size() == 1)
3368  {
3369  libmesh_assert (!this->rank());
3370  libmesh_assert (!root_id);
3371  libmesh_assert (data.size() == this->size());
3372  recv.assign(data[0].begin(), data[0].end());
3373  return;
3374  }
3375 
3376  std::vector<T> stacked_data;
3377  std::vector<int> counts;
3378 
3379  if (root_id == this->rank())
3380  {
3381  libmesh_assert (data.size() == this->size());
3382 
3383  if (!identical_buffer_sizes)
3384  counts.resize(this->size());
3385 
3386  for (std::size_t i=0; i < data.size(); ++i)
3387  {
3388  if (!identical_buffer_sizes)
3389  counts[i] = data[i].size();
3390 #ifndef NDEBUG
3391  else
3392  // Check that buffer sizes are indeed equal
3393  libmesh_assert(!i || data[i-1].size() == data[i].size());
3394 #endif
3395  std::copy(data[i].begin(), data[i].end(), std::back_inserter(stacked_data));
3396  }
3397  }
3398 
3399  if (identical_buffer_sizes)
3400  this->scatter(stacked_data, recv, root_id);
3401  else
3402  this->scatter(stacked_data, counts, recv, root_id);
3403 }
void scatter(const std::vector< T > &data, T &recv, const unsigned int root_id=0) const
unsigned int size() const
Definition: parallel.h:679
IterBase * end
libmesh_assert(j)
unsigned int rank() const
Definition: parallel.h:677
IterBase * data
template<typename T >
bool libMesh::Parallel::Communicator::semiverify ( const T *  r) const
inline

Verify that a local pointer points to the same value on all processors where it is not NULL. Containers must have the same value in every entry.

Definition at line 1398 of file parallel_implementation.h.

References libMesh::Parallel::max(), libMesh::Parallel::min(), libMesh::Parallel::Attributes< T >::set_highest(), and libMesh::Parallel::Attributes< T >::set_lowest().

Referenced by broadcast_packed_range(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::MeshTools::libmesh_assert_valid_unique_ids(), and semiverify().

1399 {
1400  if (this->size() > 1 && Attributes<T>::has_min_max == true)
1401  {
1402  T tempmin, tempmax;
1403  if (r)
1404  tempmin = tempmax = *r;
1405  else
1406  {
1407  Attributes<T>::set_highest(tempmin);
1408  Attributes<T>::set_lowest(tempmax);
1409  }
1410  this->min(tempmin);
1411  this->max(tempmax);
1412  bool invalid = r && ((*r != tempmin) ||
1413  (*r != tempmax));
1414  this->max(invalid);
1415  return !invalid;
1416  }
1417 
1418 #ifdef LIBMESH_HAVE_CXX11
1419  static_assert(Attributes<T>::has_min_max,
1420  "Tried to semiverify an unverifiable type");
1421 #endif
1422 
1423  return true;
1424 }
unsigned int size() const
Definition: parallel.h:679
static void set_lowest(T &)
Definition: parallel.h:404
static void set_highest(T &)
Definition: parallel.h:405
static const bool has_min_max
Definition: parallel.h:403
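
A debugging-style sketch (the rank condition and value are illustrative): only ranks that actually hold the value pass a non-NULL pointer, and semiverify() checks that all non-NULL values agree.

  const double shared_constant = 0.5;    // must agree on every rank that defines it
  const double * ptr = libmesh_nullptr;
  if (comm.rank() % 2 == 0)              // illustrative: only even ranks hold the value
    ptr = &shared_constant;
  libmesh_assert(comm.semiverify(ptr));
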
template<>
bool libMesh::Parallel::Communicator::semiverify ( const bool *  r) const
inline

Definition at line 1429 of file parallel_implementation.h.

References libmesh_nullptr.

1430 {
1431  if (r)
1432  {
1433  const unsigned char rnew = *r;
1434  return this->semiverify(&rnew);
1435  }
1436 
1437  const unsigned char * rptr = libmesh_nullptr;
1438  return this->semiverify(rptr);
1439 }
const class libmesh_nullptr_t libmesh_nullptr
template<typename T >
bool libMesh::Parallel::Communicator::semiverify ( const std::vector< T > *  r) const
inline

Definition at line 1444 of file parallel_implementation.h.

References libmesh_nullptr, libMesh::Parallel::max(), libMesh::Parallel::min(), semiverify(), libMesh::Parallel::set_highest(), libMesh::Parallel::set_lowest(), verify(), and libMesh::Parallel::verify().

1445 {
1446  if (this->size() > 1 && Attributes<T>::has_min_max == true)
1447  {
1448  std::size_t rsize = r ? r->size() : 0;
1449  std::size_t * psize = r ? &rsize : libmesh_nullptr;
1450 
1451  if (!this->semiverify(psize))
1452  return false;
1453 
1454  this->max(rsize);
1455 
1456  std::vector<T> tempmin, tempmax;
1457  if (r)
1458  {
1459  tempmin = tempmax = *r;
1460  }
1461  else
1462  {
1463  tempmin.resize(rsize);
1464  tempmax.resize(rsize);
1465  Attributes<std::vector<T> >::set_highest(tempmin);
1466  Attributes<std::vector<T> >::set_lowest(tempmax);
1467  }
1468  this->min(tempmin);
1469  this->max(tempmax);
1470  bool invalid = r && ((*r != tempmin) ||
1471  (*r != tempmax));
1472  this->max(invalid);
1473  return !invalid;
1474  }
1475 
1476 #ifdef LIBMESH_HAVE_CXX11
1477  static_assert(Attributes<T>::has_min_max,
1478  "Tried to semiverify a vector of an unverifiable type");
1479 #endif
1480 
1481  return true;
1482 }
unsigned int size() const
Definition: parallel.h:679
const class libmesh_nullptr_t libmesh_nullptr
static void set_lowest(unsigned long long &x)
static void set_highest(unsigned long long &x)
static const bool has_min_max
Definition: parallel.h:403
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const T &  buf,
const MessageTag tag = no_tag 
) const
inline

Blocking-send to one processor with data-defined type.

We do not currently support sends on one processor without MPI.

Definition at line 2110 of file parallel_implementation.h.

References libMesh::Parallel::MessageTag::value().

Referenced by libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::SystemSubsetBySubdomain::init(), libMesh::SparseMatrix< T >::print(), libMesh::Nemesis_IO::read(), set_union(), libMesh::XdrIO::write_serialized_bcs_helper(), libMesh::XdrIO::write_serialized_connectivity(), libMesh::XdrIO::write_serialized_nodes(), libMesh::XdrIO::write_serialized_nodesets(), and libMesh::Parallel::StandardType< std::complex< T > >::~StandardType().

2113 {
2114  LOG_SCOPE("send()", "Parallel");
2115 
2116  T * dataptr = const_cast<T*> (&buf);
2117 
2118  libmesh_call_mpi
2119  (((this->send_mode() == SYNCHRONOUS) ?
2120  MPI_Ssend : MPI_Send) (dataptr,
2121  1,
2122  StandardType<T>(dataptr),
2123  dest_processor_id,
2124  tag.value(),
2125  this->get()));
2126 }
SendMode send_mode() const
Definition: parallel.h:719
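
A two-rank point-to-point sketch (assumes exactly two processors; the tag value and payload are illustrative):

  Parallel::MessageTag tag = comm.get_unique_tag(1234);
  if (comm.rank() == 0)
    {
      std::vector<double> payload(10, 3.14);
      comm.send(1, payload, tag);
    }
  else if (comm.rank() == 1)
    {
      std::vector<double> payload;
      comm.receive(0, payload, tag);   // the vector overload probes and resizes payload
    }
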
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const T &  buf,
Request req,
const MessageTag tag = no_tag 
) const
inline

Nonblocking-send to one processor with data-defined type.

Definition at line 2131 of file parallel_implementation.h.

References libMesh::Parallel::Request::get(), and libMesh::Parallel::MessageTag::value().

2135 {
2136  LOG_SCOPE("send()", "Parallel");
2137 
2138  T * dataptr = const_cast<T*>(&buf);
2139 
2140  libmesh_call_mpi
2141  (((this->send_mode() == SYNCHRONOUS) ?
2142  MPI_Issend : MPI_Isend) (dataptr,
2143  1,
2144  StandardType<T>(dataptr),
2145  dest_processor_id,
2146  tag.value(),
2147  this->get(),
2148  req.get()));
2149 }
SendMode send_mode() const
Definition: parallel.h:719
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const T &  buf,
const DataType type,
const MessageTag tag = no_tag 
) const
inline

Blocking-send to one processor with user-defined type.

Definition at line 3800 of file parallel_implementation.h.

3804 { libmesh_not_implemented(); }
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const T &  buf,
const DataType type,
Request req,
const MessageTag tag = no_tag 
) const
inline

Nonblocking-send to one processor with user-defined type.

Definition at line 3807 of file parallel_implementation.h.

References receive(), and send_packed_range().

3812 { libmesh_not_implemented(); }
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::basic_string< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2064 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::MessageTag::value().

2067 {
2068  LOG_SCOPE("send()", "Parallel");
2069 
2070  T * dataptr = buf.empty() ? libmesh_nullptr : const_cast<T *>(buf.data());
2071 
2072  libmesh_call_mpi
2073  (((this->send_mode() == SYNCHRONOUS) ?
2074  MPI_Ssend : MPI_Send) (dataptr,
2075  cast_int<int>(buf.size()),
2076  StandardType<T>(dataptr),
2077  dest_processor_id,
2078  tag.value(),
2079  this->get()));
2080 }
const class libmesh_nullptr_t libmesh_nullptr
SendMode send_mode() const
Definition: parallel.h:719
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::basic_string< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2085 of file parallel_implementation.h.

References libMesh::Parallel::Request::get(), libmesh_nullptr, and libMesh::Parallel::MessageTag::value().

2089 {
2090  LOG_SCOPE("send()", "Parallel");
2091 
2092  T * dataptr = buf.empty() ? libmesh_nullptr : const_cast<T *>(buf.data());
2093 
2094  std::cerr<<"Sending: "<<buf.size()<<std::endl;
2095 
2096  libmesh_call_mpi
2097  (((this->send_mode() == SYNCHRONOUS) ?
2098  MPI_Issend : MPI_Isend) (dataptr,
2099  cast_int<int>(buf.size()),
2100  StandardType<T>(dataptr),
2101  dest_processor_id,
2102  tag.value(),
2103  this->get(),
2104  req.get()));
2105 }
const class libmesh_nullptr_t libmesh_nullptr
SendMode send_mode() const
Definition: parallel.h:719
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::set< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2154 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send().

2157 {
2158  this->send(dest_processor_id, buf,
2159  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), tag);
2160 }
const class libmesh_nullptr_t libmesh_nullptr
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::set< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2165 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send().

2169 {
2170  this->send(dest_processor_id, buf,
2171  StandardType<T>(buf.empty() ? libmesh_nullptr : &(*buf.begin())), req, tag);
2172 }
const class libmesh_nullptr_t libmesh_nullptr
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::set< T > &  buf,
const DataType type,
const MessageTag tag 
) const
inline

Definition at line 2177 of file parallel_implementation.h.

References libMesh::Parallel::send().

2181 {
2182  LOG_SCOPE("send()", "Parallel");
2183 
2184  std::vector<T> vecbuf(buf.begin(), buf.end());
2185  this->send(dest_processor_id, vecbuf, type, tag);
2186 }
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::set< T > &  buf,
const DataType type,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2191 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), and libMesh::Parallel::send().

2196 {
2197  LOG_SCOPE("send()", "Parallel");
2198 
2199  // Allocate temporary buffer on the heap so it lives until after
2200  // the non-blocking send completes
2201  std::vector<T> * vecbuf =
2202  new std::vector<T>(buf.begin(), buf.end());
2203 
2204  // Make the Request::wait() handle deleting the buffer
2205  req.add_post_wait_work
2206  (new Parallel::PostWaitDeleteBuffer<std::vector<T> >(vecbuf));
2207 
2208  this->send(dest_processor_id, *vecbuf, type, req, tag);
2209 }
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::vector< T > &  buf,
const MessageTag tag 
) const
inline

Definition at line 2214 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send().

2217 {
2218  this->send(dest_processor_id, buf,
2219  StandardType<T>(buf.empty() ? libmesh_nullptr : &buf.front()), tag);
2220 }
const class libmesh_nullptr_t libmesh_nullptr
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::vector< T > &  buf,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2225 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send().

2229 {
2230  this->send(dest_processor_id, buf,
2231  StandardType<T>(buf.empty() ? libmesh_nullptr : &buf.front()), req, tag);
2232 }
const class libmesh_nullptr_t libmesh_nullptr
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::vector< T > &  buf,
const DataType type,
const MessageTag tag 
) const
inline

Definition at line 2237 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::MessageTag::value().

2241 {
2242  LOG_SCOPE("send()", "Parallel");
2243 
2244  libmesh_call_mpi
2245  (((this->send_mode() == SYNCHRONOUS) ?
2246  MPI_Ssend : MPI_Send) (buf.empty() ? libmesh_nullptr : const_cast<T*>(&buf[0]),
2247  cast_int<int>(buf.size()),
2248  type,
2249  dest_processor_id,
2250  tag.value(),
2251  this->get()));
2252 }
const class libmesh_nullptr_t libmesh_nullptr
SendMode send_mode() const
Definition: parallel.h:719
template<typename T >
void libMesh::Parallel::Communicator::send ( const unsigned int  dest_processor_id,
const std::vector< T > &  buf,
const DataType type,
Request req,
const MessageTag tag 
) const
inline

Definition at line 2257 of file parallel_implementation.h.

References libMesh::Parallel::Request::get(), libmesh_nullptr, and libMesh::Parallel::MessageTag::value().

2262 {
2263  LOG_SCOPE("send()", "Parallel");
2264 
2265  libmesh_call_mpi
2266  (((this->send_mode() == SYNCHRONOUS) ?
2267  MPI_Issend : MPI_Isend) (buf.empty() ? libmesh_nullptr : const_cast<T*>(&buf[0]),
2268  cast_int<int>(buf.size()),
2269  type,
2270  dest_processor_id,
2271  tag.value(),
2272  this->get(),
2273  req.get()));
2274 }
const class libmesh_nullptr_t libmesh_nullptr
SendMode send_mode() const
Definition: parallel.h:719
void libMesh::Parallel::Communicator::send_mode ( const SendMode  sm)
inline

Explicitly sets the SendMode type used for send operations.

Definition at line 714 of file parallel.h.

Referenced by duplicate(), and split().

714 { _send_mode = sm; }
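
For example, synchronous sends can be enabled temporarily (e.g. to expose unmatched messages while debugging) and then restored (a sketch):

  comm.send_mode(Parallel::Communicator::SYNCHRONOUS);
  // ... communication-heavy code to be checked ...
  comm.send_mode(Parallel::Communicator::DEFAULT);
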
template<typename Context , typename Iter >
void libMesh::Parallel::Communicator::send_packed_range ( const unsigned int  dest_processor_id,
const Context *  context,
Iter  range_begin,
const Iter  range_end,
const MessageTag tag = no_tag 
) const
inline

Blocking-send range-of-pointers to one processor. This function does not send the raw pointers, but rather constructs new objects at the other end whose contents match the objects pointed to by the sender.

void Parallel::pack(const T *, vector<int> & data, const Context *) is used to serialize type T onto the end of a data vector.

unsigned int Parallel::packable_size(const T *, const Context *) is used to allow data vectors to reserve memory, and for additional error checking

Definition at line 2278 of file parallel_implementation.h.

References libMesh::Parallel::pack_range(), libMesh::Parallel::packed_range_size(), and libMesh::Parallel::send().

Referenced by libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::MeshCommunication::redistribute(), send(), and libMesh::MeshCommunication::send_coarse_ghosts().

2283 {
2284  // We will serialize variable size objects from *range_begin to
2285  // *range_end as a sequence of plain data (e.g. ints) in this buffer
2286  typedef typename std::iterator_traits<Iter>::value_type T;
2287 
2288  std::size_t total_buffer_size =
2289  Parallel::packed_range_size (context, range_begin, range_end);
2290 
2291  this->send(dest_processor_id, total_buffer_size, tag);
2292 
2293 #ifdef DEBUG
2294  std::size_t used_buffer_size = 0;
2295 #endif
2296 
2297  while (range_begin != range_end)
2298  {
2299  libmesh_assert_greater (std::distance(range_begin, range_end), 0);
2300 
2301  std::vector<typename Parallel::Packing<T>::buffer_type> buffer;
2302 
2303  const Iter next_range_begin = Parallel::pack_range
2304  (context, range_begin, range_end, buffer);
2305 
2306  libmesh_assert_greater (std::distance(range_begin, next_range_begin), 0);
2307 
2308  range_begin = next_range_begin;
2309 
2310 #ifdef DEBUG
2311  used_buffer_size += buffer.size();
2312 #endif
2313 
2314  // Blocking send of the buffer
2315  this->send(dest_processor_id, buffer, tag);
2316  }
2317 
2318 #ifdef DEBUG
2319  libmesh_assert_equal_to(used_buffer_size, total_buffer_size);
2320 #endif
2321 }
Iter pack_range(const Context *context, Iter range_begin, const Iter range_end, typename std::vector< buffertype > &buffer, std::size_t approx_buffer_size=1000000)
std::size_t packed_range_size(const Context *context, Iter range_begin, const Iter range_end)
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename Context , typename Iter >
void libMesh::Parallel::Communicator::send_packed_range ( const unsigned int  dest_processor_id,
const Context *  context,
Iter  range_begin,
const Iter  range_end,
Request req,
const MessageTag tag = no_tag 
) const
inline

Nonblocking-send range-of-pointers to one processor. This function does not send the raw pointers, but rather constructs new objects at the other end whose contents match the objects pointed to by the sender.

void Parallel::pack(const T *, vector<int> & data, const Context *) is used to serialize type T onto the end of a data vector.

unsigned int Parallel::packable_size(const T *, const Context *) is used to allow data vectors to reserve memory, and for additional error checking

Definition at line 2325 of file parallel_implementation.h.

References libMesh::Parallel::Request::add_post_wait_work(), libMesh::Parallel::Request::add_prior_request(), libMesh::Parallel::pack_range(), libMesh::Parallel::packed_range_size(), and libMesh::Parallel::send().

2331 {
2332  // Allocate a buffer on the heap so we don't have to free it until
2333  // after the Request::wait()
2334  typedef typename std::iterator_traits<Iter>::value_type T;
2335  typedef typename Parallel::Packing<T>::buffer_type buffer_t;
2336 
2337  std::size_t total_buffer_size =
2338  Parallel::packed_range_size (context, range_begin, range_end);
2339 
2340  // That local variable will be gone soon; we need a send buffer that
2341  // will stick around. I heard you like buffering so I put a buffer
2342  // for your buffer size so you can buffer the size of your buffer.
2343  std::size_t * total_buffer_size_buffer = new std::size_t;
2344  *total_buffer_size_buffer = total_buffer_size;
2345 
2346  // Delete the buffer size's buffer when we're done
2347  Request intermediate_req = request();
2348  intermediate_req.add_post_wait_work
2349  (new Parallel::PostWaitDeleteBuffer<std::size_t>(total_buffer_size_buffer));
2350  this->send(dest_processor_id, *total_buffer_size_buffer, intermediate_req, tag);
2351 
2352  // And don't finish up the full request until we're done with its
2353  // dependencies
2354  req.add_prior_request(intermediate_req);
2355 
2356 #ifdef DEBUG
2357  std::size_t used_buffer_size = 0;
2358 #endif
2359 
2360  while (range_begin != range_end)
2361  {
2362  libmesh_assert_greater (std::distance(range_begin, range_end), 0);
2363 
2364  std::vector<buffer_t> * buffer = new std::vector<buffer_t>();
2365 
2366  const Iter next_range_begin =
2367  Parallel::pack_range(context, range_begin, range_end,
2368  *buffer);
2369 
2370  libmesh_assert_greater (std::distance(range_begin, next_range_begin), 0);
2371 
2372  range_begin = next_range_begin;
2373 
2374 #ifdef DEBUG
2375  used_buffer_size += buffer->size();
2376 #endif
2377 
2378  Request next_intermediate_req;
2379 
2380  Request * my_req = (range_begin == range_end) ? &req : &next_intermediate_req;
2381 
2382  // Make the Request::wait() handle deleting the buffer
2383  my_req->add_post_wait_work
2384  (new Parallel::PostWaitDeleteBuffer<std::vector<buffer_t> >
2385  (buffer));
2386 
2387  // Non-blocking send of the buffer
2388  this->send(dest_processor_id, *buffer, *my_req, tag);
2389 
2390  if (range_begin != range_end)
2391  req.add_prior_request(*my_req);
2392  }
2393 }
Iter pack_range(const Context *context, Iter range_begin, const Iter range_end, typename std::vector< buffertype > &buffer, std::size_t approx_buffer_size=1000000)
MPI_Request request
Definition: parallel.h:167
std::size_t packed_range_size(const Context *context, Iter range_begin, const Iter range_end)
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
template<typename T1 , typename T2 >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  send_tgt,
const T1 &  send_val,
const unsigned int  recv_source,
T2 &  recv_val,
const MessageTag send_tag = no_tag,
const MessageTag recv_tag = any_tag 
) const
inline

Send data send to one processor while simultaneously receiving other data recv from a (potentially different) processor.

Send-receive data from one processor.

Definition at line 2794 of file parallel_implementation.h.

References libMesh::Parallel::MessageTag::value().

Referenced by libMesh::ParmetisPartitioner::assign_partitioning(), libMesh::MeshCommunication::find_global_indices(), libMesh::SparsityPattern::Build::parallel_sync(), receive(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::Partitioner::set_node_processor_ids(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::Parallel::sync_dofobject_data_by_id(), libMesh::Parallel::sync_dofobject_data_by_xyz(), and libMesh::XdrIO::write_serialized_connectivity().

2800 {
2801  LOG_SCOPE("send_receive()", "Parallel");
2802 
2803  if (dest_processor_id == this->rank() &&
2804  source_processor_id == this->rank())
2805  {
2806  recv = sendvec;
2807  return;
2808  }
2809 
2810  // MPI_STATUS_IGNORE is from MPI-2; using it with some versions of
2811  // MPICH may cause a crash:
2812  // https://bugzilla.mcs.anl.gov/globus/show_bug.cgi?id=1798
2813 #if MPI_VERSION > 1
2814  libmesh_call_mpi
2815  (MPI_Sendrecv(const_cast<T1*>(&sendvec), 1, StandardType<T1>(&sendvec),
2816  dest_processor_id, send_tag.value(), &recv, 1,
2817  StandardType<T2>(&recv), source_processor_id,
2818  recv_tag.value(), this->get(), MPI_STATUS_IGNORE));
2819 #else
2820  MPI_Status stat;
2821  libmesh_call_mpi
2822  (MPI_Sendrecv(const_cast<T1*>(&sendvec), 1, StandardType<T1>(&sendvec),
2823  dest_processor_id, send_tag.value(), &recv, 1,
2824  StandardType<T2>(&recv), source_processor_id,
2825  recv_tag.value(), this->get(), &stat));
2826 #endif
2827 }
unsigned int rank() const
Definition: parallel.h:677
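
A ring-exchange sketch: each rank sends to its right neighbor while receiving from its left neighbor in a single call, avoiding the ordering deadlocks of separate blocking send/receive pairs (names and values are illustrative):

  const unsigned int right = (comm.rank() + 1) % comm.size();
  const unsigned int left  = (comm.rank() + comm.size() - 1) % comm.size();
  double outgoing = double(comm.rank());
  double incoming = 0.;
  comm.send_receive(right, outgoing, left, incoming);
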
template<typename T1 , typename T2 >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const T1 &  send,
const DataType type1,
const unsigned int  source_processor_id,
T2 &  recv,
const DataType type2,
const MessageTag send_tag = no_tag,
const MessageTag recv_tag = any_tag 
) const

Send data send to one processor while simultaneously receiving other data recv from a (potentially different) processor, using a user-specified MPI Dataype.

template<typename T1 , typename T2 >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const std::vector< T1 > &  sendvec,
const DataType type1,
const unsigned int  source_processor_id,
std::vector< T2 > &  recv,
const DataType type2,
const MessageTag send_tag,
const MessageTag recv_tag 
) const
inline

Definition at line 2764 of file parallel_implementation.h.

References libMesh::Parallel::receive(), libMesh::Parallel::send(), and libMesh::Parallel::Request::wait().

2772 {
2773  LOG_SCOPE("send_receive()", "Parallel");
2774 
2775  if (dest_processor_id == this->rank() &&
2776  source_processor_id == this->rank())
2777  {
2778  recv = sendvec;
2779  return;
2780  }
2781 
2782  Parallel::Request req;
2783 
2784  this->send (dest_processor_id, sendvec, type1, req, send_tag);
2785 
2786  this->receive (source_processor_id, recv, type2, recv_tag);
2787 
2788  req.wait();
2789 }
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
unsigned int rank() const
Definition: parallel.h:677
template<typename T >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const std::vector< T > &  sendvec,
const unsigned int  source_processor_id,
std::vector< T > &  recv,
const MessageTag send_tag,
const MessageTag recv_tag 
) const
inline

Definition at line 2839 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send_receive().

2845 {
2846  if (dest_processor_id == this->rank() &&
2847  source_processor_id == this->rank())
2848  {
2849  LOG_SCOPE("send_receive()", "Parallel");
2850  recv = sendvec;
2851  return;
2852  }
2853 
2854  const T* example = sendvec.empty() ?
2855  (recv.empty() ? libmesh_nullptr : &recv[0]) : &sendvec[0];
2856 
2857  // Call the user-defined type version with automatic
2858  // type conversion based on template argument:
2859  this->send_receive (dest_processor_id, sendvec,
2860  StandardType<T>(example),
2861  source_processor_id, recv,
2862  StandardType<T>(example),
2863  send_tag, recv_tag);
2864 }
const class libmesh_nullptr_t libmesh_nullptr
void send_receive(const unsigned int dest_processor_id, const T1 &send, const unsigned int source_processor_id, T2 &recv, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
unsigned int rank() const
Definition: parallel.h:677
template<typename T1 , typename T2 >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const std::vector< T1 > &  sendvec,
const unsigned int  source_processor_id,
std::vector< T2 > &  recv,
const MessageTag send_tag,
const MessageTag recv_tag 
) const
inline

Definition at line 2870 of file parallel_implementation.h.

References libmesh_nullptr, and libMesh::Parallel::send_receive().

2876 {
2877  // Call the user-defined type version with automatic
2878  // type conversion based on template argument:
2879  this->send_receive (dest_processor_id, sendvec,
2880  StandardType<T1>(sendvec.empty() ? libmesh_nullptr : &sendvec[0]),
2881  source_processor_id, recv,
2882  StandardType<T2>(recv.empty() ? libmesh_nullptr : &recv[0]),
2883  send_tag, recv_tag);
2884 }
const class libmesh_nullptr_t libmesh_nullptr
void send_receive(const unsigned int dest_processor_id, const T1 &send, const unsigned int source_processor_id, T2 &recv, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
template<typename T1 , typename T2 >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const std::vector< std::vector< T1 > > &  sendvec,
const unsigned int  source_processor_id,
std::vector< std::vector< T2 > > &  recv,
const MessageTag ,
const MessageTag  
) const
inline

Definition at line 2890 of file parallel_implementation.h.

References libMesh::Parallel::any_tag, and libMesh::Parallel::no_tag.

2896 {
2897  // FIXME - why aren't we honoring send_tag and recv_tag here?
2898  send_receive_vec_of_vec
2899  (dest_processor_id, sendvec, source_processor_id, recv,
2900  no_tag, any_tag, *this);
2901 }
template<typename T >
void libMesh::Parallel::Communicator::send_receive ( const unsigned int  dest_processor_id,
const std::vector< std::vector< T > > &  sendvec,
const unsigned int  source_processor_id,
std::vector< std::vector< T > > &  recv,
const MessageTag &,
const MessageTag & 
) const
inline

Definition at line 2908 of file parallel_implementation.h.

References libMesh::Parallel::any_tag, and libMesh::Parallel::no_tag.

2914 {
2915  // FIXME - why aren't we honoring send_tag and recv_tag here?
2916  send_receive_vec_of_vec
2917  (dest_processor_id, sendvec, source_processor_id, recv,
2918  no_tag, any_tag, *this);
2919 }
template<typename Context1 , typename RangeIter , typename Context2 , typename OutputIter , typename T >
void libMesh::Parallel::Communicator::send_receive_packed_range ( const unsigned int  dest_processor_id,
const Context1 *  context1,
RangeIter  send_begin,
const RangeIter  send_end,
const unsigned int  source_processor_id,
Context2 *  context2,
OutputIter  out_iter,
const T *  output_type,
const MessageTag send_tag = no_tag,
const MessageTag recv_tag = any_tag 
) const
inline

Send a range-of-pointers to one processor while simultaneously receiving another range from a (potentially different) processor. This function does not send or receive raw pointers, but rather constructs new objects at each receiver whose contents match the objects pointed to by the sender.

The objects being sent will be of type T1 = iterator_traits<RangeIter>::value_type, and the objects being received will be of type T2 = iterator_traits<OutputIter>::value_type.

void Parallel::pack(const T1*, vector<int> & data, const Context1*) is used to serialize type T1 onto the end of a data vector.

Using std::back_inserter as the output iterator allows send_receive to fill any container type. Using libMesh::null_output_iterator allows the receive to be dealt with solely by Parallel::unpack(), for objects whose unpack() is written so as to not leak memory when used in this fashion.

A future version of this method should be created to preallocate memory when receiving vectors...

void Parallel::unpack(vector<int>::iterator in, T2** out, Context *) is used to deserialize type T2, typically into a new heap-allocated object whose pointer is returned as *out.

unsigned int Parallel::packable_size(const T1*, const Context1*) is used to allow data vectors to reserve memory, and for additional error checking.

unsigned int Parallel::packed_size(const T2*, vector<int>::const_iterator) is used to advance to the beginning of the next object's data.

Send-receive range-of-pointers from one processor.

If you call this without MPI you might be making a mistake, but we'll support it.
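
As a hedged usage sketch (not from the libMesh sources): MyElem, MyContext and the helper functions below are hypothetical, and Parallel::pack(), Parallel::unpack(), Parallel::packable_size() and Parallel::packed_size() are assumed to have been specialized for MyElem as described above.

 const unsigned int dest   = (comm.rank() + 1) % comm.size();
 const unsigned int source = (comm.rank() + comm.size() - 1) % comm.size();

 std::vector<MyElem *> to_send = collect_local_objects();  // hypothetical helper
 std::vector<MyElem *> received;                            // new objects are built here
 MyContext * context = get_context();                       // hypothetical (un)packing context

 comm.send_receive_packed_range
   (dest, context, to_send.begin(), to_send.end(),
    source, context, std::back_inserter(received),          // needs <iterator>
    (MyElem **)libmesh_nullptr);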

Definition at line 2927 of file parallel_implementation.h.

References libMesh::Parallel::receive_packed_range(), libMesh::Parallel::send_packed_range(), and libMesh::Parallel::Request::wait().

Referenced by receive().

2937 {
2938  LOG_SCOPE("send_receive()", "Parallel");
2939 
2940  Parallel::Request req;
2941 
2942  this->send_packed_range (dest_processor_id, context1, send_begin, send_end,
2943  req, send_tag);
2944 
2945  this->receive_packed_range (source_processor_id, context2, out_iter,
2946  output_type, recv_tag);
2947 
2948  req.wait();
2949 }
template<typename T >
void libMesh::Parallel::Communicator::set_union ( T &  data,
const unsigned int  root_id 
) const
inline

Take a container of local variables on each processor, and collect their union over all processors, replacing the container on processor root_id.

Definition at line 3773 of file parallel_implementation.h.

References probe(), and send().

Referenced by libMesh::BoundaryInfo::_find_id_maps(), libMesh::MeshBase::cache_elem_dims(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), DMlibMeshSetSystem_libMesh(), set_union(), libMesh::MeshBase::subdomain_ids(), and libMesh::BoundaryInfo::sync().

3774 { libmesh_assert_equal_to(root_id, 0); }
template<typename T >
void libMesh::Parallel::Communicator::set_union ( T &  data) const
inline

Take a container of local variables on each processor, and replace it with their union over all processors.
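
A minimal usage sketch (hypothetical container; comm stands for any Parallel::Communicator):

 std::set<unsigned int> ids;
 ids.insert(comm.rank());               // each processor contributes its own entry

 // Either replace the set everywhere with the global union ...
 comm.set_union(ids);

 // ... or collect the union only on the root processor:
 // comm.set_union(ids, /*root_id=*/0);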

Definition at line 3770 of file parallel_implementation.h.

3770 {}
template<typename T >
void libMesh::Parallel::Communicator::set_union ( std::set< T > &  data,
const unsigned int  root_id 
) const
inline

Definition at line 1980 of file parallel_implementation.h.

References libMesh::Parallel::gather().

1982 {
1983  std::vector<T> vecdata(data.begin(), data.end());
1984  this->gather(root_id, vecdata);
1985  if (this->rank() == root_id)
1986  data.insert(vecdata.begin(), vecdata.end());
1987 }
template<typename T >
void libMesh::Parallel::Communicator::set_union ( std::set< T > &  data) const
inline

Definition at line 1992 of file parallel_implementation.h.

References libMesh::Parallel::allgather(), data, libMesh::Parallel::gather(), and set_union().

1993 {
1994  std::vector<T> vecdata(data.begin(), data.end());
1995  this->allgather(vecdata, false);
1996  data.insert(vecdata.begin(), vecdata.end());
1997 }
void libMesh::Parallel::Communicator::split ( int  color,
int  key,
Communicator target 
) const
inline

Definition at line 596 of file parallel_implementation.h.

References _I_duped_it, assign(), clear(), and send_mode().

597 {
598  target.clear();
599  MPI_Comm newcomm;
600  libmesh_call_mpi
601  (MPI_Comm_split(this->get(), color, key, &newcomm));
602 
603  target.assign(newcomm);
604  target._I_duped_it = true;
605  target.send_mode(this->send_mode());
606 }
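
As a hedged illustration (not part of the libMesh sources), the sketch below splits a communicator into two halves; the color/key choice is hypothetical:

 libMesh::Parallel::Communicator half;
 const int color = (comm.rank() < comm.size() / 2) ? 0 : 1;   // selects the sub-communicator
 comm.split(color, /*key=*/comm.rank(), half);                // key preserves the rank ordering
 // half.rank() and half.size() now describe this processor's sub-communicator.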
template<typename T >
void libMesh::Parallel::Communicator::sum ( T &  r) const
inline

Take a local variable and replace it with the sum of its values on all processors. Containers are replaced element-wise.
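
A minimal usage sketch (hypothetical values; comm stands for any Parallel::Communicator):

 unsigned int n_local = 42;            // e.g. a per-processor count
 comm.sum(n_local);                    // n_local now holds the global total on every rank

 std::vector<double> partial(3, 1.0);
 comm.sum(partial);                    // summed element-wise; the size must match on all processors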

Definition at line 1913 of file parallel_implementation.h.

Referenced by libMesh::Parallel::BinSorter< KeyType, IdxType >::binsort(), broadcast_packed_range(), libMesh::Parallel::Histogram< KeyType, IdxType >::build_histogram(), libMesh::System::calculate_norm(), libMesh::Parallel::Sort< KeyType, IdxType >::communicate_bins(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), libMesh::MeshRefinement::create_parent_error_vector(), libMesh::CondensedEigenSystem::get_eigenpair(), libMesh::DofMap::get_info(), libMesh::DistributedVector< T >::init(), libMesh::DistributedMesh::n_active_elem(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::CondensedEigenSystem::n_global_non_condensed_dofs(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::DistributedMesh::parallel_n_elem(), libMesh::DistributedMesh::parallel_n_nodes(), libMesh::Nemesis_IO::read(), libMesh::ErrorEstimator::reduce_error(), libMesh::Parallel::Sort< KeyType, IdxType >::sort(), libMesh::MeshTools::total_weight(), and libMesh::XdrIO::write_serialized_connectivity().

1914 {
1915  if (this->size() > 1)
1916  {
1917  LOG_SCOPE("sum()", "Parallel");
1918 
1919  T temp = r;
1920  libmesh_call_mpi
1921  (MPI_Allreduce (&temp, &r, 1, StandardType<T>(&temp), MPI_SUM,
1922  this->get()));
1923  }
1924 }
template<typename T >
void libMesh::Parallel::Communicator::sum ( std::vector< T > &  r) const
inline

Definition at line 1928 of file parallel_implementation.h.

References libMesh::libmesh_assert(), and libMesh::Parallel::verify().

1929 {
1930  if (this->size() > 1 && !r.empty())
1931  {
1932  LOG_SCOPE("sum()", "Parallel");
1933 
1934  libmesh_assert(this->verify(r.size()));
1935 
1936  std::vector<T> temp(r);
1937  libmesh_call_mpi
1938  (MPI_Allreduce (&temp[0], &r[0], cast_int<int>(r.size()),
1939  StandardType<T>(&temp[0]), MPI_SUM,
1940  this->get()));
1941  }
1942 }
template<typename T >
void libMesh::Parallel::Communicator::sum ( std::complex< T > &  r) const
inline

Definition at line 1948 of file parallel_implementation.h.

1949 {
1950  if (this->size() > 1)
1951  {
1952  LOG_SCOPE("sum()", "Parallel");
1953 
1954  std::complex<T> temp(r);
1955  libmesh_call_mpi
1956  (MPI_Allreduce (&temp, &r, 2, StandardType<T>(), MPI_SUM,
1957  this->get()));
1958  }
1959 }
template<typename T >
void libMesh::Parallel::Communicator::sum ( std::vector< std::complex< T > > &  r) const
inline

Definition at line 1963 of file parallel_implementation.h.

References libMesh::libmesh_assert(), libmesh_nullptr, and libMesh::Parallel::verify().

1964 {
1965  if (this->size() > 1 && !r.empty())
1966  {
1967  LOG_SCOPE("sum()", "Parallel");
1968 
1969  libmesh_assert(this->verify(r.size()));
1970 
1971  std::vector<std::complex<T> > temp(r);
1972  libmesh_call_mpi
1973  (MPI_Allreduce (&temp[0], &r[0], cast_int<int>(r.size() * 2),
1974  StandardType<T>(libmesh_nullptr), MPI_SUM, this->get()));
1975  }
1976 }
template<typename T >
bool libMesh::Parallel::Communicator::verify ( const T &  r) const
inline

Verify that a local variable has the same value on all processors. Containers must have the same value in every entry.
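
A minimal usage sketch (hypothetical value; comm stands for any Parallel::Communicator):

 const unsigned int dim = 3;              // e.g. a spatial dimension chosen on each processor
 libmesh_assert(comm.verify(dim));        // holds only if every processor has the same value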

Definition at line 1365 of file parallel_implementation.h.

References libMesh::Parallel::max(), and libMesh::Parallel::min().

Referenced by libMesh::MeshCommunication::broadcast(), broadcast_packed_range(), libMesh::DofMap::check_dirichlet_bcid_consistency(), libMesh::MeshCommunication::delete_remote_elements(), libMesh::MeshCommunication::gather(), and semiverify().

1366 {
1367  if (this->size() > 1 && Attributes<T>::has_min_max == true)
1368  {
1369  T tempmin = r, tempmax = r;
1370  this->min(tempmin);
1371  this->max(tempmax);
1372  bool verified = (r == tempmin) &&
1373  (r == tempmax);
1374  this->min(verified);
1375  return verified;
1376  }
1377 
1378 #ifdef LIBMESH_HAVE_CXX11
1379  static_assert(Attributes<T>::has_min_max,
1380  "Tried to verify an unverifiable type");
1381 #endif
1382 
1383  return true;
1384 }
template<>
bool libMesh::Parallel::Communicator::verify ( const bool &  r) const
inline

Definition at line 1389 of file parallel_implementation.h.

References libMesh::Parallel::verify().

1390 {
1391  const unsigned char rnew = r;
1392  return this->verify(rnew);
1393 }

Member Data Documentation

communicator libMesh::Parallel::Communicator::_communicator
private

Definition at line 699 of file parallel.h.

Referenced by assign(), clear(), duplicate(), and libMesh::ParallelObject::operator=().

bool libMesh::Parallel::Communicator::_I_duped_it
private

Definition at line 706 of file parallel.h.

Referenced by clear(), duplicate(), and split().

unsigned int libMesh::Parallel::Communicator::_rank
private

Definition at line 700 of file parallel.h.

Referenced by assign().

SendMode libMesh::Parallel::Communicator::_send_mode
private

Definition at line 701 of file parallel.h.

Referenced by assign().

unsigned int libMesh::Parallel::Communicator::_size
private

Definition at line 700 of file parallel.h.

Referenced by assign().

std::map<int, unsigned int> libMesh::Parallel::Communicator::used_tag_values
mutable private

Definition at line 705 of file parallel.h.


The documentation for this class was generated from the following files: