libMesh::Parallel Namespace Reference

Namespaces

 Utils
 

Classes

struct  Attributes
 
class  BinSorter
 Parallel bin sorting object. More...
 
struct  BuildStandardTypeVector
 
struct  BuildStandardTypeVector< 0 >
 
class  Communicator
 
struct  data_type
 
class  DataPlusInt
 
class  DataType
 
struct  FillDisplacementArray
 
struct  FillDisplacementArray< 0 >
 
class  Histogram
 Used in conjunction with a BinSorter for parallel sorting. More...
 
class  MessageTag
 
class  OpFunction
 
class  OpFunction< Point >
 
class  OpFunction< TypeVector< T > >
 
class  OpFunction< VectorValue< T > >
 
struct  opfunction_dependent_false
 
class  Packing
 
class  Packing< const Elem * >
 
class  Packing< const Node * >
 
class  Packing< Elem * >
 
class  Packing< Node * >
 
struct  PostWaitCopyBuffer
 
struct  PostWaitDeleteBuffer
 
struct  PostWaitDereferenceSharedPtr
 
struct  PostWaitDereferenceTag
 
struct  PostWaitFreeBuffer
 
struct  PostWaitUnpackBuffer
 
struct  PostWaitWork
 
class  Request
 
struct  request
 
class  Sort
 Object for performing parallel sorts using MPI. More...
 
class  StandardType
 
class  StandardType< Hilbert::HilbertIndices >
 
class  StandardType< Point >
 
class  StandardType< std::complex< T > >
 
class  StandardType< std::pair< T1, T2 > >
 
class  StandardType< std::tuple< Types... > >
 
class  StandardType< TensorValue< T > >
 
class  StandardType< TypeTensor< T > >
 
class  StandardType< TypeVector< T > >
 
class  StandardType< VectorValue< T > >
 
struct  standardtype_dependent_false
 
class  Status
 
struct  status
 
struct  SyncEverything
 
class  TypeVectorOpFunction
 

Typedefs

typedef MPI_Comm communicator
 
typedef MPI_Datatype data_type
 
typedef std::pair< Hilbert::HilbertIndices, unique_id_type > DofObjectKey
 
typedef MPI_Request request
 
typedef MPI_Status status
 

Functions

void wait (std::vector< Request > &r)
 
std::size_t waitany (std::vector< Request > &r)
 
 LIBMESH_INT_TYPE (char)
 
 LIBMESH_INT_TYPE (signed char)
 
 LIBMESH_INT_TYPE (unsigned char)
 
 LIBMESH_INT_TYPE (short int)
 
 LIBMESH_INT_TYPE (unsigned short int)
 
 LIBMESH_INT_TYPE (int)
 
 LIBMESH_INT_TYPE (long)
 
 LIBMESH_INT_TYPE (unsigned long long)
 
 LIBMESH_FLOAT_TYPE (float)
 
 LIBMESH_FLOAT_TYPE (double)
 
 LIBMESH_FLOAT_TYPE (long double)
 
template<typename T , typename C , typename A >
 LIBMESH_CONTAINER_TYPE (std::set< T LIBMESH_ATTRIBUTES_COMMA C LIBMESH_ATTRIBUTES_COMMA A >)
 
template<typename T , typename A >
 LIBMESH_CONTAINER_TYPE (std::vector< T LIBMESH_ATTRIBUTES_COMMA A >)
 
 LIBMESH_PARALLEL_INTEGER_OPS (char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (signed char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (short int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned short int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (long)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned long long)
 
 LIBMESH_PARALLEL_FLOAT_OPS (float)
 
 LIBMESH_PARALLEL_FLOAT_OPS (double)
 
 LIBMESH_PARALLEL_FLOAT_OPS (long double)
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
void unpack_range (const typename std::vector< buffertype > &buffer, Context *context, OutputIter out, const T *output_type)
 
template<typename Context , typename buffertype , typename Iter >
Iter pack_range (const Context *context, Iter range_begin, const Iter range_end, typename std::vector< buffertype > &buffer, std::size_t approx_buffer_size=1000000)
 
template<typename Context , typename Iter >
std::size_t packed_range_size (const Context *context, Iter range_begin, const Iter range_end)
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
void unpack_range (const std::vector< buffertype > &buffer, Context *context, OutputIter out_iter, const T *)
 
template<typename Iterator , typename DofObjType , typename SyncFunctor >
void sync_dofobject_data_by_xyz (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, LocationMap< DofObjType > *location_map, SyncFunctor &sync)
 
template<typename Iterator , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 
template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, const DofObjectCheckFunctor &dofobj_check, SyncFunctor &sync)
 
template<typename Iterator , typename SyncFunctor >
void sync_element_data_by_parent_id (MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool sync_node_data_by_element_id_once (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void sync_node_data_by_element_id (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
 
template<typename Iterator , typename DofObjType , typename SyncFunctor >
void sync_dofobject_data_by_xyz (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, LocationMap< DofObjType > &location_map, SyncFunctor &sync)
 
template<typename T >
data_type dataplusint_type ()
 
template<>
data_type dataplusint_type< short int > ()
 
template<>
data_type dataplusint_type< int > ()
 
template<>
data_type dataplusint_type< long > ()
 
template<>
data_type dataplusint_type< float > ()
 
template<>
data_type dataplusint_type< double > ()
 
template<>
data_type dataplusint_type< long double > ()
 
template<typename MapToVectors , typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapToVectors &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 
template<typename MapToVectors , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapToVectors &data, ActionFunctor &act_on_data)
 
template<typename datum , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, RequestContainer &reqs, GatherFunctor &gather_data, ActionFunctor &act_on_data, const datum *example)
 
template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, GatherFunctor &gather_data, ActionFunctor &act_on_data, const datum *example)
 
template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 
template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, ActionFunctor &act_on_data)
 
template<typename datum , typename A , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, RequestContainer &reqs, GatherFunctor &gather_data, ActionFunctor &act_on_data, const std::vector< datum, A > *example)
 
template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 
template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, ActionFunctor &act_on_data)
 
Status wait (Request &r)
 
 LIBMESH_STANDARD_TYPE (char, MPI_CHAR)
 
 LIBMESH_STANDARD_TYPE (signed char, MPI_SIGNED_CHAR)
 
 LIBMESH_STANDARD_TYPE (unsigned char, MPI_UNSIGNED_CHAR)
 
 LIBMESH_STANDARD_TYPE (short int, MPI_SHORT)
 
 LIBMESH_STANDARD_TYPE (unsigned short int, MPI_UNSIGNED_SHORT)
 
 LIBMESH_STANDARD_TYPE (int, MPI_INT)
 
 LIBMESH_STANDARD_TYPE (unsigned int, MPI_UNSIGNED)
 
 LIBMESH_STANDARD_TYPE (long, MPI_LONG)
 
 LIBMESH_STANDARD_TYPE (long long, MPI_LONG_LONG_INT)
 
 LIBMESH_STANDARD_TYPE (unsigned long, MPI_UNSIGNED_LONG)
 
 LIBMESH_STANDARD_TYPE (unsigned long long, MPI_UNSIGNED_LONG_LONG)
 
 LIBMESH_STANDARD_TYPE (float, MPI_FLOAT)
 
 LIBMESH_STANDARD_TYPE (double, MPI_DOUBLE)
 
 LIBMESH_STANDARD_TYPE (long double, MPI_LONG_DOUBLE)
 

Variables

const unsigned int any_source
 
const MessageTag any_tag = MessageTag(MPI_ANY_TAG)
 
const MessageTag no_tag = MessageTag(0)
 

Detailed Description

The Parallel namespace provides wrapper functions for common general parallel synchronization tasks.

For MPI 1.1 compatibility, temporary buffers are used instead of MPI 2's MPI_IN_PLACE.
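
For orientation, a minimal usage sketch of the wrapper style, assuming a libMesh build with MPI enabled (LibMeshInit::comm() provides the Communicator wrapping MPI_COMM_WORLD):

#include "libmesh/libmesh.h"
#include "libmesh/parallel.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init (argc, argv);                 // wraps MPI_Init/Finalize
  const Parallel::Communicator & comm = init.comm();

  // Each rank contributes its id; the wrapper hides the MPI_Allreduce.
  unsigned int n = comm.rank();
  comm.sum(n);   // n is now 0 + 1 + ... + (size-1) on every rank

  return 0;
}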

Typedef Documentation

◆ communicator

typedef MPI_Comm libMesh::Parallel::communicator

Communicator object for talking with subsets of processors

Definition at line 57 of file communicator.h.

◆ data_type

typedef MPI_Datatype libMesh::Parallel::data_type

Data types for communication

Definition at line 46 of file data_type.h.

◆ DofObjectKey

typedef std::pair< Hilbert::HilbertIndices, unique_id_type > libMesh::Parallel::DofObjectKey

Definition at line 69 of file parallel_hilbert.h.

◆ request

typedef MPI_Request libMesh::Parallel::request

Request object for non-blocking I/O

Definition at line 40 of file request.h.

◆ status

typedef MPI_Status libMesh::Parallel::status

Status object for querying messages

Definition at line 41 of file status.h.

Function Documentation

◆ dataplusint_type()

template<typename T >
data_type libMesh::Parallel::dataplusint_type ( )
inline

Templated function to return the appropriate MPI datatype for use with built-in C types when combined with an int
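
For context, a brief raw-MPI sketch of what these fused {T, int} types support, namely MAXLOC/MINLOC-style reductions that track which rank holds an extremum (global_maxloc is a hypothetical helper, not part of libMesh):

#include <mpi.h>

// Layout matching MPI_DOUBLE_INT, the type dataplusint_type<double>()
// returns: the value being reduced plus the rank that owns it.
struct DoubleInt { double val; int rank; };

void global_maxloc (MPI_Comm comm, double local_val, DoubleInt & out)
{
  DoubleInt in;
  in.val = local_val;
  MPI_Comm_rank(comm, &in.rank);

  // MPI_MAXLOC compares on .val and carries the matching .rank along
  MPI_Allreduce(&in, &out, 1, MPI_DOUBLE_INT, MPI_MAXLOC, comm);
  // out.val is the global maximum; out.rank identifies its owner
}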

◆ dataplusint_type< double >()

template<>
data_type libMesh::Parallel::dataplusint_type< double > ( )
inline

Definition at line 167 of file parallel_implementation.h.

{ return MPI_DOUBLE_INT; }

◆ dataplusint_type< float >()

template<>
data_type libMesh::Parallel::dataplusint_type< float > ( )
inline

Definition at line 164 of file parallel_implementation.h.

{ return MPI_FLOAT_INT; }

◆ dataplusint_type< int >()

template<>
data_type libMesh::Parallel::dataplusint_type< int > ( )
inline

Definition at line 158 of file parallel_implementation.h.

{ return MPI_2INT; }

◆ dataplusint_type< long >()

template<>
data_type libMesh::Parallel::dataplusint_type< long > ( )
inline

Definition at line 161 of file parallel_implementation.h.

{ return MPI_LONG_INT; }

◆ dataplusint_type< long double >()

template<>
data_type libMesh::Parallel::dataplusint_type< long double > ( )
inline

Definition at line 170 of file parallel_implementation.h.

{ return MPI_LONG_DOUBLE_INT; }

◆ dataplusint_type< short int >()

template<>
data_type libMesh::Parallel::dataplusint_type< short int > ( )
inline

Definition at line 155 of file parallel_implementation.h.

{ return MPI_SHORT_INT; }

◆ LIBMESH_CONTAINER_TYPE() [1/2]

template<typename T , typename C , typename A >
libMesh::Parallel::LIBMESH_CONTAINER_TYPE ( std::set< T LIBMESH_ATTRIBUTES_COMMA C LIBMESH_ATTRIBUTES_COMMA A > )

◆ LIBMESH_CONTAINER_TYPE() [2/2]

template<typename T , typename A >
libMesh::Parallel::LIBMESH_CONTAINER_TYPE ( std::vector< T LIBMESH_ATTRIBUTES_COMMA A > )

◆ LIBMESH_FLOAT_TYPE() [1/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( float  )

◆ LIBMESH_FLOAT_TYPE() [2/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( double  )

◆ LIBMESH_FLOAT_TYPE() [3/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( long  double)

◆ LIBMESH_INT_TYPE() [1/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( char  )

◆ LIBMESH_INT_TYPE() [2/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( signed  char)

◆ LIBMESH_INT_TYPE() [3/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned  char)

◆ LIBMESH_INT_TYPE() [4/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( short  int)

◆ LIBMESH_INT_TYPE() [5/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned short  int)

◆ LIBMESH_INT_TYPE() [6/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( int  )

◆ LIBMESH_INT_TYPE() [7/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( long  )

◆ LIBMESH_INT_TYPE() [8/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned long  long)

◆ LIBMESH_PARALLEL_FLOAT_OPS() [1/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( float  )

◆ LIBMESH_PARALLEL_FLOAT_OPS() [2/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( double  )

◆ LIBMESH_PARALLEL_FLOAT_OPS() [3/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( long  double)

◆ LIBMESH_PARALLEL_INTEGER_OPS() [1/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( char  )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [2/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( signed  char)

◆ LIBMESH_PARALLEL_INTEGER_OPS() [3/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned  char)

◆ LIBMESH_PARALLEL_INTEGER_OPS() [4/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( short  int)

◆ LIBMESH_PARALLEL_INTEGER_OPS() [5/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned short  int)

◆ LIBMESH_PARALLEL_INTEGER_OPS() [6/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( int  )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [7/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( long  )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [8/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned long  long)

◆ LIBMESH_STANDARD_TYPE() [1/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( char  ,
MPI_CHAR   
)

◆ LIBMESH_STANDARD_TYPE() [2/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( signed  char,
MPI_SIGNED_CHAR   
)

◆ LIBMESH_STANDARD_TYPE() [3/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned  char,
MPI_UNSIGNED_CHAR   
)

◆ LIBMESH_STANDARD_TYPE() [4/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( short  int,
MPI_SHORT   
)

◆ LIBMESH_STANDARD_TYPE() [5/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned short  int,
MPI_UNSIGNED_SHORT   
)

◆ LIBMESH_STANDARD_TYPE() [6/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( int  ,
MPI_INT   
)

◆ LIBMESH_STANDARD_TYPE() [7/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned  int,
MPI_UNSIGNED   
)

◆ LIBMESH_STANDARD_TYPE() [8/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long  ,
MPI_LONG   
)

◆ LIBMESH_STANDARD_TYPE() [9/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long  long,
MPI_LONG_LONG_INT   
)

◆ LIBMESH_STANDARD_TYPE() [10/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned  long,
MPI_UNSIGNED_LONG   
)

◆ LIBMESH_STANDARD_TYPE() [11/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned long  long,
MPI_UNSIGNED_LONG_LONG   
)

◆ LIBMESH_STANDARD_TYPE() [12/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( float  ,
MPI_FLOAT   
)

◆ LIBMESH_STANDARD_TYPE() [13/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( double  ,
MPI_DOUBLE   
)

◆ LIBMESH_STANDARD_TYPE() [14/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long  double,
MPI_LONG_DOUBLE   
)

◆ pack_range()

template<typename Context , typename buffertype , typename Iter >
Iter libMesh::Parallel::pack_range ( const Context *  context,
Iter  range_begin,
const Iter  range_end,
typename std::vector< buffertype > &  buffer,
std::size_t  approx_buffer_size = 1000000 
)
inline

Encode a range of potentially-variable-size objects to a data array.

The data will be buffered in vectors with lengths that do not exceed the sum of approx_buffer_size and the size of an individual packed object.

Helper function for range packing

Definition at line 139 of file packing.h.

References libMesh::Parallel::Packing< T >::pack(), libMesh::Parallel::Packing< T >::packable_size(), and libMesh::Parallel::Packing< T >::packed_size().

Referenced by libMesh::Parallel::Communicator::allgather_packed_range(), libMesh::Parallel::Communicator::broadcast_packed_range(), libMesh::Parallel::Communicator::gather_packed_range(), libMesh::Parallel::Communicator::nonblocking_send_packed_range(), libMesh::Parallel::Communicator::send_packed_range(), and libMesh::Parallel::Communicator::send_receive_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  // Count the total size of and preallocate buffer for efficiency.
  // Prepare to stop early if the buffer would be too large.
  std::size_t buffer_size = 0;
  Iter range_stop = range_begin;
  for (; range_stop != range_end && buffer_size < approx_buffer_size;
       ++range_stop)
    {
      std::size_t next_buffer_size =
        Parallel::Packing<T>::packable_size(*range_stop, context);
      buffer_size += next_buffer_size;
    }
  buffer.reserve(buffer.size() + buffer_size);

  // Pack the objects into the buffer
  for (; range_begin != range_stop; ++range_begin)
    {
#ifndef NDEBUG
      std::size_t old_size = buffer.size();
#endif

      Parallel::Packing<T>::pack
        (*range_begin, back_inserter(buffer), context);

#ifndef NDEBUG
      unsigned int my_packable_size =
        Parallel::Packing<T>::packable_size(*range_begin, context);
      unsigned int my_packed_size =
        Parallel::Packing<T>::packed_size (buffer.begin() + old_size);
      libmesh_assert_equal_to (my_packable_size, my_packed_size);
      libmesh_assert_equal_to (buffer.size(), old_size + my_packable_size);
#endif
    }

  return range_stop;
}
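
As a usage sketch, here is the chunking loop an application might build on pack_range(); send_range_in_chunks and dest_rank are hypothetical, and buffer_t must match the buffer type of the Packing<T> specialization for the iterators' value_type:

#include "libmesh/packing.h"
#include "libmesh/parallel.h"

#include <vector>

using namespace libMesh;

// Hypothetical helper: stream a range to dest_rank in bounded chunks.
template <typename buffer_t, typename Context, typename Iter>
void send_range_in_chunks (const Parallel::Communicator & comm,
                           const Context * context,
                           Iter range_begin, const Iter range_end,
                           const processor_id_type dest_rank)
{
  std::vector<buffer_t> buffer;
  while (range_begin != range_end)
    {
      buffer.clear();
      // Packs objects until the buffer would exceed the default
      // approx_buffer_size, then returns the first unpacked iterator.
      range_begin = Parallel::pack_range
        (context, range_begin, range_end, buffer);
      comm.send (dest_rank, buffer);
    }
}

The receiving side would pair each buffer with unpack_range(); Communicator::send_packed_range() and receive_packed_range() wrap this same pattern, and packed_range_size() below can report the total buffer size up front.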

◆ packed_range_size()

template<typename Context , typename Iter >
std::size_t libMesh::Parallel::packed_range_size ( const Context *  context,
Iter  range_begin,
const Iter  range_end 
)
inline

Return the total buffer size needed to encode a range of potentially-variable-size objects to a data array.

Helper function for range packing

Definition at line 118 of file packing.h.

References libMesh::Parallel::Packing< T >::packable_size().

Referenced by libMesh::Parallel::Communicator::send_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  std::size_t buffer_size = 0;
  for (Iter range_count = range_begin;
       range_count != range_end;
       ++range_count)
    {
      buffer_size += Parallel::Packing<T>::packable_size(*range_count, context);
    }
  return buffer_size;
}

◆ pull_parallel_vector_data() [1/3]

template<typename datum , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
RequestContainer &  reqs,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const datum *  example 
)

Send query vectors, receive and answer them with vectors of data, then act on those answers.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of query ids to send.

Query data which is received from other processors will be operated on by gather_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> & data)

Answer data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<id> & ids, const std::vector<datum> & data);

The example pointer may be null; it merely needs to be of the correct type. It's just here because function overloading in C++ is easy, whereas SFINAE is hard and partial template specialization of functions is impossible.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

Not all sends may have completed yet. The supplied container of Request objects, reqs, has more requests inserted into it, one for each of the data sends. These requests must be waited on before the data map is deleted.

Definition at line 472 of file parallel_sync.h.

References libMesh::Parallel::Communicator::get_unique_tag(), push_parallel_vector_data(), query, libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::Parallel::Communicator::size(), wait(), and waitany().

Referenced by libMesh::DofMap::allgather_recursive_constraints(), libMesh::MeshCommunication::assign_global_indices(), libMesh::Partitioner::assign_partitioning(), libMesh::MeshCommunication::find_global_indices(), libMesh::DofMap::gather_constraints(), libMesh::DistributedVector< T >::localize(), pull_parallel_vector_data(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::Partitioner::set_node_processor_ids(), libMesh::DofMap::set_nonlocal_dof_objects(), sync_dofobject_data_by_id(), sync_dofobject_data_by_xyz(), sync_element_data_by_parent_id(), sync_node_data_by_element_id_once(), and libMesh::XdrIO::write_serialized_connectivity().

{
  typedef typename MapToVectors::mapped_type query_type;

  std::map<processor_id_type, std::vector<datum> >
    response_data, received_data;
  std::vector<Request> response_reqs;

  StandardType<datum> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(105);

  auto gather_functor =
    [&comm, &gather_data, &response_data, &response_reqs, &datatype, &tag]
    (processor_id_type pid, query_type query)
    {
      gather_data(pid, query, response_data[pid]);
      libmesh_assert_equal_to(query.size(), response_data[pid].size());

      // Just act on data later if the user requested a send-to-self
      if (pid != comm.rank())
        {
          Request sendreq;
          comm.send(pid, response_data[pid], datatype, sendreq, tag);
          response_reqs.push_back(sendreq);
        }
    };

  push_parallel_vector_data (comm, queries, reqs, gather_functor);

  // Every outgoing query should now have an incoming response.
  // Post all of the receives, non-blocking
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (auto & querypair : queries)
    {
      processor_id_type proc_id = querypair.first;
      libmesh_assert_less(proc_id, comm.size());

      if (proc_id == comm.rank())
        {
          libmesh_assert(queries.count(proc_id));
          libmesh_assert_equal_to(queries.at(proc_id).size(),
                                  response_data.at(proc_id).size());
          act_on_data(proc_id, queries.at(proc_id), response_data.at(proc_id));
        }
      else
        {
          auto & querydata = querypair.second;
          Request req;
          auto & incoming_data = received_data[proc_id];
          incoming_data.resize(querydata.size());
          comm.receive(proc_id, incoming_data, datatype, req, tag);
          receive_reqs.push_back(req);
          receive_procids.push_back(proc_id);
        }
    }

  while(receive_reqs.size())
    {
      std::size_t completed = waitany(receive_reqs);
      processor_id_type proc_id = receive_procids[completed];
      receive_reqs.erase(receive_reqs.begin() + completed);
      receive_procids.erase(receive_procids.begin() + completed);

      libmesh_assert(queries.count(proc_id));
      libmesh_assert_equal_to(queries.at(proc_id).size(),
                              received_data[proc_id].size());
      act_on_data(proc_id, queries.at(proc_id), received_data[proc_id]);
      received_data.erase(proc_id);
    }

  wait(response_reqs);
}

◆ pull_parallel_vector_data() [2/3]

template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const datum *  example 
)

Send query vectors, receive and answer them with vectors of data, then act on those answers.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of query ids to send.

Query data which is received from other processors will be operated on by gather_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> & data)

Answer data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<id> & ids, const std::vector<datum> & data);

The example pointer may be null; it merely needs to be of the correct type. It's just here because function overloading in C++ is easy, whereas SFINAE is hard and partial template specialization of functions is impossible.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All communication and actions are complete when this function returns.

Definition at line 559 of file parallel_sync.h.

References pull_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  pull_parallel_vector_data(comm, queries, requests, gather_data,
                            act_on_data, example);

  wait(requests);
}
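
As a usage sketch of this blocking overload: pull_example, look_up_value(), and store_value() below are hypothetical application code; the datum type is double:

#include "libmesh/parallel.h"
#include "libmesh/parallel_sync.h"

#include <map>
#include <vector>

using namespace libMesh;

// Hypothetical hooks standing in for application code:
double look_up_value (dof_id_type id);
void store_value (dof_id_type id, double val);

void pull_example (const Parallel::Communicator & comm,
                   std::map<processor_id_type, std::vector<dof_id_type>> & queries)
{
  // Owning ranks answer each batch of ids with one double per id.
  auto gather = [](processor_id_type /*pid*/,
                   const std::vector<dof_id_type> & ids,
                   std::vector<double> & data)
  {
    data.resize(ids.size());
    for (std::size_t i = 0; i != ids.size(); ++i)
      data[i] = look_up_value(ids[i]);
  };

  // Requesting ranks store each answer as it arrives.
  auto act = [](processor_id_type /*pid*/,
                const std::vector<dof_id_type> & ids,
                const std::vector<double> & data)
  {
    for (std::size_t i = 0; i != ids.size(); ++i)
      store_value(ids[i], data[i]);
  };

  // The null example pointer exists only to fix datum = double.
  double * ex = nullptr;
  Parallel::pull_parallel_vector_data(comm, queries, gather, act, ex);
}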

◆ pull_parallel_vector_data() [3/3]

template<typename datum , typename A , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
RequestContainer &  reqs,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const std::vector< datum, A > *  example 
)

Definition at line 580 of file parallel_sync.h.

References any_source, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), push_parallel_vector_data(), query, libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and wait().

{
  typedef typename MapToVectors::mapped_type query_type;

  std::map<processor_id_type, std::vector<std::vector<datum,A>>>
    response_data;
  std::vector<Request> response_reqs;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(105);

  auto gather_functor =
    [&comm, &gather_data, &act_on_data,
     &response_data, &response_reqs, &tag]
    (processor_id_type pid, query_type query)
    {
      gather_data(pid, query, response_data[pid]);
      libmesh_assert_equal_to(query.size(),
                              response_data[pid].size());

      // Just act on data if the user requested a send-to-self
      if (pid == comm.rank())
        {
          act_on_data(pid, query, response_data[pid]);
        }
      else
        {
          Request sendreq;
          comm.send(pid, response_data[pid], sendreq, tag);
          response_reqs.push_back(sendreq);
        }
    };

  push_parallel_vector_data (comm, queries, reqs, gather_functor);

  // Every outgoing query should now have an incoming response.
  //
  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (std::size_t i = 0,
       n_queries = queries.size() - queries.count(comm.rank());
       i != n_queries; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<datum,A>> received_data;
      comm.receive(proc_id, received_data, tag);

      libmesh_assert(queries.count(proc_id));
      auto & querydata = queries.at(proc_id);
      libmesh_assert_equal_to(querydata.size(), received_data.size());
      act_on_data(proc_id, querydata, received_data);
    }

  wait(response_reqs);
}

◆ push_parallel_vector_data() [1/6]

template<typename MapToVectors , typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapToVectors &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<datum> & data)

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

Not all sends may have completed yet. The supplied container of Request objects, reqs, has more requests inserted into it, one for each of the data sends. These requests must be waited on before the data map is deleted.

Definition at line 239 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::Parallel::Communicator::size(), and waitany().

Referenced by libMesh::DofMap::allgather_recursive_constraints(), libMesh::Parallel::Sort< KeyType, IdxType >::communicate_bins(), libMesh::MeshTools::correct_node_proc_ids(), pull_parallel_vector_data(), push_parallel_vector_data(), and libMesh::DofMap::scatter_constraints().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  // This function only works for "flat" data that we can pre-size
  // receive buffers for: a map to vectors-of-standard-types, not e.g.
  // vectors-of-vectors.
  //
  // Trying to instantiate a StandardType<T> gives us a compiler error
  // where otherwise we would have had a runtime error.
  //
  // Creating a StandardType<T> manually also saves our APIs from
  // having to do a bunch of automatic creations later.
  //
  // This object will be free'd before all non-blocking communications
  // complete, but the MPI standard for MPI_Type_free specifies "Any
  // communication that is currently using this datatype will
  // complete normally." so we're cool.
  typedef decltype(data.begin()->second.front()) ref_type;
  typedef typename std::remove_reference<ref_type>::type nonref_type;
  StandardType<typename std::remove_const<nonref_type>::type> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  MapToVectors received_data;

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        act_on_data(destid, datum);
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives, non-blocking
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id] && proc_id != comm.rank())
      {
        Request req;
        auto & incoming_data = received_data[proc_id];
        incoming_data.resize(will_receive_from[proc_id]);
        comm.receive(proc_id, incoming_data, datatype, req, tag);
        receive_reqs.push_back(req);
        receive_procids.push_back(proc_id);
      }

  while(receive_reqs.size())
    {
      std::size_t completed = waitany(receive_reqs);
      processor_id_type proc_id = receive_procids[completed];
      receive_reqs.erase(receive_reqs.begin() + completed);
      receive_procids.erase(receive_procids.begin() + completed);

      act_on_data(proc_id, received_data[proc_id]);
      received_data.erase(proc_id);
    }
}

◆ push_parallel_vector_data() [2/6]

template<typename MapToVectors , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapToVectors &  data,
ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<datum> & data);

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All communication and actions are complete when this function returns.

Definition at line 435 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}
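
A usage sketch of this blocking overload; push_example and the act_on_data body are hypothetical application code:

#include "libmesh/parallel.h"
#include "libmesh/parallel_sync.h"

#include <map>
#include <vector>

using namespace libMesh;

void push_example (const Parallel::Communicator & comm,
                   std::map<processor_id_type, std::vector<dof_id_type>> & data)
{
  // data[pid] holds what rank pid should receive; per the
  // preconditions above, no vector may be empty and every rank
  // must enter this call.
  auto act_on_data = [](processor_id_type pid,
                        const std::vector<dof_id_type> & received)
  {
    // ... process what rank pid sent us ...
  };

  // Blocking variant: all sends, receives, and actions are
  // complete when this returns.
  Parallel::push_parallel_vector_data(comm, data, act_on_data);
}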

◆ push_parallel_vector_data() [3/6]

template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Definition at line 348 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), any_source, data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  processor_id_type n_receives = 0;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id])
      n_receives++;

  // We'll construct a datatype once for repeated use
  StandardType<ValueType> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        {
          act_on_data(destid, datum);
          n_receives--;
        }
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  for (processor_id_type i = 0; i != n_receives; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<ValueType,A1>,A2> received_data;
      comm.receive(proc_id, received_data, datatype, tag);
      act_on_data(proc_id, received_data);
    }
}

◆ push_parallel_vector_data() [4/6]

template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
ActionFunctor &  act_on_data 
)

Definition at line 454 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}

◆ push_parallel_vector_data() [5/6]

template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Definition at line 348 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), any_source, data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  processor_id_type n_receives = 0;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id])
      n_receives++;

  // We'll construct a datatype once for repeated use
  StandardType<ValueType> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        {
          act_on_data(destid, datum);
          n_receives--;
        }
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  for (processor_id_type i = 0; i != n_receives; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<ValueType,A1>,A2> received_data;
      comm.receive(proc_id, received_data, datatype, tag);
      act_on_data(proc_id, received_data);
    }
}

◆ push_parallel_vector_data() [6/6]

template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
ActionFunctor &  act_on_data 
)

Definition at line 454 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}

◆ sync_dofobject_data_by_id() [1/2]

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator comm,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id. Fulfill requests with sync.gather_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

Definition at line 336 of file parallel_ghost_sync.h.

Referenced by libMesh::Partitioner::_find_global_index_by_pid_map(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshCommunication::make_elems_parallel_consistent(), libMesh::MeshRefinement::make_flags_parallel_consistent(), libMesh::MeshCommunication::make_node_unique_ids_parallel_consistent(), libMesh::MeshCommunication::make_p_levels_parallel_consistent(), libMesh::FEMSystem::mesh_position_set(), libMesh::LaplaceMeshSmoother::smooth(), and libMesh::MeshRefinement::uniformly_coarsen().

{
  sync_dofobject_data_by_id(comm, range_begin, range_end, SyncEverything(), sync);
}
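A sketch of a minimal SyncFunctor satisfying this interface; SyncMyValues and its my_values storage are hypothetical, and the same gather_data/act_on_data shape serves the other sync_* functions in this section:

#include "libmesh/id_types.h"

#include <map>
#include <vector>

using namespace libMesh;

// Hypothetical functor: synchronize one double per ghost DofObject.
struct SyncMyValues
{
  typedef double datum;   // needs a StandardType; double has one

  std::map<dof_id_type, double> & my_values;

  // Called on the owning rank: answer the requested ids.
  void gather_data (const std::vector<dof_id_type> & ids,
                    std::vector<datum> & data) const
  {
    data.resize(ids.size());
    for (std::size_t i = 0; i != ids.size(); ++i)
      data[i] = my_values.at(ids[i]);
  }

  // Called on the requesting rank: update our ghost copies.
  void act_on_data (const std::vector<dof_id_type> & ids,
                    const std::vector<datum> & data)
  {
    for (std::size_t i = 0; i != ids.size(); ++i)
      my_values[ids[i]] = data[i];
  }
};

Invocation would then look like Parallel::sync_dofobject_data_by_id(mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(), sync).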

◆ sync_dofobject_data_by_id() [2/2]

template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator comm,
const Iterator &  range_begin,
const Iterator &  range_end,
const DofObjectCheckFunctor &  dofobj_check,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id.

Elements within the range can be excluded from the request by returning false from dofobj_check(dof_object)

Definition at line 347 of file parallel_ghost_sync.h.

References data, libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::DofObject::processor_id(), pull_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject * obj = *it;
      libmesh_assert (obj);

      // We may want to pass Elem* or Node* to the check function, not
      // just DofObject*
      if (!dofobj_check(*it))
        continue;

      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid != DofObject::invalid_processor_id)
        ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      requested_objs_id[p].reserve(ghost_objects_from_proc[p]);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject * obj = *it;

      if (!dofobj_check(*it))
        continue;

      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;

      requested_objs_id[obj_procid].push_back(obj->id());
    }

  auto gather_functor =
    [&sync]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     std::vector<typename SyncFunctor::datum> & data)
    {
      sync.gather_data(ids, data);
    };

  auto action_functor =
    [&sync]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results
      sync.act_on_data(ids, data);
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  Parallel::pull_parallel_vector_data
    (comm, requested_objs_id, gather_functor, action_functor, ex);
}

◆ sync_dofobject_data_by_xyz() [1/2]

template<typename Iterator , typename DofObjType , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_xyz ( const Communicator comm,
const Iterator &  range_begin,
const Iterator &  range_end,
LocationMap< DofObjType > *  location_map,
SyncFunctor &  sync 
)

Request data about a range of ghost nodes uniquely identified by their xyz location, or about a range of active ghost elements uniquely identified by their centroids' xyz location. Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type. The user-provided location_map will be used and left unchanged if it is provided, or filled and cleared if it is empty.
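
A brief sketch of providing the location map, assuming LocationMap::init() fills the point-to-object multimap from the mesh and reusing the hypothetical SyncMyValues functor sketched earlier:

// Hypothetical sketch: sync nodal data keyed by xyz location.
// mesh and my_values are the assumed surroundings.
LocationMap<Node> loc_map;
loc_map.init(mesh);   // fill the point -> Node * multimap

SyncMyValues sync {my_values};
Parallel::sync_dofobject_data_by_xyz
  (mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(),
   &loc_map, sync);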

◆ sync_dofobject_data_by_xyz() [2/2]

template<typename Iterator , typename DofObjType , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_xyz ( const Communicator comm,
const Iterator &  range_begin,
const Iterator &  range_end,
LocationMap< DofObjType > &  location_map,
SyncFunctor &  sync 
)

Definition at line 232 of file parallel_ghost_sync.h.

References data, libMesh::LocationMap< T >::empty(), libMesh::LocationMap< T >::find(), libMesh::DofObject::invalid_processor_id, libMesh::Parallel::Communicator::max(), libMesh::LocationMap< T >::point_of(), pull_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  // We need a valid location_map
#ifdef DEBUG
  bool need_map_update = (range_begin != range_end && location_map.empty());
  comm.max(need_map_update);
  libmesh_assert(!need_map_update);
#endif

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObjType * obj = *it;
      libmesh_assert (obj);
      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid != DofObject::invalid_processor_id)
        ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<Point>>
    requested_objs_pt;
  // Corresponding ids to keep track of
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      {
        requested_objs_pt[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
      }

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObjType * obj = *it;
      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;

      Point p = location_map.point_of(*obj);
      requested_objs_pt[obj_procid].push_back(p);
      requested_objs_id[obj_procid].push_back(obj->id());
    }

  auto gather_functor =
    [&location_map, &sync]
    (processor_id_type /*pid*/, const std::vector<Point> & pts,
     std::vector<typename SyncFunctor::datum> & data)
    {
      // Find the local id of each requested object
      std::size_t query_size = pts.size();
      std::vector<dof_id_type> query_id(query_size);
      for (std::size_t i=0; i != query_size; ++i)
        {
          Point pt = pts[i];

          // Look for this object in the multimap
          DofObjType * obj = location_map.find(pt);

          // We'd better find every object we're asked for
          libmesh_assert (obj);

          // Return the object's correct processor id,
          // and our (correct if it's local) id for it.
          query_id[i] = obj->id();
        }

      // Gather whatever data the user wants
      sync.gather_data(query_id, data);
    };

  auto action_functor =
    [&sync, &requested_objs_id]
    (processor_id_type pid, const std::vector<Point> &,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results
      sync.act_on_data(requested_objs_id[pid], data);
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  Parallel::pull_parallel_vector_data
    (comm, requested_objs_pt, gather_functor, action_functor, ex);
}

◆ sync_element_data_by_parent_id()

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_element_data_by_parent_id ( MeshBase mesh,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost elements uniquely identified by their parent id and which child they are. Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

Definition at line 429 of file parallel_ghost_sync.h.

References libMesh::Elem::active(), libMesh::Elem::child_ptr(), data, libMesh::Elem::has_children(), libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, mesh, libMesh::Elem::parent(), libMesh::DofObject::processor_id(), pull_parallel_vector_data(), and libMesh::Elem::which_child_am_i().

Referenced by libMesh::MeshCommunication::make_elems_parallel_consistent().

433 {
434  const Communicator & comm (mesh.comm());
435 
436  // This function must be run on all processors at once
437  libmesh_parallel_only(comm);
438 
439  // Count the objects to ask each processor about
440  std::vector<dof_id_type>
441  ghost_objects_from_proc(comm.size(), 0);
442 
443  for (Iterator it = range_begin; it != range_end; ++it)
444  {
445  Elem * elem = *it;
446  processor_id_type obj_procid = elem->processor_id();
447  if (obj_procid == comm.rank() ||
448  obj_procid == DofObject::invalid_processor_id)
449  continue;
450  const Elem * parent = elem->parent();
451  if (!parent || !elem->active())
452  continue;
453 
454  ghost_objects_from_proc[obj_procid]++;
455  }
456 
457  // Request sets to send to each processor
458  std::map<processor_id_type, std::vector<dof_id_type>>
459  requested_objs_id;
460  std::map<processor_id_type, std::vector<std::pair<dof_id_type,unsigned char>>>
461  requested_objs_parent_id_child_num;
462 
463  // We know how many objects live on each processor, so reserve()
464  // space for each.
465  for (processor_id_type p=0; p != comm.size(); ++p)
466  if (p != comm.rank() && ghost_objects_from_proc[p])
467  {
468  requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
469  requested_objs_parent_id_child_num[p].reserve(ghost_objects_from_proc[p]);
470  }
471 
472  for (Iterator it = range_begin; it != range_end; ++it)
473  {
474  Elem * elem = *it;
475  processor_id_type obj_procid = elem->processor_id();
476  if (obj_procid == comm.rank() ||
477  obj_procid == DofObject::invalid_processor_id)
478  continue;
479  const Elem * parent = elem->parent();
480  if (!parent || !elem->active())
481  continue;
482 
483  requested_objs_id[obj_procid].push_back(elem->id());
484  requested_objs_parent_id_child_num[obj_procid].push_back
485  (std::make_pair
486  (parent->id(),
487  cast_int<unsigned char>
488  (parent->which_child_am_i(elem))));
489  }
490 
491  auto gather_functor =
492  [&mesh, &sync]
493  (processor_id_type /*pid*/,
494  const std::vector<std::pair<dof_id_type, unsigned char>> & parent_id_child_num,
495  std::vector<typename SyncFunctor::datum> & data)
496  {
497  // Find the id of each requested element
498  std::size_t query_size = parent_id_child_num.size();
499  std::vector<dof_id_type> query_id(query_size);
500  for (std::size_t i=0; i != query_size; ++i)
501  {
502  Elem & parent = mesh.elem_ref(parent_id_child_num[i].first);
503  libmesh_assert(parent.has_children());
504  Elem * child = parent.child_ptr(parent_id_child_num[i].second);
505  libmesh_assert(child);
506  libmesh_assert(child->active());
507  query_id[i] = child->id();
508  }
509 
510  // Gather whatever data the user wants
511  sync.gather_data(query_id, data);
512  };
513 
514  auto action_functor =
515  [&sync, &requested_objs_id]
516  (processor_id_type pid,
517  const std::vector<std::pair<dof_id_type, unsigned char>> &,
518  const std::vector<typename SyncFunctor::datum> & data)
519  {
520  // Let the user process the results
521  sync.act_on_data(requested_objs_id[pid], data);
522  };
523 
524  // Trade requests with other processors
525  typename SyncFunctor::datum * ex = nullptr;
526  pull_parallel_vector_data
527  (comm, requested_objs_parent_id_child_num, gather_functor,
528  action_functor, ex);
529 }

◆ sync_node_data_by_element_id()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_node_data_by_element_id ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, iterating until data is completely in sync and further synchronization passes cause no changes.

Imagine a vertex surrounded by triangles, each on a different processor, with a ghosting policy that includes only face neighbors and not point neighbors. Then the only way for authoritative information to trickle out from that vertex is by being passed along, one neighbor at a time, to processors which mostly don't even see the node's true owner!

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
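
Typical usage is a sketch like the following, assuming a user functor sync implementing the interface above (with the bool-returning act_on_data()); Parallel::SyncEverything provides always-true predicates for the common case where nothing is excluded:

// Bring node data to consistency across the whole mesh, requesting
// through every element and every node.
Parallel::sync_node_data_by_element_id
  (mesh,
   mesh.elements_begin(), mesh.elements_end(),
   Parallel::SyncEverything(), Parallel::SyncEverything(),
   sync);

Because information may need several hops to propagate (as in the vertex example above), the implementation simply repeats the single-pass version until act_on_data() reports no changes on any processor.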

Definition at line 752 of file parallel_ghost_sync.h.

References mesh, and sync_node_data_by_element_id_once().

Referenced by libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_ids_parallel_consistent(), and libMesh::MeshCommunication::make_node_proc_ids_parallel_consistent().

758 {
759  // This function must be run on all processors at once
760  libmesh_parallel_only(mesh.comm());
761 
762  bool need_sync = false;
763 
764  do
765  {
766  need_sync =
767  sync_node_data_by_element_id_once
768  (mesh, range_begin, range_end, elem_check, node_check,
769  sync);
770  } while (need_sync);
771 }

◆ sync_node_data_by_element_id_once()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool libMesh::Parallel::sync_node_data_by_element_id_once ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, assuming a single synchronization pass is necessary.

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

This method returns true iff the sync pass changed any data on any processor.
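
A hypothetical functor fragment (with my_values standing in for user data) highlights the two differences from the simpler sync functors: gather_data() must tolerate ids equal to DofObject::invalid_id, which the listing below generates for queries it cannot resolve locally, and act_on_data() must report whether anything changed:

struct SyncNodeValuesSketch
{
  typedef processor_id_type datum;

  std::map<dof_id_type, processor_id_type> & my_values; // hypothetical storage

  void gather_data (const std::vector<dof_id_type> & ids,
                    std::vector<datum> & data) const
  {
    data.resize(ids.size());
    for (std::size_t i = 0; i != ids.size(); ++i)
      // invalid_id marks requests we could not resolve on this side
      data[i] = (ids[i] == DofObject::invalid_id) ?
        DofObject::invalid_processor_id : my_values.at(ids[i]);
  }

  bool act_on_data (const std::vector<dof_id_type> & ids,
                    const std::vector<datum> & data)
  {
    bool changed = false;
    for (std::size_t i = 0; i != ids.size(); ++i)
      {
        // Skip answers the responder could not provide
        if (data[i] == DofObject::invalid_processor_id)
          continue;
        if (my_values[ids[i]] != data[i])
          {
            my_values[ids[i]] = data[i];
            changed = true;
          }
      }
    return changed;
  }
};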

Definition at line 546 of file parallel_ghost_sync.h.

References libMesh::as_range(), data, libMesh::DofObject::id(), libMesh::DofObject::invalid_id, libMesh::DofObject::invalid_processor_id, mesh, libMesh::Elem::n_nodes(), libMesh::Elem::node_ref(), libMesh::DofObject::processor_id(), and pull_parallel_vector_data().

Referenced by libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), and sync_node_data_by_element_id().

552 {
553  const Communicator & comm (mesh.comm());
554 
555  // Count the objects to ask each processor about
556  std::vector<dof_id_type>
557  ghost_objects_from_proc(comm.size(), 0);
558 
559  for (const auto & elem : as_range(range_begin, range_end))
560  {
561  libmesh_assert (elem);
562 
563  if (!elem_check(elem))
564  continue;
565 
566  const processor_id_type proc_id = elem->processor_id();
567 
568  bool i_have_elem =
569  (proc_id == comm.rank() ||
570  proc_id == DofObject::invalid_processor_id);
571 
572  if (elem->active() && i_have_elem)
573  continue;
574 
575  for (auto n : elem->node_index_range())
576  {
577  if (!node_check(elem, n))
578  continue;
579 
580  const processor_id_type node_pid =
581  elem->node_ref(n).processor_id();
582 
583  if (i_have_elem && (node_pid == comm.rank()))
584  continue;
585 
586  if (i_have_elem)
587  {
588  libmesh_assert_not_equal_to
589  (node_pid, DofObject::invalid_processor_id);
590  ghost_objects_from_proc[node_pid]++;
591  }
592  else
593  {
594  const processor_id_type request_pid =
595  (node_pid == DofObject::invalid_processor_id) ?
596  proc_id : node_pid;
597  ghost_objects_from_proc[request_pid]++;
598  }
599  }
600  }
601 
602  // Now repeat that iteration, filling request sets this time.
603 
604  // Request sets to send to each processor
605  std::map<processor_id_type, std::vector<std::pair<dof_id_type, unsigned char>>>
606  requested_objs_elem_id_node_num;
607 
608  // We know how many objects live on each processor, so reserve()
609  // space for each.
610  for (processor_id_type p=0; p != comm.size(); ++p)
611  if (p != comm.rank() && ghost_objects_from_proc[p])
612  {
613  requested_objs_elem_id_node_num[p].reserve(ghost_objects_from_proc[p]);
614  }
615 
616  for (const auto & elem : as_range(range_begin, range_end))
617  {
618  libmesh_assert (elem);
619 
620  if (!elem_check(elem))
621  continue;
622 
623  const processor_id_type proc_id = elem->processor_id();
624 
625  bool i_have_elem =
626  (proc_id == comm.rank() ||
627  proc_id == DofObject::invalid_processor_id);
628 
629  if (elem->active() && i_have_elem)
630  continue;
631 
632  const dof_id_type elem_id = elem->id();
633 
634  for (auto n : elem->node_index_range())
635  {
636  if (!node_check(elem, n))
637  continue;
638 
639  const Node & node = elem->node_ref(n);
640  const processor_id_type node_pid = node.processor_id();
641 
642  if (i_have_elem && (node_pid == comm.rank()))
643  continue;
644 
645  if (i_have_elem)
646  {
647  libmesh_assert_not_equal_to
648  (node_pid, DofObject::invalid_processor_id);
649  requested_objs_elem_id_node_num[node_pid].push_back
650  (std::make_pair
651  (elem_id,
652  cast_int<unsigned char>(n)));
653  }
654  else
655  {
656  const processor_id_type request_pid =
657  (node_pid == DofObject::invalid_processor_id) ?
658  proc_id : node_pid;
659  requested_objs_elem_id_node_num[request_pid].push_back
660  (std::make_pair
661  (elem_id,
662  cast_int<unsigned char>(n)));
663  }
664  }
665  }
666 
667  auto gather_functor =
668  [&mesh, &sync]
669  (processor_id_type /*pid*/,
670  const std::vector<std::pair<dof_id_type, unsigned char>> & elem_id_node_num,
671  std::vector<typename SyncFunctor::datum> & data)
672  {
673  // Find the id of each requested element
674  std::size_t request_size = elem_id_node_num.size();
675  std::vector<dof_id_type> query_id(request_size);
676  for (std::size_t i=0; i != request_size; ++i)
677  {
678  // We might now get queries about remote elements, in which
679  // case we'll have to ignore them and wait for the query
680  // answer to filter to the querier via another source.
681  const Elem * elem = mesh.query_elem_ptr(elem_id_node_num[i].first);
682 
683  if (elem)
684  {
685  const unsigned int n = elem_id_node_num[i].second;
686  libmesh_assert_less (n, elem->n_nodes());
687 
688  const Node & node = elem->node_ref(n);
689 
690  // This isn't a safe assertion in the case where we're
691  // syncing processor ids
692  // libmesh_assert_equal_to (node->processor_id(), comm.rank());
693 
694  query_id[i] = node.id();
695  }
696  else
697  query_id[i] = DofObject::invalid_id;
698  }
699 
700  // Gather whatever data the user wants
701  sync.gather_data(query_id, data);
702  };
703 
704  bool data_changed = false;
705 
706  auto action_functor =
707  [&sync, &mesh, &requested_objs_elem_id_node_num, &data_changed]
708  (processor_id_type pid,
709  const std::vector<std::pair<dof_id_type, unsigned char>> &,
710  const std::vector<typename SyncFunctor::datum> & data)
711  {
712  const auto & elem_id_node_num =
713  requested_objs_elem_id_node_num[pid];
714 
715  const std::size_t data_size = data.size();
716 
717  libmesh_assert_equal_to(elem_id_node_num.size(), data_size);
718 
719  std::vector<dof_id_type> requested_objs_id(data.size());
720 
721  for (auto i : IntRange<std::size_t>(0,data_size))
722  {
723  const Elem & elem = mesh.elem_ref(elem_id_node_num[i].first);
724  const Node & node = elem.node_ref(elem_id_node_num[i].second);
725  requested_objs_id[i] = node.id();
726  }
727 
728  // Let the user process the results. If any of the results
729  // were different than what the user expected, then we may
730  // need to sync again just in case this processor has to
731  // pass on the changes to yet another processor.
732  if (sync.act_on_data(requested_objs_id, data))
733  data_changed = true;
734  };
735 
736  // Trade requests with other processors
737  typename SyncFunctor::datum * ex = nullptr;
738  pull_parallel_vector_data
739  (comm, requested_objs_elem_id_node_num, gather_functor,
740  action_functor, ex);
741 
742  comm.max(data_changed);
743 
744  return data_changed;
745 }

◆ unpack_range() [1/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
void libMesh::Parallel::unpack_range ( const typename std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out,
const T *  output_type 
)
inline

◆ unpack_range() [2/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
void libMesh::Parallel::unpack_range ( const std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out_iter,
const T *   
)
inline

Helper function for range unpacking

Definition at line 194 of file packing.h.

References libMesh::Parallel::Packing< T >::packed_size(), and libMesh::Parallel::Packing< T >::unpack().

198 {
199  // Loop through the buffer and unpack each object, returning the
200  // object pointer via the output iterator
201  typename std::vector<buffertype>::const_iterator
202  next_object_start = buffer.begin();
203 
204  while (next_object_start < buffer.end())
205  {
206  *out_iter++ = Parallel::Packing<T>::unpack(next_object_start, context);
207  next_object_start +=
208  Parallel::Packing<T>::packed_size(next_object_start);
209  }
210 
211  // We should have used up the exact amount of data in the buffer
212  libmesh_assert (next_object_start == buffer.end());
213 }

◆ wait() [1/2]

Status libMesh::Parallel::wait ( Request &  r)
inline

Wait for a non-blocking send or receive to finish

Definition at line 129 of file request.h.

References libMesh::Parallel::Request::wait().

129 { return r.wait(); }

◆ wait() [2/2]

◆ waitany()

std::size_t libMesh::Parallel::waitany ( std::vector< Request > &  r)

Wait for at least one non-blocking operation to finish. Return the index of the request which completed.

Definition at line 219 of file request.C.

References libMesh::Parallel::Request::_prior_request, libMesh::Parallel::Request::get(), and libMesh::Parallel::Request::post_wait_work.

Referenced by pull_parallel_vector_data(), and push_parallel_vector_data().

220 {
221  libmesh_assert(!r.empty());
222 
223  int index = 0;
224  int r_size = cast_int<int>(r.size());
225  std::vector<request> raw(r_size);
226  for (int i=0; i != r_size; ++i)
227  {
228  Request * root = &r[i];
229  // If we have prior requests, we need to complete the first one
230  // first
231  while (root->_prior_request.get())
232  root = root->_prior_request.get();
233  raw[i] = *root->get();
234  }
235 
236  bool only_priors_completed = false;
237 
238  Request * next;
239 
240  do
241  {
242 #ifdef LIBMESH_HAVE_MPI
243  libmesh_call_mpi
244  (MPI_Waitany(r_size, raw.data(), &index, MPI_STATUS_IGNORE));
245 #endif
246  only_priors_completed = false; // reset for this pass
247  Request * completed = &r[index];
248  next = completed;
249 
250  // If we completed a prior request, we're not really done yet,
251  // so find the next in that line to try again.
252  while (completed->_prior_request.get())
253  {
254  only_priors_completed = true;
255  next = completed;
256  completed = completed->_prior_request.get();
257  }
258 
259  // Do any post-wait work for the completed request
260  if (completed->post_wait_work)
261  for (auto & item : completed->post_wait_work->first)
262  {
263  // The user should never try to give us non-existent work or try
264  // to wait() twice.
265  libmesh_assert (item);
266  item->run();
267  delete item;
268  item = nullptr;
269  }
270 
271  next->_prior_request.reset(nullptr);
272  raw[index] = *next->get();
273 
274  } while(only_priors_completed);
275 
276  return index;
277 }

Variable Documentation

◆ any_source

◆ any_tag

const MessageTag libMesh::Parallel::any_tag = MessageTag(MPI_ANY_TAG)

Default message tag ids

Definition at line 115 of file message_tag.h.

◆ no_tag

const MessageTag libMesh::Parallel::no_tag = MessageTag(0)

Definition at line 120 of file message_tag.h.
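
As a sketch of why explicit tags matter, two concurrent exchanges with the same neighbor can be kept from matching each other's messages; the ring partners and buffers here are illustrative, and get_unique_tag() asks the Communicator for a tag value it will treat as reserved:

// Exchange with ring neighbors, two messages per neighbor.
const processor_id_type dest = (comm.rank() + 1) % comm.size();
const processor_id_type src  = (comm.rank() + comm.size() - 1) % comm.size();

Parallel::MessageTag coord_tag = comm.get_unique_tag(1234);
Parallel::MessageTag id_tag    = comm.get_unique_tag(1235);

std::vector<Real>        coords_out(3), coords_in;
std::vector<dof_id_type> ids_out(3),    ids_in;

comm.send (dest, coords_out, coord_tag);
comm.send (dest, ids_out,    id_tag);

// Receives match on tag, so arrival order does not matter.
comm.receive (src, coords_in, coord_tag);
comm.receive (src, ids_in,    id_tag);

(A robust code would post the sends non-blockingly or use send_receive(); the point here is only the tag matching.)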