// Constructor: sort the local data and set up per-bin bookkeeping.
template <typename KeyType,
          typename IdxType>
Sort<KeyType,IdxType>::Sort (const Parallel::Communicator & comm,
                             std::vector<KeyType> & d) :
  ParallelObject(comm),
  _n_procs(cast_int<processor_id_type>(comm.size())),
  _bin_is_sorted(false),
  _data(d)
{
  // The local data must be sorted before binning.
  std::sort(_data.begin(), _data.end());
  _local_bin_sizes.resize(_n_procs);
}

// Bin the local data, exchange bins across processors, then sort locally.
template <typename KeyType,
          typename IdxType>
void Sort<KeyType,IdxType>::sort()
{
  // Find the global data size; catch the degenerate cases here.
  IdxType global_data_size = cast_int<IdxType>(_data.size());
  this->comm().sum (global_data_size);
  if (global_data_size < 2)
    {
      // Empty or single-entry global range: keep the local data as the bin.
      _my_bin = _data;
      this->comm().allgather (static_cast<IdxType>(_my_bin.size()),
                              _local_bin_sizes);
    }
  else
    {
      if (this->n_processors() > 1)
        {
          this->binsort();
          this->communicate_bins();
        }
      else
        _my_bin = _data;

      this->sort_local_bin();
    }
  _bin_is_sorted = true;
}

// Bin the sorted local data based on the global min and max keys.
template <typename KeyType,
          typename IdxType>
void Sort<KeyType,IdxType>::binsort()
{
  // Find the global min and max across all processors.  Negating the
  // local min lets a single max() reduction recover both extrema.
  std::vector<KeyType> global_min_max(2);
  global_min_max[0] = -_data.front();
  global_min_max[1] =  _data.back();

  this->comm().max(global_min_max);

  // Multiply the min by -1 to obtain the true min.
  global_min_max[0] *= -1;
  // Bin-sort based on the global min and max, then record the local bin sizes.
  Parallel::BinSorter<KeyType> bs (this->comm(), _data);
  bs.binsort(_n_procs, global_min_max[1], global_min_max[0]);

  for (processor_id_type i=0; i<_n_procs; ++i)
    _local_bin_sizes[i] = bs.sizeof_bin(i);
}

#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)

// Full specialization for Hilbert-indexed DofObjectKeys: the global min
// and max are found with MPI_Allreduce and user-defined reduction ops.
template <>
void Sort<Parallel::DofObjectKey,unsigned int>::binsort()
{
  Parallel::DofObjectKey
    local_min, local_max,
    global_min, global_max;
  if (_data.empty())
    {
      // No local data: seed the reduction with extreme sentinel keys.
#ifdef LIBMESH_ENABLE_UNIQUE_ID
      local_min.first.rack0 = local_min.first.rack1 = local_min.first.rack2 = static_cast<Hilbert::inttype>(-1);
      local_min.second = std::numeric_limits<unique_id_type>::max();
      local_max.first.rack0 = local_max.first.rack1 = local_max.first.rack2 = 0;
      local_max.second = 0;
#else
      local_min.rack0 = local_min.rack1 = local_min.rack2 = static_cast<Hilbert::inttype>(-1);
      local_max.rack0 = local_max.rack1 = local_max.rack2 = 0;
#endif
    }
  else
    {
      // The local data is already sorted, so its extrema are at the ends.
      local_min = _data.front();
      local_max = _data.back();
    }
  // Create user-defined MPI reduction operations for DofObjectKeys.
  MPI_Op hilbert_max, hilbert_min;
  MPI_Op_create ((MPI_User_function*)dofobjectkey_max_op, true, &hilbert_max);
  MPI_Op_create ((MPI_User_function*)dofobjectkey_min_op, true, &hilbert_min);
  // Communicate to determine the global min and max for all processors.
  MPI_Allreduce(&local_min, &global_min, 1,
                Parallel::StandardType<Parallel::DofObjectKey>(&local_min),
                hilbert_min, this->comm().get());
  MPI_Allreduce(&local_max, &global_max, 1,
                Parallel::StandardType<Parallel::DofObjectKey>(&local_max),
                hilbert_max, this->comm().get());
  MPI_Op_free (&hilbert_max);
  MPI_Op_free (&hilbert_min);
  // Bin-sort based on the global min and max, then record the local bin sizes.
  Parallel::BinSorter<Parallel::DofObjectKey> bs (this->comm(), _data);
  bs.binsort(_n_procs, global_max, global_min);

  for (processor_id_type i=0; i<_n_procs; ++i)
    _local_bin_sizes[i] = bs.sizeof_bin(i);
}
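
// For reference, a sketch (illustration only, not the library's definition)
// of what a user-defined reduction like dofobjectkey_max_op looks like: it
// follows the MPI_User_function signature and writes the element-wise
// maximum of the two buffers back into "inout".
//
//   void dofobjectkey_max_op (libMesh::Parallel::DofObjectKey * in,
//                             libMesh::Parallel::DofObjectKey * inout,
//                             int * len, void *)
//   {
//     for (int i=0; i<*len; ++i)
//       if (inout[i] < in[i])
//         inout[i] = in[i];
//   }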

#endif // #ifdef LIBMESH_HAVE_LIBHILBERT


// Send each local bin to its owning processor and collect the keys
// destined for this processor's bin.
template <typename KeyType,
          typename IdxType>
void Sort<KeyType,IdxType>::communicate_bins()
{
#ifdef LIBMESH_HAVE_MPI
  // Find each section of our data to send.
  IdxType local_offset = 0;
  std::map<processor_id_type, std::vector<KeyType> > pushed_keys, received_keys;
  for (processor_id_type i=0; i != _n_procs; ++i)
    {
      IdxType next_offset = local_offset + _local_bin_sizes[i];
      if (_local_bin_sizes[i])
        {
          auto begin = _data.begin() + local_offset;
          auto end = _data.begin() + next_offset;
          pushed_keys[i].assign(begin, end);
        }
      local_offset = next_offset;
    }
  // Store the keys received from each processor, then push our bins.
  auto keys_action_functor =
    [& received_keys]
    (processor_id_type pid,
     const std::vector<KeyType> & keys)
    {
      received_keys[pid] = keys;
    };

  Parallel::push_parallel_vector_data
    (this->comm(), pushed_keys, keys_action_functor);
  // Copy the received keys into this processor's bin.
  std::size_t my_bin_size = 0;
  for (auto & p : received_keys)
    my_bin_size += p.second.size();

  _my_bin.reserve(my_bin_size);

  for (auto & p : received_keys)
    _my_bin.insert(_my_bin.end(), p.second.begin(), p.second.end());
  // Sanity check: everyone should agree on the size of this processor's bin.
  std::vector<IdxType> global_bin_sizes = _local_bin_sizes;
  this->comm().sum(global_bin_sizes);

  libmesh_assert_equal_to
    (global_bin_sizes[this->processor_id()], _my_bin.size());
#endif // LIBMESH_HAVE_MPI
}


// Sort the keys that landed in this processor's bin.
template <typename KeyType,
          typename IdxType>
void Sort<KeyType,IdxType>::sort_local_bin()
{
  std::sort(_my_bin.begin(), _my_bin.end());
}

// Return the sorted bin; warn if sort() has not been called yet.
template <typename KeyType,
          typename IdxType>
const std::vector<KeyType> & Sort<KeyType,IdxType>::bin()
{
  if (!_bin_is_sorted)
    libMesh::out << "Warning! Bin is not yet sorted!" << std::endl;

  return _my_bin;
}

// Explicit instantiation for Hilbert-indexed keys.
#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)
template class Sort<Parallel::DofObjectKey, unsigned int>;
#endif
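
// Minimal usage sketch (illustration only; assumes a valid
// Parallel::Communicator named "comm" and a Sort instantiation for the
// chosen key type):
//
//   std::vector<double> local_keys = /* this processor's unsorted keys */;
//
//   Parallel::Sort<double, unsigned int> sorter (comm, local_keys);
//   sorter.sort();   // bin the keys, exchange bins, sort the local bin
//
//   // Each processor now holds one contiguous, globally ordered bin.
//   const std::vector<double> & my_bin = sorter.bin();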