mesh_communication.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2018 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // Local Includes
21 #include "libmesh/boundary_info.h"
23 #include "libmesh/elem.h"
25 #include "libmesh/libmesh_config.h"
26 #include "libmesh/libmesh_common.h"
28 #include "libmesh/mesh_base.h"
31 #include "libmesh/mesh_tools.h"
32 #include "libmesh/parallel.h"
33 #include "libmesh/parallel_elem.h"
34 #include "libmesh/parallel_node.h"
36 #include "libmesh/utility.h"
37 #include "libmesh/remote_elem.h"
38 
39 // C++ Includes
40 #include <numeric>
41 #include <set>
42 #include <unordered_set>
43 #include <unordered_map>
44 
45 
46 
47 //-----------------------------------------------
48 // anonymous namespace for implementation details
49 namespace {
50 
51 using namespace libMesh;
52 
53 struct SyncNeighbors
54 {
55  typedef std::vector<dof_id_type> datum;
56 
57  SyncNeighbors(MeshBase & _mesh) :
58  mesh(_mesh) {}
59 
60  MeshBase & mesh;
61 
62  // Find the neighbor ids for each requested element
63  void gather_data (const std::vector<dof_id_type> & ids,
64  std::vector<datum> & neighbors) const
65  {
66  neighbors.resize(ids.size());
67 
68  for (std::size_t i=0; i != ids.size(); ++i)
69  {
70  // Look for this element in the mesh
71  // We'd better find every element we're asked for
72  const Elem & elem = mesh.elem_ref(ids[i]);
73 
74  // Return the element's neighbors
75  const unsigned int n_neigh = elem.n_neighbors();
76  neighbors[i].resize(n_neigh);
77  for (unsigned int n = 0; n != n_neigh; ++n)
78  {
79  const Elem * neigh = elem.neighbor_ptr(n);
80  if (neigh)
81  {
82  libmesh_assert_not_equal_to(neigh, remote_elem);
83  neighbors[i][n] = neigh->id();
84  }
85  else
86  neighbors[i][n] = DofObject::invalid_id;
87  }
88  }
89  }
90 
91  void act_on_data (const std::vector<dof_id_type> & ids,
92  const std::vector<datum> & neighbors) const
93  {
94  for (std::size_t i=0; i != ids.size(); ++i)
95  {
96  Elem & elem = mesh.elem_ref(ids[i]);
97 
98  const datum & new_neigh = neighbors[i];
99 
100  const unsigned int n_neigh = elem.n_neighbors();
101  libmesh_assert_equal_to (n_neigh, new_neigh.size());
102 
103  for (unsigned int n = 0; n != n_neigh; ++n)
104  {
105  const dof_id_type new_neigh_id = new_neigh[n];
106  const Elem * old_neigh = elem.neighbor_ptr(n);
107  if (old_neigh && old_neigh != remote_elem)
108  {
109  libmesh_assert_equal_to(old_neigh->id(), new_neigh_id);
110  }
111  else if (new_neigh_id == DofObject::invalid_id)
112  {
113  libmesh_assert (!old_neigh);
114  }
115  else
116  {
117  Elem * neigh = mesh.query_elem_ptr(new_neigh_id);
118  if (neigh)
119  elem.set_neighbor(n, neigh);
120  else
121  elem.set_neighbor(n, const_cast<RemoteElem *>(remote_elem));
122  }
123  }
124  }
125  }
126 };
127 
128 
129 }
130 
131 
132 
133 namespace libMesh
134 {
135 
136 
// Ask each of the mesh's ghosting functors which elements must
// accompany the given element range when sent to processor `pid`,
// accumulating the union (plus the range itself) in
// connected_elements.
//
// NOTE(review): doxygen lines 137 and 139-140 (the function name --
// query_ghosting_functors, per the call site later in this file --
// plus its mesh and element-iterator parameters) were lost in
// extraction; confirm against the upstream source.
138  processor_id_type pid,
141  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
142 {
143  for (auto & gf :
// NOTE(review): the range expression of this loop (doxygen lines
// 144-145, presumably the mesh's ghosting-functor range) was lost in
// extraction.
146  {
147  GhostingFunctor::map_type elements_to_ghost;
148  libmesh_assert(gf);
// Run the functor over the element range to find what it wants ghosted.
149  (*gf)(elem_it, elem_end, pid, elements_to_ghost);
150 
151  // We can ignore the CouplingMatrix in ->second, but we
152  // need to ghost all the elements in ->first.
153  for (auto & pr : elements_to_ghost)
154  {
155  const Elem * elem = pr.first;
156  libmesh_assert(elem != remote_elem);
157  connected_elements.insert(elem);
158  }
159  }
160 
161  // The GhostingFunctors won't be telling us about the elements from
162  // pid; we need to add those ourselves.
163  for (; elem_it != elem_end; ++elem_it)
164  connected_elements.insert(*elem_it);
165 }
166 
167 
// For each element in the given range, ensure its non-remote
// immediate children are also present in connected_elements, so
// serialized output (XdrIO) never sees an inactive local element with
// remote_elem children.
//
// NOTE(review): doxygen lines 167-170 (the function name --
// connect_children, judging by the "immediate children" call pattern
// in redistribute() -- plus its mesh and element-iterator parameters)
// were lost in extraction; confirm against the upstream source.
171  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
172 {
173  // None of these parameters are used when !LIBMESH_ENABLE_AMR.
174  libmesh_ignore(mesh, elem_it, elem_end, connected_elements);
175 
176 #ifdef LIBMESH_ENABLE_AMR
177  // Our XdrIO output needs inactive local elements to not have any
178  // remote_elem children. Let's make sure that doesn't happen.
179  //
180  for (const auto & elem : as_range(elem_it, elem_end))
181  {
182  if (elem->has_children())
183  for (auto & child : elem->child_ref_range())
184  if (&child != remote_elem)
185  connected_elements.insert(&child);
186  }
187 #endif // LIBMESH_ENABLE_AMR
188 }
189 
190 
191 void connect_families(std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
192 {
193  // This parameter is not used when !LIBMESH_ENABLE_AMR.
194  libmesh_ignore(connected_elements);
195 
196 #ifdef LIBMESH_ENABLE_AMR
197 
198  // Because our set is sorted by ascending level, we can traverse it
199  // in reverse order, adding parents as we go, and end up with all
200  // ancestors added. This is safe for std::set where insert doesn't
201  // invalidate iterators.
202  //
203  // This only works because we do *not* cache
204  // connected_elements.rend(), whose value can change when we insert
205  // elements which are sorted before the original rend.
206  //
207  // We're also going to get subactive descendents here, when any
208  // exist. We're iterating in the wrong direction to do that
209  // non-recursively, so we'll cop out and rely on total_family_tree.
210  // Iterating backwards does mean that we won't be querying the newly
211  // inserted subactive elements redundantly.
212 
213  std::set<const Elem *, CompareElemIdsByLevel>::reverse_iterator
214  elem_rit = connected_elements.rbegin();
215 
216  for (; elem_rit != connected_elements.rend(); ++elem_rit)
217  {
218  const Elem * elem = *elem_rit;
219  libmesh_assert(elem);
220  const Elem * parent = elem->parent();
221 
222  // We let ghosting functors worry about only active elements,
223  // but the remote processor needs all its semilocal elements'
224  // ancestors and active semilocal elements' descendants too.
225  if (parent)
226  connected_elements.insert (parent);
227 
228  if (elem->active() && elem->has_children())
229  {
230  std::vector<const Elem *> subactive_family;
231  elem->total_family_tree(subactive_family);
232  for (std::size_t i=0; i != subactive_family.size(); ++i)
233  {
234  libmesh_assert(subactive_family[i] != remote_elem);
235  connected_elements.insert(subactive_family[i]);
236  }
237  }
238  }
239 
240 # ifdef DEBUG
241  // Let's be paranoid and make sure that all our ancestors
242  // really did get inserted. I screwed this up the first time
243  // by caching rend, and I can easily imagine screwing it up in
244  // the future by changing containers.
245  for (const auto & elem : connected_elements)
246  {
247  libmesh_assert(elem);
248  const Elem * parent = elem->parent();
249  if (parent)
250  libmesh_assert(connected_elements.count(parent));
251  }
252 # endif // DEBUG
253 
254 #endif // LIBMESH_ENABLE_AMR
255 }
256 
257 
258 void reconnect_nodes (const std::set<const Elem *, CompareElemIdsByLevel> & connected_elements,
259  std::set<const Node *> & connected_nodes)
260 {
261  // We're done using the nodes list for element decisions; now
262  // let's reuse it for nodes of the elements we've decided on.
263  connected_nodes.clear();
264 
265  for (const auto & elem : connected_elements)
266  for (auto & n : elem->node_ref_range())
267  connected_nodes.insert(&n);
268 }
269 
270 
271 
272 
273 // ------------------------------------------------------------
274 // MeshCommunication class members
275 void MeshCommunication::clear ()
276 {
277  // _neighboring_processors.clear();
278 }
279 
280 
281 
282 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
283 // ------------------------------------------------------------
// Serial (no-MPI) build: a single processor already owns everything,
// so redistribution is a no-op.
//
// NOTE(review): doxygen line 284 (the signature -- presumably
// MeshCommunication::redistribute(DistributedMesh &, bool) const, per
// the #else branch below) was lost in extraction.
285 {
286  // no MPI == one processor, no redistribution
287  return;
288 }
289 
290 #else
291 // ------------------------------------------------------------
// Move each element (plus required ghosts and all nodes they touch)
// to the processor that the current partitioning assigns it to.
//
// NOTE(review): doxygen line 292 (the signature opening -- presumably
// void MeshCommunication::redistribute (DistributedMesh & mesh, --
// matching the parameter line below) was lost in extraction.
293  bool newly_coarsened_only) const
294 {
295  // This method will be called after a new partitioning has been
296  // assigned to the elements. This partitioning was defined in
297  // terms of the active elements, and "trickled down" to the
298  // parents and nodes as to be consistent.
299  //
300  // The point is that the entire concept of local elements is
301  // kinda shaky in this method. Elements which were previously
302  // local may now be assigned to other processors, so we need to
303  // send those off. Similarly, we need to accept elements from
304  // other processors.
305 
306  // This method is also useful in the more limited case of
307  // post-coarsening redistribution: if elements are only ghosting
308  // neighbors of their active elements, but adaptive coarsening
309  // causes an inactive element to become active, then we may need a
310  // copy of that inactive element's neighbors.
311 
312  // The approach is as follows:
313  // (1) send all relevant elements we have stored to their proper homes
314  // (2) receive elements from all processors, watching for duplicates
315  // (3) deleting all nonlocal elements elements
316  // (4) obtaining required ghost elements from neighboring processors
317  libmesh_parallel_only(mesh.comm());
318  libmesh_assert (!mesh.is_serial());
// NOTE(review): doxygen lines 319-320 (likely further debug
// assertions) were lost in extraction.
321 
322  LOG_SCOPE("redistribute()", "MeshCommunication");
323 
324  // Get a few unique message tags to use in communications; we'll
325  // default to some numbers around pi*1000
// NOTE(review): doxygen line 326 (presumably the declaration
// introducing nodestag/elemstag, e.g. a Parallel::MessageTag) was
// lost in extraction.
327  nodestag = mesh.comm().get_unique_tag(3141),
328  elemstag = mesh.comm().get_unique_tag(3142);
329 
330  // Figure out how many nodes and elements we have which are assigned to each
331  // processor. send_n_nodes_and_elem_per_proc contains the number of nodes/elements
332  // we will be sending to each processor, recv_n_nodes_and_elem_per_proc contains
333  // the number of nodes/elements we will be receiving from each processor.
334  // Format:
335  // send_n_nodes_and_elem_per_proc[2*pid+0] = number of nodes to send to pid
336  // send_n_nodes_and_elem_per_proc[2*pid+1] = number of elements to send to pid
337  std::vector<dof_id_type> send_n_nodes_and_elem_per_proc(2*mesh.n_processors(), 0);
338 
339  std::vector<Parallel::Request>
340  node_send_requests, element_send_requests;
341 
342  // We're going to sort elements-to-send by pid in one pass, to avoid
343  // sending predicated iterators through the whole mesh N_p times
344  std::unordered_map<processor_id_type, std::vector<Elem *>> send_to_pid;
345 
346  const MeshBase::const_element_iterator send_elems_begin =
347 #ifdef LIBMESH_ENABLE_AMR
348  newly_coarsened_only ?
349  mesh.flagged_elements_begin(Elem::JUST_COARSENED) :
350 #endif
// NOTE(review): doxygen line 351 (the ternary's other branch --
// presumably the mesh's full/active element begin iterator) was lost
// in extraction.
352 
353  const MeshBase::const_element_iterator send_elems_end =
354 #ifdef LIBMESH_ENABLE_AMR
355  newly_coarsened_only ?
356  mesh.flagged_elements_end(Elem::JUST_COARSENED) :
357 #endif
// NOTE(review): doxygen line 358 (the matching end iterator) was lost
// in extraction.
359 
360  for (auto & elem : as_range(send_elems_begin, send_elems_end))
361  send_to_pid[elem->processor_id()].push_back(elem);
362 
363  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
364  if (pid != mesh.processor_id()) // don't send to ourselves!!
365  {
366  // Build up a list of nodes and elements to send to processor pid.
367  // We will certainly send all the elements assigned to this processor,
368  // but we will also ship off any elements which are required
369  // to be ghosted and any nodes which are used by any of the
370  // above.
371 
372  const auto elements_vec_it = send_to_pid.find(pid);
373 
374  // If we don't have any just-coarsened elements to send to
375  // pid, then there won't be any nodes or any elements pulled
376  // in by ghosting either, and we're done with this pid.
377  if (elements_vec_it == send_to_pid.end())
378  continue;
379 
380  const auto & p_elements = elements_vec_it->second;
381  libmesh_assert(!p_elements.empty());
382 
383  Elem * const * elempp = p_elements.data();
384  Elem * const * elemend = elempp + p_elements.size();
385 
386 #ifndef LIBMESH_ENABLE_AMR
387  // This parameter is not used when !LIBMESH_ENABLE_AMR.
388  libmesh_ignore(newly_coarsened_only);
389  libmesh_assert(!newly_coarsened_only);
390 #endif
391 
// NOTE(review): doxygen lines 392-393 (presumably the declaration of
// elem_it as a MeshBase::const_element_iterator constructed over the
// raw pointer range) were lost in extraction.
394  (elempp, elemend, Predicates::NotNull<Elem * const *>());
395 
396  const MeshBase::const_element_iterator elem_end =
// NOTE(review): doxygen line 397 (the iterator constructor name) was
// lost in extraction.
398  (elemend, elemend, Predicates::NotNull<Elem * const *>());
399 
400  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
401 
402  // See which to-be-ghosted elements we need to send
403  query_ghosting_functors (mesh, pid, elem_it, elem_end,
404  elements_to_send);
405 
406  // The inactive elements we need to send should have their
407  // immediate children present.
// NOTE(review): doxygen line 408 (the opening of this call --
// presumably connect_children(mesh, mesh.pid_elements_begin(pid),)
// was lost in extraction.
409  mesh.pid_elements_end(pid),
410  elements_to_send);
411 
412  // The elements we need should have their ancestors and their
413  // subactive children present too.
414  connect_families(elements_to_send);
415 
416  std::set<const Node *> connected_nodes;
417  reconnect_nodes(elements_to_send, connected_nodes);
418 
419  // the number of nodes we will ship to pid
420  send_n_nodes_and_elem_per_proc[2*pid+0] =
421  cast_int<dof_id_type>(connected_nodes.size());
422 
423  // send any nodes off to the destination processor
424  libmesh_assert (!connected_nodes.empty());
425  node_send_requests.push_back(Parallel::request());
426 
427  mesh.comm().send_packed_range (pid, &mesh,
428  connected_nodes.begin(),
429  connected_nodes.end(),
430  node_send_requests.back(),
431  nodestag);
432 
433  // the number of elements we will send to this processor
434  send_n_nodes_and_elem_per_proc[2*pid+1] =
435  cast_int<dof_id_type>(elements_to_send.size());
436 
437  // send the elements off to the destination processor
438  libmesh_assert (!elements_to_send.empty());
439  element_send_requests.push_back(Parallel::request());
440 
441  mesh.comm().send_packed_range (pid, &mesh,
442  elements_to_send.begin(),
443  elements_to_send.end(),
444  element_send_requests.back(),
445  elemstag);
446  }
447 
448  std::vector<dof_id_type> recv_n_nodes_and_elem_per_proc(send_n_nodes_and_elem_per_proc);
449 
450  mesh.comm().alltoall (recv_n_nodes_and_elem_per_proc);
451 
452  // In general we will only need to communicate with a subset of the other processors.
453  // I can't immediately think of a case where we will send elements but not nodes, but
454  // these are only bools and we have the information anyway...
455  std::vector<bool>
456  send_node_pair(mesh.n_processors(),false), send_elem_pair(mesh.n_processors(),false),
457  recv_node_pair(mesh.n_processors(),false), recv_elem_pair(mesh.n_processors(),false);
458 
459  unsigned int
460  n_send_node_pairs=0, n_send_elem_pairs=0,
461  n_recv_node_pairs=0, n_recv_elem_pairs=0;
462 
463  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
464  {
465  if (send_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to send
466  {
467  send_node_pair[pid] = true;
468  n_send_node_pairs++;
469  }
470 
471  if (send_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to send
472  {
473  send_elem_pair[pid] = true;
474  n_send_elem_pairs++;
475  }
476 
477  if (recv_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to receive
478  {
479  recv_node_pair[pid] = true;
480  n_recv_node_pairs++;
481  }
482 
483  if (recv_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to receive
484  {
485  recv_elem_pair[pid] = true;
486  n_recv_elem_pairs++;
487  }
488  }
489  libmesh_assert_equal_to (n_send_node_pairs, node_send_requests.size());
490  libmesh_assert_equal_to (n_send_elem_pairs, element_send_requests.size());
491 
492  // Receive nodes.
493  // We now know how many processors will be sending us nodes.
494  for (unsigned int node_comm_step=0; node_comm_step<n_recv_node_pairs; node_comm_step++)
495  // but we don't necessarily want to impose an ordering, so
496  // just process whatever message is available next.
// NOTE(review): doxygen lines 497 and 499 (the opening of a
// receive_packed_range call and its output-iterator argument) were
// lost in extraction.
498  &mesh,
500  (Node**)nullptr,
501  nodestag);
502 
503  // Receive elements.
504  // Similarly we know how many processors are sending us elements,
505  // but we don't really care in what order we receive them.
506  for (unsigned int elem_comm_step=0; elem_comm_step<n_recv_elem_pairs; elem_comm_step++)
// NOTE(review): doxygen lines 507 and 509 (the matching
// receive_packed_range opening and output-iterator argument) were
// lost in extraction.
508  &mesh,
510  (Elem**)nullptr,
511  elemstag);
512 
513  // Wait for all sends to complete
514  Parallel::wait (node_send_requests);
515  Parallel::wait (element_send_requests);
516 
517  // Check on the redistribution consistency
518 #ifdef DEBUG
// NOTE(review): doxygen lines 519 and 521 (debug-only consistency
// checks) were lost in extraction.
520 
522 #endif
523 
524  // If we had a point locator, it's invalid now that there are new
525  // elements it can't locate.
// NOTE(review): doxygen line 526 (presumably a call invalidating the
// mesh's point locator) was lost in extraction.
527 
528  // We now have all elements and nodes redistributed; our ghosting
529  // functors should be ready to redistribute and/or recompute any
530  // cached data they use too.
// NOTE(review): doxygen line 531 (the loop header iterating over the
// mesh's ghosting functors) was lost in extraction.
532  gf->redistribute();
533 }
534 #endif // LIBMESH_HAVE_MPI
535 
536 
537 
538 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
539 // ------------------------------------------------------------
540 void MeshCommunication::gather_neighboring_elements (DistributedMesh &) const
541 {
542  // no MPI == one processor, no need for this method...
543  return;
544 }
545 #else
546 // ------------------------------------------------------------
// Exchange ghost elements across processor boundaries so every local
// element ends up with a full, consistent set of face-neighbor links
// (with RemoteElem standing in for neighbors we do not store).
547 void MeshCommunication::gather_neighboring_elements (DistributedMesh & mesh) const
548 {
549  // Don't need to do anything if there is
550  // only one processor.
551  if (mesh.n_processors() == 1)
552  return;
553 
554  // This function must be run on all processors at once
555  libmesh_parallel_only(mesh.comm());
556 
557  LOG_SCOPE("gather_neighboring_elements()", "MeshCommunication");
558 
559  //------------------------------------------------------------------
560  // The purpose of this function is to provide neighbor data structure
561  // consistency for a parallel, distributed mesh. In libMesh we require
562  // that each local element have access to a full set of valid face
563  // neighbors. In some cases this requires us to store "ghost elements" -
564  // elements that belong to other processors but we store to provide
565  // data structure consistency. Also, it is assumed that any element
566  // with a nullptr neighbor resides on a physical domain boundary. So,
567  // even our "ghost elements" must have non-nullptr neighbors. To handle
568  // this the concept of "RemoteElem" is used - a special construct which
569  // is used to denote that an element has a face neighbor, but we do
570  // not actually store detailed information about that neighbor. This
571  // is required to prevent data structure explosion.
572  //
573  // So when this method is called we should have only local elements.
574  // These local elements will then find neighbors among the local
575  // element set. After this is completed, any element with a nullptr
576  // neighbor has either (i) a face on the physical boundary of the mesh,
577  // or (ii) a neighboring element which lives on a remote processor.
578  // To handle case (ii), we communicate the global node indices connected
579  // to all such faces to our neighboring processors. They then send us
580  // all their elements with a nullptr neighbor that are connected to any
581  // of the nodes in our list.
582  //------------------------------------------------------------------
583 
584  // Let's begin with finding consistent neighbor data information
585  // for all the elements we currently have. We'll use a clean
586  // slate here - clear any existing information, including RemoteElem's.
587  mesh.find_neighbors (/* reset_remote_elements = */ true,
588  /* reset_current_list = */ true);
589 
590  // Get a unique message tag to use in communications; we'll default
591  // to some numbers around pi*10000
// NOTE(review): doxygen line 592 (presumably the Parallel::MessageTag
// declaration for element_neighbors_tag) was lost in extraction.
593  element_neighbors_tag = mesh.comm().get_unique_tag(31416);
594 
595  // Now any element with a nullptr neighbor either
596  // (i) lives on the physical domain boundary, or
597  // (ii) lives on an inter-processor boundary.
598  // We will now gather all the elements from adjacent processors
599  // which are of the same state, which should address all the type (ii)
600  // elements.
601 
602  // A list of all the processors which *may* contain neighboring elements.
603  // (for development simplicity, just make this the identity map)
604  std::vector<processor_id_type> adjacent_processors;
605  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
606  if (pid != mesh.processor_id())
607  adjacent_processors.push_back (pid);
608 
609 
610  const processor_id_type n_adjacent_processors =
611  cast_int<processor_id_type>(adjacent_processors.size());
612 
613  //-------------------------------------------------------------------------
614  // Let's build a list of all nodes which live on nullptr-neighbor sides.
615  // For simplicity, we will use a set to build the list, then transfer
616  // it to a vector for communication.
617  std::vector<dof_id_type> my_interface_node_list;
618  std::vector<const Elem *> my_interface_elements;
619  {
620  std::set<dof_id_type> my_interface_node_set;
621 
622  // Pull objects out of the loop to reduce heap operations
623  std::unique_ptr<const Elem> side;
624 
625  // since parent nodes are a subset of children nodes, this should be sufficient
626  for (const auto & elem : mesh.active_local_element_ptr_range())
627  {
628  libmesh_assert(elem);
629 
630  if (elem->on_boundary()) // denotes *any* side has a nullptr neighbor
631  {
632  my_interface_elements.push_back(elem); // add the element, but only once, even
633  // if there are multiple nullptr neighbors
634  for (auto s : elem->side_index_range())
635  if (elem->neighbor_ptr(s) == nullptr)
636  {
637  elem->build_side_ptr(side, s);
638 
639  for (unsigned int n=0; n<side->n_vertices(); n++)
640  my_interface_node_set.insert (side->node_id(n));
641  }
642  }
643  }
644 
645  my_interface_node_list.reserve (my_interface_node_set.size());
646  my_interface_node_list.insert (my_interface_node_list.end(),
647  my_interface_node_set.begin(),
648  my_interface_node_set.end());
649  }
650 
651  // we will now send my_interface_node_list to all of the adjacent processors.
652  // note that for the time being we will copy the list to a unique buffer for
653  // each processor so that we can use a nonblocking send and not access the
654  // buffer again until the send completes. it is my understanding that the
655  // MPI 2.1 standard seeks to remove this restriction as unnecessary, so in
656  // the future we should change this to send the same buffer to each of the
657  // adjacent processors. - BSK 11/17/2008
658  std::vector<std::vector<dof_id_type>>
659  my_interface_node_xfer_buffers (n_adjacent_processors, my_interface_node_list);
660  std::map<processor_id_type, unsigned char> n_comm_steps;
661 
662  std::vector<Parallel::Request> send_requests (3*n_adjacent_processors);
663  unsigned int current_request = 0;
664 
665  for (unsigned int comm_step=0; comm_step<n_adjacent_processors; comm_step++)
666  {
667  n_comm_steps[adjacent_processors[comm_step]]=1;
668  mesh.comm().send (adjacent_processors[comm_step],
669  my_interface_node_xfer_buffers[comm_step],
670  send_requests[current_request++],
671  element_neighbors_tag);
672  }
673 
674  //-------------------------------------------------------------------------
675  // processor pairings are symmetric - I expect to receive an interface node
676  // list from each processor in adjacent_processors as well!
677  // now we will catch an incoming node list for each of our adjacent processors.
678  //
679  // we are done with the adjacent_processors list - note that it is in general
680  // a superset of the processors we truly share elements with. so let's
681  // clear the superset list, and we will fill it with the true list.
682  adjacent_processors.clear();
683 
684  std::vector<dof_id_type> common_interface_node_list;
685 
686  // we expect two classes of messages -
687  // (1) incoming interface node lists, to which we will reply with our elements
688  // touching nodes in the list, and
689  // (2) replies from the requests we sent off previously.
690  // (2.a) - nodes
691  // (2.b) - elements
692  // so we expect 3 communications from each adjacent processor.
693  // by structuring the communication in this way we hopefully impose no
694  // order on the handling of the arriving messages. in particular, we
695  // should be able to handle the case where we receive a request and
696  // all replies from processor A before even receiving a request from
697  // processor B.
698 
699  for (unsigned int comm_step=0; comm_step<3*n_adjacent_processors; comm_step++)
700  {
701  //------------------------------------------------------------------
702  // catch incoming node list
// NOTE(review): doxygen lines 703-704 (presumably the declaration of
// `status` via a blocking probe on any source with this tag) were
// lost in extraction.
705  element_neighbors_tag));
706  const processor_id_type
707  source_pid_idx = cast_int<processor_id_type>(status.source()),
708  dest_pid_idx = source_pid_idx;
709 
710  //------------------------------------------------------------------
711  // first time - incoming request
712  if (n_comm_steps[source_pid_idx] == 1)
713  {
714  n_comm_steps[source_pid_idx]++;
715 
716  mesh.comm().receive (source_pid_idx,
717  common_interface_node_list,
718  element_neighbors_tag);
719  const std::size_t
720  their_interface_node_list_size = common_interface_node_list.size();
721 
722  // we now have the interface node list from processor source_pid_idx.
723  // now we can find all of our elements which touch any of these nodes
724  // and send copies back to this processor. however, we can make our
725  // search more efficient by first excluding all the nodes in
726  // their list which are not also contained in
727  // my_interface_node_list. we can do this in place as a set
728  // intersection.
729  common_interface_node_list.erase
730  (std::set_intersection (my_interface_node_list.begin(),
731  my_interface_node_list.end(),
732  common_interface_node_list.begin(),
733  common_interface_node_list.end(),
734  common_interface_node_list.begin()),
735  common_interface_node_list.end());
736 
737  if (false)
738  libMesh::out << "[" << mesh.processor_id() << "] "
739  << "my_interface_node_list.size()=" << my_interface_node_list.size()
740  << ", [" << source_pid_idx << "] "
741  << "their_interface_node_list.size()=" << their_interface_node_list_size
742  << ", common_interface_node_list.size()=" << common_interface_node_list.size()
743  << std::endl;
744 
745  // Now we need to see which of our elements touch the nodes in the list.
746  // We will certainly send all the active elements which intersect source_pid_idx,
747  // but we will also ship off the other elements in the same family tree
748  // as the active ones for data structure consistency.
749  //
750  // FIXME - shipping full family trees is unnecessary and inefficient.
751  //
752  // We also ship any nodes connected to these elements. Note
753  // some of these nodes and elements may be replicated from
754  // other processors, but that is OK.
755  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
756  std::set<const Node *> connected_nodes;
757 
758  // Check for quick return?
759  if (common_interface_node_list.empty())
760  {
761  // let's try to be smart here - if we have no nodes in common,
762  // we cannot share elements. so post the messages expected
763  // from us here and go on about our business.
764  // note that even though these are nonblocking sends
765  // they should complete essentially instantly, because
766  // in all cases the send buffers are empty
767  mesh.comm().send_packed_range (dest_pid_idx,
768  &mesh,
769  connected_nodes.begin(),
770  connected_nodes.end(),
771  send_requests[current_request++],
772  element_neighbors_tag);
773 
774  mesh.comm().send_packed_range (dest_pid_idx,
775  &mesh,
776  elements_to_send.begin(),
777  elements_to_send.end(),
778  send_requests[current_request++],
779  element_neighbors_tag);
780 
781  continue;
782  }
783  // otherwise, this really *is* an adjacent processor.
784  adjacent_processors.push_back(source_pid_idx);
785 
786  std::vector<const Elem *> family_tree;
787 
788  for (std::size_t e=0, n_shared_nodes=0; e<my_interface_elements.size(); e++, n_shared_nodes=0)
789  {
790  const Elem * elem = my_interface_elements[e];
791 
792  for (unsigned int n=0; n<elem->n_vertices(); n++)
793  if (std::binary_search (common_interface_node_list.begin(),
794  common_interface_node_list.end(),
795  elem->node_id(n)))
796  {
797  n_shared_nodes++;
798 
799  // TBD - how many nodes do we need to share
800  // before we care? certainly 2, but 1? not
801  // sure, so let's play it safe...
802  if (n_shared_nodes > 0) break;
803  }
804 
805  if (n_shared_nodes) // share at least one node?
806  {
807  elem = elem->top_parent();
808 
809  // avoid a lot of duplicated effort -- if we already have elem
810  // in the set its entire family tree is already in the set.
811  if (!elements_to_send.count(elem))
812  {
813 #ifdef LIBMESH_ENABLE_AMR
814  elem->family_tree(family_tree);
815 #else
816  family_tree.clear();
817  family_tree.push_back(elem);
818 #endif
819  for (std::size_t leaf=0; leaf<family_tree.size(); leaf++)
820  {
821  elem = family_tree[leaf];
822  elements_to_send.insert (elem);
823 
824  for (auto & n : elem->node_ref_range())
825  connected_nodes.insert (&n);
826  }
827  }
828  }
829  }
830 
831  // The elements_to_send and connected_nodes sets now contain all
832  // the elements and nodes we need to send to this processor.
833  // All that remains is to pack up the objects (along with
834  // any boundary conditions) and send the messages off.
835  {
836  libmesh_assert (connected_nodes.empty() || !elements_to_send.empty());
837  libmesh_assert (!connected_nodes.empty() || elements_to_send.empty());
838 
839  // send the nodes off to the destination processor
840  mesh.comm().send_packed_range (dest_pid_idx,
841  &mesh,
842  connected_nodes.begin(),
843  connected_nodes.end(),
844  send_requests[current_request++],
845  element_neighbors_tag);
846 
847  // send the elements off to the destination processor
848  mesh.comm().send_packed_range (dest_pid_idx,
849  &mesh,
850  elements_to_send.begin(),
851  elements_to_send.end(),
852  send_requests[current_request++],
853  element_neighbors_tag);
854  }
855  }
856  //------------------------------------------------------------------
857  // second time - reply of nodes
858  else if (n_comm_steps[source_pid_idx] == 2)
859  {
860  n_comm_steps[source_pid_idx]++;
861 
862  mesh.comm().receive_packed_range (source_pid_idx,
863  &mesh,
// NOTE(review): doxygen line 864 (the output-iterator argument of
// this receive_packed_range call) was lost in extraction.
865  (Node**)nullptr,
866  element_neighbors_tag);
867  }
868  //------------------------------------------------------------------
869  // third time - reply of elements
870  else if (n_comm_steps[source_pid_idx] == 3)
871  {
872  n_comm_steps[source_pid_idx]++;
873 
874  mesh.comm().receive_packed_range (source_pid_idx,
875  &mesh,
// NOTE(review): doxygen line 876 (the output-iterator argument of
// this receive_packed_range call) was lost in extraction.
877  (Elem**)nullptr,
878  element_neighbors_tag);
879  }
880  //------------------------------------------------------------------
881  // fourth time - shouldn't happen
882  else
883  {
884  libMesh::err << "ERROR: unexpected number of replies: "
885  << n_comm_steps[source_pid_idx]
886  << std::endl;
887  }
888  } // done catching & processing replies associated with tag ~ 100,000pi
889 
890  // allow any pending requests to complete
891  Parallel::wait (send_requests);
892 
893  // If we had a point locator, it's invalid now that there are new
894  // elements it can't locate.
// NOTE(review): doxygen line 895 (presumably a call invalidating the
// mesh's point locator) was lost in extraction.
896 
897  // We can now find neighbor information for the interfaces between
898  // local elements and ghost elements.
899  mesh.find_neighbors (/* reset_remote_elements = */ true,
900  /* reset_current_list = */ false);
901 
902  // Ghost elements may not have correct remote_elem neighbor links,
903  // and we may not be able to locally infer correct neighbor links to
904  // remote elements. So we synchronize ghost element neighbor links.
905  SyncNeighbors nsync(mesh);
906 
// NOTE(review): doxygen line 907 (the opening of the call consuming
// nsync -- presumably Parallel::sync_dofobject_data_by_id) was lost
// in extraction.
908  (mesh.comm(), mesh.elements_begin(), mesh.elements_end(), nsync);
909 }
910 #endif // LIBMESH_HAVE_MPI
911 
912 
913 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
914 // ------------------------------------------------------------
915 void MeshCommunication::send_coarse_ghosts(MeshBase &) const
916 {
917  // no MPI == one processor, no need for this method...
918  return;
919 }
920 #else
// Satisfy ghosting requirements after coarsening on a DistributedMesh:
// processors that coarsened an element push the associated ghosted
// elements (and their nodes) to the owner of the newly-activated parent.
//
// NOTE(review): this listing is a doc-extraction; the embedded line
// numbers jump in several places below, showing that statement lines
// containing cross-referenced names were dropped.
921 void MeshCommunication::send_coarse_ghosts(MeshBase & mesh) const
922 {
923 
924  // Don't need to do anything if all processors already ghost all non-local
925  // elements.
926  if (mesh.is_serial())
927  return;
928 
929  // This algorithm uses the MeshBase::flagged_pid_elements_begin/end iterators
930  // which are only available when AMR is enabled.
931 #ifndef LIBMESH_ENABLE_AMR
932  libmesh_error_msg("Calling MeshCommunication::send_coarse_ghosts() requires AMR to be enabled. "
933  "Please configure libmesh with --enable-amr.");
934 #else
935  // When we coarsen elements on a DistributedMesh, we make their
936  // parents active. This may increase the ghosting requirements on
937  // the processor which owns the newly-activated parent element. To
938  // ensure ghosting requirements are satisfied, processors which
939  // coarsen an element will send all the associated ghosted elements
940  // to all processors which own any of the coarsened-away-element's
941  // siblings.
942  typedef std::unordered_map<processor_id_type, std::vector<Elem *>> ghost_map;
943  ghost_map coarsening_elements_to_ghost;
944 
945  const processor_id_type proc_id = mesh.processor_id();
946  // Look for just-coarsened elements
947  for (auto elem : as_range(mesh.flagged_pid_elements_begin(Elem::COARSEN, proc_id),
948  mesh.flagged_pid_elements_end(Elem::COARSEN, proc_id)))
949  {
950  // If it's flagged for coarsening it had better have a parent
951  libmesh_assert(elem->parent());
952 
953  // On a distributed mesh:
954  // If we don't own this element's parent but we do own it, then
955  // there is a chance that we are aware of ghost elements which
956  // the parent's owner needs us to send them.
957  const processor_id_type their_proc_id = elem->parent()->processor_id();
958  if (their_proc_id != proc_id)
959  coarsening_elements_to_ghost[their_proc_id].push_back(elem);
960  }
961 
962  const processor_id_type n_proc = mesh.n_processors();
963 
964  // Get a few unique message tags to use in communications; we'll
965  // default to some numbers around pi*1000
// NOTE(review): the declaration line (`const Parallel::MessageTag`)
// preceding these two initializers was dropped by the extraction.
967  nodestag = mesh.comm().get_unique_tag(3141),
968  elemstag = mesh.comm().get_unique_tag(3142);
969 
970  std::vector<Parallel::Request> send_requests;
971 
972  // Using unsigned char instead of bool since it'll get converted for
973  // MPI use later anyway
974  std::vector<unsigned char> send_to_proc(n_proc, 0);
975 
976  for (processor_id_type p=0; p != n_proc; ++p)
977  {
978  if (p == proc_id)
// NOTE(review): `break` stops the send loop at our own rank, so nothing
// is ever sent to processors with id > proc_id; `continue` looks
// intended here — confirm against upstream libMesh before relying on it.
979  break;
980 
981  // We'll send these asynchronously, but their data will be
982  // packed into Parallel:: buffers so it will be okay when the
983  // original containers are destructed before the sends complete.
984  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
985  std::set<const Node *> nodes_to_send;
986 
987  const ghost_map::const_iterator it =
988  coarsening_elements_to_ghost.find(p);
989  if (it != coarsening_elements_to_ghost.end())
990  {
991  const std::vector<Elem *> & elems = it->second;
992  libmesh_assert(elems.size());
993 
994  // Make some fake element iterators defining this vector of
995  // elements
996  Elem * const * elempp = const_cast<Elem * const *>(elems.data());
997  Elem * const * elemend = elempp+elems.size();
// NOTE(review): the iterator-constructor lines (999 and 1001) were
// dropped by the extraction; elem_it/elem_end wrap [elempp, elemend).
998  const MeshBase::const_element_iterator elem_it =
1000  const MeshBase::const_element_iterator elem_end =
1002 
// Ask every ghosting functor what this range of coarsened elements
// requires to be ghosted on processor p.
1003  for (auto & gf : as_range(mesh.ghosting_functors_begin(),
1005  {
1006  GhostingFunctor::map_type elements_to_ghost;
1007  libmesh_assert(gf);
1008  (*gf)(elem_it, elem_end, p, elements_to_ghost);
1009 
1010  // We can ignore the CouplingMatrix in ->second, but we
1011  // need to ghost all the elements in ->first.
1012  for (auto & pr : elements_to_ghost)
1013  {
1014  const Elem * elem = pr.first;
1015  libmesh_assert(elem);
1016  while (elem)
1017  {
1018  libmesh_assert(elem != remote_elem);
1019  elements_to_send.insert(elem);
1020  for (auto & n : elem->node_ref_range())
1021  nodes_to_send.insert(&n);
1022  elem = elem->parent();
1023  }
1024  }
1025  }
1026 
1027  send_requests.push_back(Parallel::request());
1028 
// NOTE(review): the `mesh.comm().send_packed_range (p, &mesh, ...`
// opening line (1029) was dropped by the extraction here.
1030  nodes_to_send.begin(),
1031  nodes_to_send.end(),
1032  send_requests.back(),
1033  nodestag);
1034 
1035  send_requests.push_back(Parallel::request());
1036 
1037  send_to_proc[p] = 1; // true
1038 
// NOTE(review): the matching `send_packed_range` opening line (1039)
// for the element send was also dropped.
1040  elements_to_send.begin(),
1041  elements_to_send.end(),
1042  send_requests.back(),
1043  elemstag);
1044  }
1045  }
1046 
1047  // Find out how many other processors are sending us elements+nodes.
1048  std::vector<unsigned char> recv_from_proc(send_to_proc);
1049  mesh.comm().alltoall(recv_from_proc);
1050 
1051  const processor_id_type n_receives = cast_int<processor_id_type>
1052  (std::count(recv_from_proc.begin(), recv_from_proc.end(), 1));
1053 
1054  // Receive nodes first since elements will need to attach to them
1055  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1056  {
// NOTE(review): the `receive_packed_range (Parallel::any_source, ...`
// opening line and the mesh_inserter_iterator argument were dropped.
1059  &mesh,
1061  (Node**)nullptr,
1062  nodestag);
1063  }
1064 
1065  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1066  {
// NOTE(review): same extraction gap as the node-receive loop above.
1069  &mesh,
1071  (Elem**)nullptr,
1072  elemstag);
1073  }
1074 
1075  // Wait for all sends to complete
1076  Parallel::wait (send_requests);
1077 #endif // LIBMESH_ENABLE_AMR
1078 }
1079 
1080 #endif // LIBMESH_HAVE_MPI
1081 
1082 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1083 // ------------------------------------------------------------
1084 void MeshCommunication::broadcast (MeshBase &) const
1085 {
1086  // no MPI == one processor, no need for this method...
1087  return;
1088 }
1089 #else
1090 // ------------------------------------------------------------
// Replicate processor 0's mesh onto every other processor: clear the
// non-root meshes, then broadcast nodes and elements (coarsest level
// first) in packed form.
//
// NOTE(review): several broadcast call lines were dropped by the doc
// extraction (visible as jumps in the embedded line numbers below).
1091 void MeshCommunication::broadcast (MeshBase & mesh) const
1092 {
1093  // Don't need to do anything if there is
1094  // only one processor.
1095  if (mesh.n_processors() == 1)
1096  return;
1097 
1098  // This function must be run on all processors at once
1099  libmesh_parallel_only(mesh.comm());
1100 
1101  LOG_SCOPE("broadcast()", "MeshCommunication");
1102 
1103  // Explicitly clear the mesh on all but processor 0.
1104  if (mesh.processor_id() != 0)
1105  mesh.clear();
1106 
1107  // Broadcast nodes
// NOTE(review): the `mesh.comm().broadcast_packed_range (...` opening
// line (1108) and its trailing inserter argument (1112) were dropped.
1109  mesh.nodes_begin(),
1110  mesh.nodes_end(),
1111  &mesh,
1113 
1114  // Broadcast elements from coarsest to finest, so that child
1115  // elements will see their parents already in place.
1116  //
1117  // When restarting from a checkpoint, we may have elements which are
1118  // assigned to a processor but which have not yet been sent to that
1119  // processor, so we need to use a paranoid n_levels() count and not
1120  // the usual fast algorithm.
1121  const unsigned int n_levels = MeshTools::paranoid_n_levels(mesh);
1122 
1123  for (unsigned int l=0; l != n_levels; ++l)
// NOTE(review): the per-level broadcast_packed_range call body
// (lines 1124-1126, 1128) was dropped by the extraction.
1127  &mesh,
1129 
1130  // Make sure mesh_dimension and elem_dimensions are consistent.
// NOTE(review): the statement on line 1131 was dropped here.
1132 
1133  // Broadcast all of the named entity information
// NOTE(review): the statements on lines 1134-1136 were dropped here.
1137 
1138  // If we had a point locator, it's invalid now that there are new
1139  // elements it can't locate.
// NOTE(review): the invalidation call on line 1140 was dropped here.
1141 
1142  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1143  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1144 
1145 #ifdef DEBUG
1146  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
1147  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
1148 #endif
1149 }
1150 #endif // LIBMESH_HAVE_MPI
1151 
1152 
1153 
1154 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1155 // ------------------------------------------------------------
1156 void MeshCommunication::gather (const processor_id_type, DistributedMesh &) const
1157 {
1158  // no MPI == one processor, no need for this method...
1159  return;
1160 }
1161 #else
1162 // ------------------------------------------------------------
// Gather (or allgather, when root_id == invalid_processor_id) the whole
// distributed mesh onto the root processor(s): nodes first, then
// elements level by level so children find their parents in place.
//
// NOTE(review): the (all)gather_packed_range call bodies were partially
// dropped by the doc extraction (jumps in the embedded line numbers).
1163 void MeshCommunication::gather (const processor_id_type root_id, DistributedMesh & mesh) const
1164 {
1165  // Check for quick return
1166  if (mesh.n_processors() == 1)
1167  return;
1168 
1169  // This function must be run on all processors at once
1170  libmesh_parallel_only(mesh.comm());
1171 
1172  LOG_SCOPE("(all)gather()", "MeshCommunication");
1173 
// Ternary selects allgather (invalid root id) vs. gather-to-root.
1174  (root_id == DofObject::invalid_processor_id) ?
1175 
// NOTE(review): the `mesh.comm().allgather_packed_range (&mesh, ...`
// opening line (1176) and inserter argument (1179) were dropped.
1177  mesh.nodes_begin(),
1178  mesh.nodes_end(),
1180 
1181  mesh.comm().gather_packed_range (root_id,
1182  &mesh,
1183  mesh.nodes_begin(),
1184  mesh.nodes_end(),
// NOTE(review): the trailing inserter argument (line 1185) was dropped.
1186 
1187  // Gather elements from coarsest to finest, so that child
1188  // elements will see their parents already in place.
1189  const unsigned int n_levels = MeshTools::n_levels(mesh);
1190 
1191  for (unsigned int l=0; l != n_levels; ++l)
1192  (root_id == DofObject::invalid_processor_id) ?
1193 
// NOTE(review): the per-level allgather call body (1194-1197) dropped.
1198 
1199  mesh.comm().gather_packed_range (root_id,
1200  &mesh,
// NOTE(review): the level-iterator and inserter arguments (1201-1203)
// were dropped by the extraction here.
1204 
1205  // If we had a point locator, it's invalid now that there are new
1206  // elements it can't locate.
// NOTE(review): the invalidation call on line 1207 was dropped here.
1208 
1209  // If we are doing an allgather(), perform sanity check on the result.
1210  if (root_id == DofObject::invalid_processor_id)
1211  {
1212  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1213  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1214  }
1215 
1216  // Inform new elements of their neighbors,
1217  // while resetting all remote_elem links on
1218  // the ranks which did the gather.
1219  mesh.find_neighbors(root_id == DofObject::invalid_processor_id ||
1220  root_id == mesh.processor_id());
1221 
1222  // All done, but let's make sure it's done correctly
1223 
1224 #ifdef DEBUG
// NOTE(review): the validity-check call on line 1225 was dropped here.
1226 #endif
1227 }
1228 #endif // LIBMESH_HAVE_MPI
1229 
1230 
1231 
1232 // Functor for make_elems_parallel_consistent and
1233 // make_node_ids_parallel_consistent
1234 namespace {
1235 
// Functor for Parallel::sync_*: pushes authoritative DofObject ids to
// other processors and renumbers local objects via a MeshBase
// member-function pointer (renumber_node or renumber_elem).
1236 struct SyncIds
1237 {
1238  typedef dof_id_type datum;
1239  typedef void (MeshBase::*renumber_obj)(dof_id_type, dof_id_type);
1240 
1241  SyncIds(MeshBase & _mesh, renumber_obj _renumberer) :
1242  mesh(_mesh),
1243  renumber(_renumberer) {}
1244 
// NOTE(review): the member declaration `MeshBase & mesh;` (line 1245,
// used in act_on_data below) was dropped by the doc extraction here.
1246  renumber_obj renumber;
1247  // renumber_obj & renumber;
1248 
1249  // Find the id of each requested DofObject -
1250  // Parallel::sync_* already did the work for us
1251  void gather_data (const std::vector<dof_id_type> & ids,
1252  std::vector<datum> & ids_out) const
1253  {
1254  ids_out = ids;
1255  }
1256 
// Apply the authoritative ids we received: renumber any object whose
// local id disagrees.
1257  void act_on_data (const std::vector<dof_id_type> & old_ids,
1258  const std::vector<datum> & new_ids) const
1259  {
1260  for (std::size_t i=0; i != old_ids.size(); ++i)
1261  if (old_ids[i] != new_ids[i])
1262  (mesh.*renumber)(old_ids[i], new_ids[i]);
1263  }
1264 };
1265 
1266 
// Functor for Parallel::sync_*: establishes definitive node ids.  A
// node id is only definitive if we own the node or another processor
// told us its definitive id; act_on_data returns true while more
// synchronization rounds are still needed.
1267 struct SyncNodeIds
1268 {
1269  typedef dof_id_type datum;
1270 
1271  SyncNodeIds(MeshBase & _mesh) :
1272  mesh(_mesh) {}
1273 
1274  MeshBase & mesh;
1275 
1276  // We only know a Node id() is definitive if we own the Node or if
1277  // we're told it's definitive. We keep track of the latter cases by
1278  // putting definitively id'd ghost nodes into this set.
1279  typedef std::unordered_set<const Node *> uset_type;
1280  uset_type definitive_ids;
1281 
1282  // We should never be told two different definitive ids for the same
1283  // node, but let's check on that in debug mode.
1284 #ifdef DEBUG
1285  typedef std::unordered_map<dof_id_type, dof_id_type> umap_type;
// NOTE(review): the member declaration `umap_type definitive_renumbering;`
// (line 1286, used in act_on_data below) was dropped by the extraction.
1287 #endif
1288 
1289  // Find the id of each requested DofObject -
1290  // Parallel::sync_* already tried to do the work for us, but we can
1291  // only say the result is definitive if we own the DofObject or if
1292  // we were given the definitive result from another processor.
1293  void gather_data (const std::vector<dof_id_type> & ids,
1294  std::vector<datum> & ids_out) const
1295  {
1296  ids_out.clear();
1297  ids_out.resize(ids.size(), DofObject::invalid_id);
1298 
1299  for (std::size_t i = 0; i != ids.size(); ++i)
1300  {
1301  const dof_id_type id = ids[i];
1302  const Node * node = mesh.query_node_ptr(id);
1303  if (node && (node->processor_id() == mesh.processor_id() ||
1304  definitive_ids.count(node)))
1305  ids_out[i] = id;
1306  }
1307  }
1308 
// Apply received answers.  Returns true if anything changed (a node
// was renumbered, or we still lack a definitive id), signalling that
// another sync iteration is required.
1309  bool act_on_data (const std::vector<dof_id_type> & old_ids,
1310  const std::vector<datum> & new_ids)
1311  {
1312  bool data_changed = false;
1313  for (std::size_t i=0; i != old_ids.size(); ++i)
1314  {
1315  const dof_id_type new_id = new_ids[i];
1316 
1317  const dof_id_type old_id = old_ids[i];
1318 
1319  Node * node = mesh.query_node_ptr(old_id);
1320 
1321  // If we can't find the node we were asking about, another
1322  // processor must have already given us the definitive id
1323  // for it
1324  if (!node)
1325  {
1326  // But let's check anyway in debug mode
1327 #ifdef DEBUG
1328  libmesh_assert
1329  (definitive_renumbering.count(old_id));
1330  libmesh_assert_equal_to
1331  (new_id, definitive_renumbering[old_id]);
1332 #endif
1333  continue;
1334  }
1335 
1336  // If we asked for an id but there's no definitive id ready
1337  // for us yet, then we can't quit trying to sync yet.
1338  if (new_id == DofObject::invalid_id)
1339  {
1340  // But we might have gotten a definitive id from a
1341  // different request
1342  if (!definitive_ids.count(mesh.node_ptr(old_id)))
1343  data_changed = true;
1344  }
1345  else
1346  {
// A ghost node with an answer from its owner now has a definitive id.
1347  if (node->processor_id() != mesh.processor_id())
1348  definitive_ids.insert(node);
1349  if (old_id != new_id)
1350  {
1351 #ifdef DEBUG
1352  libmesh_assert
1353  (!definitive_renumbering.count(old_id));
1354  definitive_renumbering[old_id] = new_id;
1355 #endif
1356  mesh.renumber_node(old_id, new_id);
1357  data_changed = true;
1358  }
1359  }
1360  }
1361  return data_changed;
1362  }
1363 };
1364 
1365 
1366 #ifdef LIBMESH_ENABLE_AMR
1367 struct SyncPLevels
1368 {
1369  typedef unsigned char datum;
1370 
1371  SyncPLevels(MeshBase & _mesh) :
1372  mesh(_mesh) {}
1373 
1374  MeshBase & mesh;
1375 
1376  // Find the p_level of each requested Elem
1377  void gather_data (const std::vector<dof_id_type> & ids,
1378  std::vector<datum> & ids_out) const
1379  {
1380  ids_out.reserve(ids.size());
1381 
1382  for (std::size_t i=0; i != ids.size(); ++i)
1383  {
1384  Elem & elem = mesh.elem_ref(ids[i]);
1385 
1386  ids_out.push_back(cast_int<unsigned char>(elem.p_level()));
1387  }
1388  }
1389 
1390  void act_on_data (const std::vector<dof_id_type> & old_ids,
1391  const std::vector<datum> & new_p_levels) const
1392  {
1393  for (std::size_t i=0; i != old_ids.size(); ++i)
1394  {
1395  Elem & elem = mesh.elem_ref(old_ids[i]);
1396 
1397  elem.set_p_level(new_p_levels[i]);
1398  }
1399  }
1400 };
1401 #endif // LIBMESH_ENABLE_AMR
1402 
1403 
1404 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1405 template <typename DofObjSubclass>
1406 struct SyncUniqueIds
1407 {
1408  typedef unique_id_type datum;
1409  typedef DofObjSubclass* (MeshBase::*query_obj)(const dof_id_type);
1410 
1411  SyncUniqueIds(MeshBase &_mesh, query_obj _querier) :
1412  mesh(_mesh),
1413  query(_querier) {}
1414 
1415  MeshBase &mesh;
1416  query_obj query;
1417 
1418  // Find the id of each requested DofObject -
1419  // Parallel::sync_* already did the work for us
1420  void gather_data (const std::vector<dof_id_type>& ids,
1421  std::vector<datum>& ids_out) const
1422  {
1423  ids_out.reserve(ids.size());
1424 
1425  for (std::size_t i=0; i != ids.size(); ++i)
1426  {
1427  DofObjSubclass *d = (mesh.*query)(ids[i]);
1428  libmesh_assert(d);
1429 
1430  ids_out.push_back(d->unique_id());
1431  }
1432  }
1433 
1434  void act_on_data (const std::vector<dof_id_type>& ids,
1435  const std::vector<datum>& unique_ids) const
1436  {
1437  for (std::size_t i=0; i != ids.size(); ++i)
1438  {
1439  DofObjSubclass *d = (mesh.*query)(ids[i]);
1440  libmesh_assert(d);
1441 
1442  d->set_unique_id() = unique_ids[i];
1443  }
1444  }
1445 };
1446 #endif // LIBMESH_ENABLE_UNIQUE_ID
1447 }
1448 
1449 
1450 
1451 // ------------------------------------------------------------
// Iterate the SyncNodeIds functor until every processor agrees on
// every node's id.
1452 void MeshCommunication::make_node_ids_parallel_consistent (MeshBase & mesh)
1453 {
1454  // This function must be run on all processors at once
1455  libmesh_parallel_only(mesh.comm());
1456 
1457  // We need to agree on which processor owns every node, but we can't
1458  // easily assert that here because we don't currently agree on which
1459  // id every node has, and some of our temporary ids on unrelated
1460  // nodes will "overlap".
1461 //#ifdef DEBUG
1462 // MeshTools::libmesh_assert_parallel_consistent_procids<Node> (mesh);
1463 //#endif // DEBUG
1464 
1465  LOG_SCOPE ("make_node_ids_parallel_consistent()", "MeshCommunication");
1466 
1467  SyncNodeIds syncids(mesh);
// NOTE(review): the Parallel::sync_* call driving syncids (lines
// 1468-1470) was dropped by the doc extraction here.
1471 
1472  // At this point, with both ids and processor ids synced, we can
1473  // finally check for topological consistency of node processor ids.
1474 #ifdef DEBUG
// NOTE(review): the consistency-check call on line 1475 was dropped.
1476 #endif
1477 }
1478 
1479 
1480 
// Synchronize node unique_ids across processors (no-op unless libMesh
// was configured with unique-id support).
1481 void MeshCommunication::make_node_unique_ids_parallel_consistent (MeshBase & mesh)
1482 {
1483  // Avoid unused variable warnings if unique ids aren't enabled.
// NOTE(review): the statement on line 1484 (presumably
// libmesh_ignore(mesh)) was dropped by the doc extraction here.
1485 
1486  // This function must be run on all processors at once
1487  libmesh_parallel_only(mesh.comm());
1488 
1489 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1490  LOG_SCOPE ("make_node_unique_ids_parallel_consistent()", "MeshCommunication");
1491 
1492  SyncUniqueIds<Node> syncuniqueids(mesh, &MeshBase::query_node_ptr);
// NOTE(review): the Parallel::sync_dofobject_data_by_id opening line
// (1493) was dropped by the extraction here.
1494  mesh.nodes_begin(),
1495  mesh.nodes_end(),
1496  syncuniqueids);
1497 
1498 #endif
1499 }
1500 
1501 
1502 
1503 
1504 // ------------------------------------------------------------
// Synchronize element ids (and unique_ids when enabled) of active
// elements across processors.
1505 void MeshCommunication::make_elems_parallel_consistent(MeshBase & mesh)
1506 {
1507  // This function must be run on all processors at once
1508  libmesh_parallel_only(mesh.comm());
1509 
1510  LOG_SCOPE ("make_elems_parallel_consistent()", "MeshCommunication");
1511 
1512  SyncIds syncids(mesh, &MeshBase::renumber_elem);
// NOTE(review): the sync call opening lines (1513-1514) were dropped
// by the doc extraction here.
1515  mesh.active_elements_end(), syncids);
1516 
1517 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1518  SyncUniqueIds<Elem> syncuniqueids(mesh, &MeshBase::query_elem_ptr);
// NOTE(review): the sync call opening lines (1519-1520) were dropped
// by the doc extraction here.
1521  mesh.active_elements_end(), syncuniqueids);
1522 #endif
1523 }
1524 
1525 
1526 
1527 // ------------------------------------------------------------
1528 #ifdef LIBMESH_ENABLE_AMR
// Synchronize element p-refinement levels across processors.
1529 void MeshCommunication::make_p_levels_parallel_consistent(MeshBase & mesh)
1530 {
1531  // This function must be run on all processors at once
1532  libmesh_parallel_only(mesh.comm());
1533 
1534  LOG_SCOPE ("make_p_levels_parallel_consistent()", "MeshCommunication");
1535 
1536  SyncPLevels syncplevels(mesh);
// NOTE(review): the Parallel::sync_dofobject_data_by_id call opening
// lines (1537-1538) were dropped by the doc extraction here.
1539  syncplevels);
1540 }
1541 #endif // LIBMESH_ENABLE_AMR
1542 
1543 
1544 
1545 // Functors for make_node_proc_ids_parallel_consistent
1546 namespace {
1547 
1548 struct SyncProcIds
1549 {
1550  typedef processor_id_type datum;
1551 
1552  SyncProcIds(MeshBase & _mesh) : mesh(_mesh) {}
1553 
1554  MeshBase & mesh;
1555 
1556  // ------------------------------------------------------------
1557  void gather_data (const std::vector<dof_id_type> & ids,
1558  std::vector<datum> & data)
1559  {
1560  // Find the processor id of each requested node
1561  data.resize(ids.size());
1562 
1563  for (std::size_t i=0; i != ids.size(); ++i)
1564  {
1565  // Look for this point in the mesh
1566  if (ids[i] != DofObject::invalid_id)
1567  {
1568  Node & node = mesh.node_ref(ids[i]);
1569 
1570  // Return the node's correct processor id,
1571  data[i] = node.processor_id();
1572  }
1573  else
1574  data[i] = DofObject::invalid_processor_id;
1575  }
1576  }
1577 
1578  // ------------------------------------------------------------
1579  bool act_on_data (const std::vector<dof_id_type> & ids,
1580  const std::vector<datum> proc_ids)
1581  {
1582  bool data_changed = false;
1583 
1584  // Set the ghost node processor ids we've now been informed of
1585  for (std::size_t i=0; i != ids.size(); ++i)
1586  {
1587  Node & node = mesh.node_ref(ids[i]);
1588 
1589  // We may not have ids synched when this synchronization is done, so we
1590  // *can't* use id to load-balance processor id properly; we have to use
1591  // the old heuristic of choosing the smallest valid processor id.
1592  //
1593  // If someone tells us our node processor id is too low, then
1594  // they're wrong. If they tell us our node processor id is
1595  // too high, then we're wrong.
1596  if (node.processor_id() > proc_ids[i])
1597  {
1598  data_changed = true;
1599  node.processor_id() = proc_ids[i];
1600  }
1601  }
1602 
1603  return data_changed;
1604  }
1605 };
1606 
1607 
1608 struct ElemNodesMaybeNew
1609 {
1610  ElemNodesMaybeNew() {}
1611 
1612  bool operator() (const Elem * elem) const
1613  {
1614  // If this element was just refined then it may have new nodes we
1615  // need to work on
1616 #ifdef LIBMESH_ENABLE_AMR
1617  if (elem->refinement_flag() == Elem::JUST_REFINED)
1618  return true;
1619 #endif
1620 
1621  // If this element has remote_elem neighbors then there may have
1622  // been refinement of those neighbors that affect its nodes'
1623  // processor_id()
1624  for (auto neigh : elem->neighbor_ptr_range())
1625  if (neigh == remote_elem)
1626  return true;
1627  return false;
1628  }
1629 };
1630 
1631 
1632 struct NodeWasNew
1633 {
1634  NodeWasNew(const MeshBase & mesh)
1635  {
1636  for (const auto & node : mesh.node_ptr_range())
1637  if (node->processor_id() == DofObject::invalid_processor_id)
1638  was_new.insert(node);
1639  }
1640 
1641  bool operator() (const Elem * elem, unsigned int local_node_num) const
1642  {
1643  if (was_new.count(elem->node_ptr(local_node_num)))
1644  return true;
1645  return false;
1646  }
1647 
1648  std::unordered_set<const Node *> was_new;
1649 };
1650 
1651 }
1652 
1653 
1654 
1655 // ------------------------------------------------------------
// Make node processor ids agree across processors using the
// smallest-valid-pid heuristic implemented by SyncProcIds.
1656 void MeshCommunication::make_node_proc_ids_parallel_consistent(MeshBase & mesh)
1657 {
1658  LOG_SCOPE ("make_node_proc_ids_parallel_consistent()", "MeshCommunication");
1659 
1660  // This function must be run on all processors at once
1661  libmesh_parallel_only(mesh.comm());
1662 
1663  // When this function is called, each section of a parallelized mesh
1664  // should be in the following state:
1665  //
1666  // All nodes should have the exact same physical location on every
1667  // processor where they exist.
1668  //
1669  // Local nodes should have unique authoritative ids,
1670  // and processor ids consistent with all processors which own
1671  // an element touching them.
1672  //
1673  // Ghost nodes touching local elements should have processor ids
1674  // consistent with all processors which own an element touching
1675  // them.
1676  SyncProcIds sync(mesh);
// NOTE(review): the Parallel::sync_* call driving `sync` (lines
// 1677-1679) was dropped by the doc extraction here.
1680 }
1681 
1682 
1683 
1684 // ------------------------------------------------------------
// Assign consistent processor ids to nodes created by refinement:
// first adopt existing pids from processors where the node is not new,
// then pick the lowest touching-element pid for truly new nodes.
1685 void MeshCommunication::make_new_node_proc_ids_parallel_consistent(MeshBase & mesh)
1686 {
1687  LOG_SCOPE ("make_new_node_proc_ids_parallel_consistent()", "MeshCommunication");
1688 
1689  // This function must be run on all processors at once
1690  libmesh_parallel_only(mesh.comm());
1691 
1692  // When this function is called, each section of a parallelized mesh
1693  // should be in the following state:
1694  //
1695  // Local nodes should have unique authoritative ids,
1696  // and new nodes should be unpartitioned.
1697  //
1698  // New ghost nodes touching local elements should be unpartitioned.
1699 
1700  // We may not have consistent processor ids for new nodes (because a
1701  // node may be old and partitioned on one processor but new and
1702  // unpartitioned on another) when we start
1703 #ifdef DEBUG
// NOTE(review): the (active) assertion on line 1704 was dropped by the
// doc extraction here; only the commented-out variant survived.
1705  // MeshTools::libmesh_assert_parallel_consistent_new_node_procids(mesh);
1706 #endif
1707 
1708  // We have two kinds of new nodes. *NEW* nodes are unpartitioned on
1709  // all processors: we need to use a id-independent (i.e. dumb)
1710  // heuristic to partition them. But "new" nodes are newly created
1711  // on some processors (when ghost elements are refined) yet
1712  // correspond to existing nodes on other processors: we need to use
1713  // the existing processor id for them.
1714  //
1715  // A node which is "new" on one processor will be associated with at
1716  // least one ghost element, and we can just query that ghost
1717  // element's owner to find out the correct processor id.
1718 
1719  auto node_unpartitioned =
1720  [](const Elem * elem, unsigned int local_node_num)
1721  { return elem->node_ref(local_node_num).processor_id() ==
1722  DofObject::invalid_processor_id; };
1723 
1724  SyncProcIds sync(mesh);
1725 
// NOTE(review): the Parallel::sync_* call opening lines (1726-1728)
// were dropped by the doc extraction here.
1729  node_unpartitioned, sync);
1730 
1731  // Nodes should now be unpartitioned iff they are truly new; those
1732  // are the *only* nodes we will touch.
1733 #ifdef DEBUG
// NOTE(review): the consistency assertion on line 1734 was dropped.
1735 #endif
1736 
1737  NodeWasNew node_was_new(mesh);
1738 
1739  // Set the lowest processor id we can on truly new nodes
1740  for (auto & elem : mesh.element_ptr_range())
1741  for (auto & node : elem->node_ref_range())
1742  if (node_was_new.was_new.count(&node))
1743  {
1744  processor_id_type & pid = node.processor_id();
1745  pid = std::min(pid, elem->processor_id());
1746  }
1747 
1748  // Then finally see if other processors have a lower option
// NOTE(review): the Parallel::sync_* call opening lines (1749-1750)
// were dropped by the doc extraction here.
1751  ElemNodesMaybeNew(), node_was_new, sync);
1752 
1753  // We should have consistent processor ids when we're done.
1754 #ifdef DEBUG
// NOTE(review): the closing assertions (lines 1755-1756) were dropped.
1757 #endif
1758 }
1759 
1760 
1761 
1762 // ------------------------------------------------------------
// Full node synchronization driver: processor ids first (so we know
// who is authoritative), then ids, then unique_ids.
1763 void MeshCommunication::make_nodes_parallel_consistent (MeshBase & mesh)
1764 {
1765  // This function must be run on all processors at once
1766  libmesh_parallel_only(mesh.comm());
1767 
1768  // When this function is called, each section of a parallelized mesh
1769  // should be in the following state:
1770  //
1771  // All nodes should have the exact same physical location on every
1772  // processor where they exist.
1773  //
1774  // Local nodes should have unique authoritative ids,
1775  // and processor ids consistent with all processors which own
1776  // an element touching them.
1777  //
1778  // Ghost nodes touching local elements should have processor ids
1779  // consistent with all processors which own an element touching
1780  // them.
1781  //
1782  // Ghost nodes should have ids which are either already correct
1783  // or which are in the "unpartitioned" id space.
1784 
1785  // First, let's sync up processor ids. Some of these processor ids
1786  // may be "wrong" from coarsening, but they're right in the sense
1787  // that they'll tell us who has the authoritative dofobject ids for
1788  // each node.
1789 
1790  this->make_node_proc_ids_parallel_consistent(mesh);
1791 
1792  // Second, sync up dofobject ids.
1793  this->make_node_ids_parallel_consistent(mesh);
1794 
1795  // Third, sync up dofobject unique_ids if applicable.
1796  this->make_node_unique_ids_parallel_consistent(mesh);
1797 
1798  // Finally, correct the processor ids to make DofMap happy
// NOTE(review): the call on line 1799 (presumably
// MeshTools::correct_node_proc_ids(mesh)) was dropped by the doc
// extraction here — confirm against upstream libMesh.
1800 }
1801 
1802 
1803 
1804 // ------------------------------------------------------------
// Like make_nodes_parallel_consistent(), but restricted to nodes
// created by refinement: only new-node processor ids are synced first.
1805 void MeshCommunication::make_new_nodes_parallel_consistent (MeshBase & mesh)
1806 {
1807  // This function must be run on all processors at once
1808  libmesh_parallel_only(mesh.comm());
1809 
1810  // When this function is called, each section of a parallelized mesh
1811  // should be in the following state:
1812  //
1813  // All nodes should have the exact same physical location on every
1814  // processor where they exist.
1815  //
1816  // Local nodes should have unique authoritative ids,
1817  // and new nodes should be unpartitioned.
1818  //
1819  // New ghost nodes touching local elements should be unpartitioned.
1820  //
1821  // New ghost nodes should have ids which are either already correct
1822  // or which are in the "unpartitioned" id space.
1823  //
1824  // Non-new nodes should have correct ids and processor ids already.
1825 
1826  // First, let's sync up new nodes' processor ids.
1827 
1828  this->make_new_node_proc_ids_parallel_consistent(mesh);
1829 
1830  // Second, sync up dofobject ids.
1831  this->make_node_ids_parallel_consistent(mesh);
1832 
1833  // Third, sync up dofobject unique_ids if applicable.
1834  this->make_node_unique_ids_parallel_consistent(mesh);
1835 
1836  // Finally, correct the processor ids to make DofMap happy
// NOTE(review): the call on line 1837 (presumably
// MeshTools::correct_node_proc_ids(mesh)) was dropped by the doc
// extraction here — confirm against upstream libMesh.
1838 }
1839 
1840 
1841 
1842 // ------------------------------------------------------------
// Delete every element and node this processor has no reason to keep:
// compute the keep-set (local, unpartitioned, explicitly-requested and
// ghosting-functor-required elements plus their families and nodes),
// then delete everything else from finest level to coarsest.
//
// NOTE(review): several helper-call lines were dropped by the doc
// extraction (jumps in the embedded line numbers below).
1843 void
1844 MeshCommunication::delete_remote_elements (DistributedMesh & mesh,
1845  const std::set<Elem *> & extra_ghost_elem_ids) const
1846 {
1847  // The mesh should know it's about to be parallelized
1848  libmesh_assert (!mesh.is_serial());
1849 
1850  LOG_SCOPE("delete_remote_elements()", "MeshCommunication");
1851 
1852 #ifdef DEBUG
1853  // We expect maximum ids to be in sync so we can use them to size
1854  // vectors
1855  libmesh_assert(mesh.comm().verify(mesh.max_node_id()));
1856  libmesh_assert(mesh.comm().verify(mesh.max_elem_id()));
1857  const dof_id_type par_max_node_id = mesh.parallel_max_node_id();
1858  const dof_id_type par_max_elem_id = mesh.parallel_max_elem_id();
1859  libmesh_assert_equal_to (par_max_node_id, mesh.max_node_id());
1860  libmesh_assert_equal_to (par_max_elem_id, mesh.max_elem_id());
1861 #endif
1862 
1863  std::set<const Elem *, CompareElemIdsByLevel> elements_to_keep;
1864 
1865  // Don't delete elements that we were explicitly told not to
1866  for (const auto & elem : extra_ghost_elem_ids)
1867  {
1868  std::vector<const Elem *> active_family;
1869 #ifdef LIBMESH_ENABLE_AMR
1870  if (!elem->subactive())
1871  elem->active_family_tree(active_family);
1872  else
1873 #endif
1874  active_family.push_back(elem);
1875 
1876  for (std::size_t i=0; i != active_family.size(); ++i)
1877  elements_to_keep.insert(active_family[i]);
1878  }
1879 
1880  // See which elements we still need to keep ghosted, given that
1881  // we're keeping local and unpartitioned elements.
// NOTE(review): the helper-call opening line (1882, a query_ghosting_-
// functors-style call) and iterator arguments (1884-1885) were dropped.
1883  (mesh, mesh.processor_id(),
1886  elements_to_keep);
// NOTE(review): the matching helper-call opening line (1887) for
// unpartitioned elements was dropped by the extraction here.
1888  (mesh, DofObject::invalid_processor_id,
1889  mesh.active_pid_elements_begin(DofObject::invalid_processor_id),
1890  mesh.active_pid_elements_end(DofObject::invalid_processor_id),
1891  elements_to_keep);
1892 
1893  // The inactive elements we need to send should have their
1894  // immediate children present.
// NOTE(review): the connect_children(...) call opening lines
// (1895-1896, 1898) were dropped by the extraction here.
1897  elements_to_keep);
1899  mesh.pid_elements_begin(DofObject::invalid_processor_id),
1900  mesh.pid_elements_end(DofObject::invalid_processor_id),
1901  elements_to_keep);
1902 
1903  // The elements we need should have their ancestors and their
1904  // subactive children present too.
1905  connect_families(elements_to_keep);
1906 
1907  // Don't delete nodes that our semilocal elements need
1908  std::set<const Node *> connected_nodes;
1909  reconnect_nodes(elements_to_keep, connected_nodes);
1910 
1911  // Delete all the elements we have no reason to save,
1912  // starting with the most refined so that the mesh
1913  // is valid at all intermediate steps
1914  unsigned int n_levels = MeshTools::n_levels(mesh);
1915 
1916  for (int l = n_levels - 1; l >= 0; --l)
// NOTE(review): the level_elements_end(l) argument line (1918) was
// dropped by the extraction here.
1917  for (auto & elem : as_range(mesh.level_elements_begin(l),
1919  {
1920  libmesh_assert (elem);
1921  // Make sure we don't leave any invalid pointers
1922  const bool keep_me = elements_to_keep.count(elem);
1923 
1924  if (!keep_me)
1925  elem->make_links_to_me_remote();
1926 
1927  // delete_elem doesn't currently invalidate element
1928  // iterators... that had better not change
1929  if (!keep_me)
1930  mesh.delete_elem(elem);
1931  }
1932 
1933  // Delete all the nodes we have no reason to save
1934  for (auto & node : mesh.node_ptr_range())
1935  {
1936  libmesh_assert(node);
1937  if (!connected_nodes.count(node))
1938  mesh.delete_node(node);
1939  }
1940 
1941  // If we had a point locator, it's invalid now that some of the
1942  // elements it pointed to have been deleted.
// NOTE(review): the invalidation call on line 1943 was dropped here.
1944 
1945  // Much of our boundary info may have been for now-remote parts of
1946  // the mesh, in which case we don't want to keep local copies.
// NOTE(review): the boundary-info call on line 1947 was dropped here.
1948 
1949  // We now have all remote elements and nodes deleted; our ghosting
1950  // functors should be ready to delete any now-redundant cached data
1951  // they use too.
// NOTE(review): the ghosting-functor loop header (line 1952) was
// dropped; only its body survives below.
1953  gf->delete_remote_elements();
1954 
1955 #ifdef DEBUG
// NOTE(review): the validity-check call on line 1956 was dropped here.
1957 #endif
1958 }
1959 
1960 } // namespace libMesh
void set_p_level(const unsigned int p)
Definition: elem.h:2692
RefinementState refinement_flag() const
Definition: elem.h:2638
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
void libmesh_assert_topology_consistent_procids< Node >(const MeshBase &mesh)
Definition: mesh_tools.C:1826
void wait(std::vector< Request > &r)
Definition: request.C:213
const Elem * parent() const
Definition: elem.h:2479
A geometric point in (x,y,z) space associated with a DOF.
Definition: node.h:52
virtual void renumber_node(dof_id_type old_id, dof_id_type new_id)=0
virtual element_iterator not_local_elements_end()=0
virtual element_iterator level_elements_begin(unsigned int level)=0
void correct_node_proc_ids(MeshBase &)
Definition: mesh_tools.C:2259
const unsigned int any_source
Definition: communicator.h:70
IntRange< unsigned short > side_index_range() const
Definition: elem.h:2166
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Definition: mesh_tools.C:702
void send_packed_range(const unsigned int dest_processor_id, const Context *context, Iter range_begin, const Iter range_end, const MessageTag &tag=no_tag) const
const Elem * top_parent() const
Definition: elem.h:2503
virtual element_iterator unpartitioned_elements_begin()=0
void libmesh_assert_valid_refinement_tree(const MeshBase &mesh)
Definition: mesh_tools.C:2033
std::unordered_set< const Node * > was_new
unsigned short int side
Definition: xdr_io.C:50
The base class for all geometric element types.
Definition: elem.h:100
MeshBase & mesh
uint8_t processor_id_type
Definition: id_types.h:99
void receive_packed_range(const unsigned int dest_processor_id, Context *context, OutputIter out, const T *output_type, const MessageTag &tag=any_tag) const
virtual element_iterator flagged_elements_end(unsigned char rflag)=0
void alltoall(std::vector< T, A > &r) const
void libmesh_assert_parallel_consistent_new_node_procids(const MeshBase &mesh)
Definition: mesh_tools.C:1877
const Parallel::Communicator & comm() const
unsigned int p_level() const
Definition: elem.h:2555
MessageTag get_unique_tag(int tagvalue) const
Definition: communicator.C:201
void active_family_tree(std::vector< const Elem *> &active_family, const bool reset=true) const
Definition: elem.C:1577
virtual element_iterator flagged_pid_elements_end(unsigned char rflag, processor_id_type pid)=0
void make_links_to_me_remote()
Definition: elem.C:1149
const BoundaryInfo & get_boundary_info() const
Definition: mesh_base.h:131
void total_family_tree(std::vector< const Elem *> &active_family, const bool reset=true) const
Definition: elem.C:1557
Base class for Mesh.
Definition: mesh_base.h:77
SimpleRange< ChildRefIter > child_ref_range()
Definition: elem.h:1779
unsigned int paranoid_n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:677
void libmesh_assert_equal_n_systems(const MeshBase &mesh)
Definition: mesh_tools.C:1173
virtual std::unique_ptr< Elem > build_side_ptr(const unsigned int i, bool proxy=true)=0
virtual node_iterator nodes_begin()=0
virtual element_iterator elements_begin()=0
virtual SimpleRange< element_iterator > active_local_element_ptr_range()=0
virtual element_iterator level_elements_end(unsigned int level)=0
std::map< boundary_id_type, std::string > & set_sideset_name_map()
processor_id_type n_processors() const
virtual bool is_serial() const
Definition: mesh_base.h:154
void libmesh_ignore(const Args &...)
virtual void find_neighbors(const bool reset_remote_elements=false, const bool reset_current_list=true)=0
virtual void delete_elem(Elem *e)=0
void reconnect_nodes(const std::set< const Elem *, CompareElemIdsByLevel > &connected_elements, std::set< const Node *> &connected_nodes)
virtual SimpleRange< element_iterator > element_ptr_range()=0
const Node & node_ref(const unsigned int i) const
Definition: elem.h:1979
dof_id_type id() const
Definition: dof_object.h:655
virtual element_iterator active_pid_elements_begin(processor_id_type proc_id)=0
void sync_node_data_by_element_id(MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
virtual element_iterator flagged_elements_begin(unsigned char rflag)=0
void connect_families(std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
virtual element_iterator elements_end()=0
unsigned int n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:653
virtual const Node * query_node_ptr(const dof_id_type i) const =0
virtual SimpleRange< node_iterator > node_ptr_range()=0
virtual dof_id_type max_elem_id() const =0
void clear_point_locator()
Definition: mesh_base.C:517
SimpleRange< I > as_range(const std::pair< I, I > &p)
Definition: simple_range.h:57
void family_tree(std::vector< const Elem *> &family, const bool reset=true) const
Definition: elem.C:1534
std::unordered_map< const Elem *, const CouplingMatrix * > map_type
void libmesh_assert_valid_boundary_ids(const MeshBase &mesh)
Definition: mesh_tools.C:1401
virtual void delete_node(Node *n)=0
MPI_Status status
Definition: status.h:41
OStreamProxy err(std::cerr)
std::map< subdomain_id_type, std::string > & set_subdomain_name_map()
Definition: mesh_base.h:1335
void sync_dofobject_data_by_id(const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
void set_neighbor(const unsigned int i, Elem *n)
Definition: elem.h:2083
Mesh data structure which is distributed across all processors.
status probe(const unsigned int src_processor_id, const MessageTag &tag=any_tag) const
virtual void clear()
Definition: mesh_base.C:260
umap_type definitive_renumbering
bool on_boundary() const
Definition: elem.h:2304
void libmesh_assert_parallel_consistent_procids< Node >(const MeshBase &mesh)
Definition: mesh_tools.C:1915
unsigned int size(const data_type &type) const
Definition: status.h:152
virtual element_iterator active_elements_begin()=0
virtual element_iterator flagged_pid_elements_begin(unsigned char rflag, processor_id_type pid)=0
An output iterator for use with packed_range functions.
void allgather_packed_range(Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
SimpleRange< NodeRefIter > node_ref_range()
Definition: elem.h:2130
virtual element_iterator pid_elements_begin(processor_id_type proc_id)=0
virtual element_iterator active_elements_end()=0
query_obj query
void query_ghosting_functors(const MeshBase &mesh, processor_id_type pid, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
const Elem * neighbor_ptr(unsigned int i) const
Definition: elem.h:2050
std::set< GhostingFunctor * >::const_iterator ghosting_functors_begin() const
Definition: mesh_base.h:835
virtual unsigned int n_vertices() const =0
virtual node_iterator nodes_end()=0
virtual element_iterator active_pid_elements_end(processor_id_type proc_id)=0
virtual const Elem * query_elem_ptr(const dof_id_type i) const =0
const Node * node_ptr(const unsigned int i) const
Definition: elem.h:1957
std::map< boundary_id_type, std::string > & set_nodeset_name_map()
bool sync_node_data_by_element_id_once(MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
unsigned int n_neighbors() const
Definition: elem.h:644
bool subactive() const
Definition: elem.h:2408
virtual const Elem & elem_ref(const dof_id_type i) const
Definition: mesh_base.h:504
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
virtual element_iterator unpartitioned_elements_end()=0
void redistribute(MeshBase &mesh, const FunctionBase< Real > &mapfunc)
virtual const Node & node_ref(const dof_id_type i) const
Definition: mesh_base.h:434
uset_type definitive_ids
SimpleRange< NeighborPtrIter > neighbor_ptr_range()
Definition: elem.h:2988
void connect_children(const MeshBase &mesh, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
renumber_obj renumber
virtual element_iterator not_local_elements_begin()=0
void cache_elem_dims()
Definition: mesh_base.C:574
IterBase * data
virtual dof_id_type max_node_id() const =0
virtual dof_id_type n_elem() const =0
virtual const Node * node_ptr(const dof_id_type i) const =0
processor_id_type processor_id() const
OStreamProxy out(std::cout)
bool active() const
Definition: elem.h:2390
void gather_packed_range(const unsigned int root_id, Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
processor_id_type processor_id() const
Definition: dof_object.h:717
long double min(long double a, double b)
dof_id_type node_id(const unsigned int i) const
Definition: elem.h:1914
uint8_t unique_id_type
Definition: id_types.h:79
bool has_children() const
Definition: elem.h:2428
void broadcast(T &data, const unsigned int root_id=0) const
virtual dof_id_type n_nodes() const =0
void sync_element_data_by_parent_id(MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
void broadcast_packed_range(const Context *context1, Iter range_begin, const Iter range_end, OutputContext *context2, OutputIter out, const unsigned int root_id=0) const
std::set< GhostingFunctor * >::const_iterator ghosting_functors_end() const
Definition: mesh_base.h:841
uint8_t dof_id_type
Definition: id_types.h:64
const RemoteElem * remote_elem
Definition: remote_elem.C:57
virtual element_iterator pid_elements_end(processor_id_type proc_id)=0