checkpoint_io.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2018 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
#include "libmesh/checkpoint_io.h"

// Local includes
#include "libmesh/boundary_info.h"
#include "libmesh/elem.h"
#include "libmesh/enum_xdr_mode.h"
#include "libmesh/int_range.h"
#include "libmesh/mesh_base.h"
#include "libmesh/mesh_tools.h"
#include "libmesh/node.h"
#include "libmesh/parallel.h"
#include "libmesh/partitioner.h"
#include "libmesh/remote_elem.h"
#include "libmesh/utility.h"
#include "libmesh/xdr_cxx.h"
#include "libmesh/xdr_io.h"

// C++ includes
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream> // for ostringstream
#include <string>
#include <unistd.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
51 
52 namespace
53 {
54 // chunking computes the number of chunks and first-chunk-offset when splitting a mesh
55 // into nsplits pieces using size procs for the given MPI rank. The number of chunks and offset
56 // are stored in nchunks and first_chunk respectively.
59 {
60  if (nsplits % size == 0) // the chunks divide evenly over the processors
61  {
62  nchunks = nsplits / size;
63  first_chunk = libMesh::cast_int<libMesh::processor_id_type>(nchunks * rank);
64  return;
65  }
66 
67  libMesh::processor_id_type nextra = nsplits % size;
68  if (rank < nextra) // leftover chunks cause an extra chunk to be added to this processor
69  {
70  nchunks = libMesh::cast_int<libMesh::processor_id_type>(nsplits / size + 1);
71  first_chunk = libMesh::cast_int<libMesh::processor_id_type>(nchunks * rank);
72  }
73  else // no extra chunks, but first chunk is offset by extras on earlier ranks
74  {
75  nchunks = nsplits / size;
76  // account for the case where nchunks is zero where we want max int
77  first_chunk = libMesh::cast_int<libMesh::processor_id_type>
78  (std::max((int)((nchunks + 1) * (nsplits % size) + nchunks * (rank - nsplits % size)),
79  (1 - (int)nchunks) * std::numeric_limits<int>::max()));
80  }
81 }
82 
// Return the extension of file name 's', including the leading dot
// (e.g. ".cpr"), or the empty string when 's' contains no '.'.
std::string extension(const std::string & s)
{
  // Single-character overload of rfind avoids a needless string scan
  // setup; substr with one argument takes everything through the end
  // (the original's explicit length argument was redundant).
  const auto pos = s.rfind('.');
  if (pos == std::string::npos)
    return "";
  return s.substr(pos);
}
90 
91 std::string split_dir(const std::string & input_name, libMesh::processor_id_type n_procs)
92 {
93  return input_name + "/" + std::to_string(n_procs);
94 }
95 
96 
97 std::string header_file(const std::string & input_name, libMesh::processor_id_type n_procs)
98 {
99  return split_dir(input_name, n_procs) + "/header" + extension(input_name);
100 }
101 
102 std::string
103 split_file(const std::string & input_name,
106 {
107  return split_dir(input_name, n_procs) + "/split-" + std::to_string(n_procs) + "-" +
108  std::to_string(proc_id) + extension(input_name);
109 }
110 
111 void make_dir(const std::string & input_name, libMesh::processor_id_type n_procs)
112 {
113  auto ret = libMesh::Utility::mkdir(input_name.c_str());
114  // error only if we failed to create dir - don't care if it was already there
115  if (ret != 0 && ret != -1)
116  libmesh_error_msg(
117  "Failed to create mesh split directory '" << input_name << "': " << std::strerror(ret));
118 
119  auto dir_name = split_dir(input_name, n_procs);
120  ret = libMesh::Utility::mkdir(dir_name.c_str());
121  if (ret == -1)
122  libmesh_warning("In CheckpointIO::write, directory '"
123  << dir_name << "' already exists, overwriting contents.");
124  else if (ret != 0)
125  libmesh_error_msg(
126  "Failed to create mesh split directory '" << dir_name << "': " << std::strerror(ret));
127 }
128 
129 } // namespace
130 
131 namespace libMesh
132 {
133 
134 std::unique_ptr<CheckpointIO> split_mesh(MeshBase & mesh, processor_id_type nsplits)
135 {
136  // There is currently an issue with DofObjects not being properly
137  // reset if the mesh is not first repartitioned onto 1 processor
138  // *before* being repartitioned onto the desired number of
139  // processors. So, this is a workaround, but not a particularly
140  // onerous one.
141  mesh.partition(1);
142  mesh.partition(nsplits);
143 
144  processor_id_type my_num_chunks = 0;
145  processor_id_type my_first_chunk = 0;
146  chunking(mesh.comm().size(), mesh.comm().rank(), nsplits, my_num_chunks, my_first_chunk);
147 
148  auto cpr = libmesh_make_unique<CheckpointIO>(mesh);
149  cpr->current_processor_ids().clear();
150  for (processor_id_type i = my_first_chunk; i < my_first_chunk + my_num_chunks; i++)
151  cpr->current_processor_ids().push_back(i);
152  cpr->current_n_processors() = nsplits;
153  cpr->parallel() = true;
154  return cpr;
155 }
156 
157 
158 // ------------------------------------------------------------
159 // CheckpointIO members
160 CheckpointIO::CheckpointIO (MeshBase & mesh, const bool binary_in) :
161  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
162  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
164  _binary (binary_in),
165  _parallel (false),
166  _version ("checkpoint-1.2"),
167  _my_processor_ids (1, processor_id()),
168  _my_n_processors (mesh.is_replicated() ? 1 : n_processors())
169 {
170 }
171 
172 CheckpointIO::CheckpointIO (const MeshBase & mesh, const bool binary_in) :
173  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
175  _binary (binary_in),
176  _parallel (false),
177  _my_processor_ids (1, processor_id()),
178  _my_n_processors (mesh.is_replicated() ? 1 : n_processors())
179 {
180 }
181 
183 {
184 }
185 
186 processor_id_type CheckpointIO::select_split_config(const std::string & input_name, header_id_type & data_size)
187 {
188  std::string header_name;
189 
190  // We'll read a header file from processor 0 and broadcast.
191  if (this->processor_id() == 0)
192  {
193  header_name = header_file(input_name, _my_n_processors);
194 
195  {
196  // look for header+splits with nprocs equal to _my_n_processors
197  std::ifstream in (header_name.c_str());
198  if (!in.good())
199  {
200  // otherwise fall back to a serial/single-split mesh
201  header_name = header_file(input_name, 1);
202  std::ifstream in2 (header_name.c_str());
203  if (!in2.good())
204  {
205  libmesh_error_msg("ERROR: cannot locate header file for input '" << input_name << "'");
206  }
207  }
208  }
209 
210  Xdr io (header_name, this->binary() ? DECODE : READ);
211 
212  // read the version, but don't care about it
213  std::string input_version;
214  io.data(input_version);
215 
216  // read the data type
217  io.data (data_size);
218  }
219 
220  this->comm().broadcast(data_size);
221  this->comm().broadcast(header_name);
222 
223  // How many per-processor files are here?
224  largest_id_type input_n_procs;
225 
226  switch (data_size) {
227  case 2:
228  input_n_procs = this->read_header<uint16_t>(header_name);
229  break;
230  case 4:
231  input_n_procs = this->read_header<uint32_t>(header_name);
232  break;
233  case 8:
234  input_n_procs = this->read_header<uint64_t>(header_name);
235  break;
236  default:
237  libmesh_error();
238  }
239 
240  if (!input_n_procs)
241  input_n_procs = 1;
242  return cast_int<processor_id_type>(input_n_procs);
243 }
244 
245 void CheckpointIO::cleanup(const std::string & input_name, processor_id_type n_procs)
246 {
247  auto header = header_file(input_name, n_procs);
248  auto ret = std::remove(header.c_str());
249  if (ret != 0)
250  libmesh_warning("Failed to clean up checkpoint header '" << header << "': " << std::strerror(ret));
251 
252  for (processor_id_type i = 0; i < n_procs; i++)
253  {
254  auto split = split_file(input_name, n_procs, i);
255  ret = std::remove(split.c_str());
256  if (ret != 0)
257  libmesh_warning("Failed to clean up checkpoint split file '" << split << "': " << std::strerror(ret));
258  }
259 
260  auto dir = split_dir(input_name, n_procs);
261  ret = rmdir(dir.c_str());
262  if (ret != 0)
263  libmesh_warning("Failed to clean up checkpoint split dir '" << dir << "': " << std::strerror(ret));
264 
265  // We expect that this may fail if there are other split configurations still present in this
266  // directory - so don't bother to check/warn for failure.
267  rmdir(input_name.c_str());
268 }
269 
270 void CheckpointIO::write (const std::string & name)
271 {
272  LOG_SCOPE("write()", "CheckpointIO");
273 
274  // convenient reference to our mesh
276 
277  // FIXME: For backwards compatibility, we'll assume for now that we
278  // only want to write distributed meshes in parallel. Later we can
279  // do a gather_to_zero() and support that case too.
281 
282  processor_id_type use_n_procs = 1;
283  if (_parallel)
284  use_n_procs = _my_n_processors;
285 
286  std::string header_file_name = header_file(name, use_n_procs);
287  make_dir(name, use_n_procs);
288 
289  // We'll write a header file from processor 0 to make it easier to do unambiguous
290  // restarts later:
291  if (this->processor_id() == 0)
292  {
293  Xdr io (header_file_name, this->binary() ? ENCODE : WRITE);
294 
295  // write the version
296  io.data(_version, "# version");
297 
298  // write what kind of data type we're using
299  header_id_type data_size = sizeof(largest_id_type);
300  io.data(data_size, "# integer size");
301 
302  // Write out the max mesh dimension for backwards compatibility
303  // with code that sets it independently of element dimensions
304  {
305  uint16_t mesh_dimension = cast_int<uint16_t>(mesh.mesh_dimension());
306  io.data(mesh_dimension, "# dimensions");
307  }
308 
309  // Write out whether or not this is serial output
310  {
311  uint16_t parallel = _parallel;
312  io.data(parallel, "# parallel");
313  }
314 
315  // If we're writing out a parallel mesh then we need to write the number of processors
316  // so we can check it upon reading the file
317  if (_parallel)
318  {
320  io.data(n_procs, "# n_procs");
321  }
322 
323  // write subdomain names
324  this->write_subdomain_names(io);
325 
326  // write boundary id names
327  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
328  write_bc_names(io, boundary_info, true); // sideset names
329  write_bc_names(io, boundary_info, false); // nodeset names
330  }
331 
332  // If this is a serial mesh written to a serial file then we're only
333  // going to write local data from processor 0. If this is a mesh being
334  // written in parallel then we're going to write from every
335  // processor.
336  std::vector<processor_id_type> ids_to_write;
337 
338  // We're going to sort elements by pid in one pass, to avoid sending
339  // predicated iterators through the whole mesh N_p times
340  std::unordered_map<processor_id_type, std::vector<Elem *>> elements_on_pid;
341 
342  if (_parallel)
343  {
344  ids_to_write = _my_processor_ids;
345  for (processor_id_type p : ids_to_write)
346  elements_on_pid[p].clear();
347  auto eop_end = elements_on_pid.end();
348  for (auto & elem : mesh.element_ptr_range())
349  {
350  const processor_id_type p = elem->processor_id();
351  auto eop_it = elements_on_pid.find(p);
352  if (eop_it != eop_end)
353  eop_it->second.push_back(elem);
354  }
355  }
356  else if (mesh.is_serial())
357  {
358  if (mesh.processor_id() == 0)
359  {
360  // placeholder
361  ids_to_write.push_back(0);
362  }
363  }
364  else
365  {
366  libmesh_error_msg("Cannot write serial checkpoint from distributed mesh");
367  }
368 
369  // Call build_side_list() and build_node_list() just *once* to avoid
370  // redundant expensive sorts during mesh splitting.
371  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
372  std::vector<std::tuple<dof_id_type, unsigned short int, boundary_id_type>>
373  bc_triples = boundary_info.build_side_list();
374  std::vector<std::tuple<dof_id_type, boundary_id_type>>
375  bc_tuples = boundary_info.build_node_list();
376 
377  for (const auto & my_pid : ids_to_write)
378  {
379  auto file_name = split_file(name, use_n_procs, my_pid);
380  Xdr io (file_name, this->binary() ? ENCODE : WRITE);
381 
382  std::set<const Elem *, CompareElemIdsByLevel> elements;
383 
384  // For serial files or for already-distributed meshs, we write
385  // everything we can see.
386  if (!_parallel || !mesh.is_serial())
387  elements.insert(mesh.elements_begin(), mesh.elements_end());
388  // For parallel files written from serial meshes we write what
389  // we'd be required to keep if we were to be deleting remote
390  // elements. This allows us to write proper parallel files even
391  // from a ReplicateMesh.
392  //
393  // WARNING: If we have a DistributedMesh which used
394  // "add_extra_ghost_elem" rather than ghosting functors to
395  // preserve elements and which is *also* currently serialized
396  // then we're not preserving those elements here. As a quick
397  // workaround user code should delete_remote_elements() before
398  // writing the checkpoint; as a long term workaround user code
399  // should use ghosting functors instead of extra_ghost_elem
400  // lists.
401  else
402  {
404  {
405  const auto elements_vec_it = elements_on_pid.find(p);
406  if (elements_vec_it != elements_on_pid.end())
407  {
408  const auto & p_elements = elements_vec_it->second;
409  Elem * const * elempp = p_elements.data();
410  Elem * const * elemend = elempp + p_elements.size();
411 
413  pid_elements_begin = MeshBase::const_element_iterator
414  (elempp, elemend, Predicates::NotNull<Elem * const *>()),
415  pid_elements_end = MeshBase::const_element_iterator
416  (elemend, elemend, Predicates::NotNull<Elem * const *>()),
417  active_pid_elements_begin = MeshBase::const_element_iterator
418  (elempp, elemend, Predicates::Active<Elem * const *>()),
419  active_pid_elements_end = MeshBase::const_element_iterator
420  (elemend, elemend, Predicates::Active<Elem * const *>());
421 
423  (mesh, p, active_pid_elements_begin,
424  active_pid_elements_end, elements);
425  connect_children(mesh, pid_elements_begin,
426  pid_elements_end, elements);
427  }
428  connect_families(elements);
429  }
430  }
431 
432  std::set<const Node *> connected_nodes;
433  reconnect_nodes(elements, connected_nodes);
434 
435  // write the nodal locations
436  this->write_nodes (io, connected_nodes);
437 
438  // write connectivity
439  this->write_connectivity (io, elements);
440 
441  // write remote_elem connectivity
442  this->write_remote_elem (io, elements);
443 
444  // write the boundary condition information
445  this->write_bcs (io, elements, bc_triples);
446 
447  // write the nodeset information
448  this->write_nodesets (io, connected_nodes, bc_tuples);
449 
450  // close it up
451  io.close();
452  }
453 
454  // this->comm().barrier();
455 }
456 
458 {
459  {
461 
462  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
463 
464  std::vector<largest_id_type> subdomain_ids; subdomain_ids.reserve(subdomain_map.size());
465  std::vector<std::string> subdomain_names; subdomain_names.reserve(subdomain_map.size());
466 
467  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
468  // return writable references in mesh_base, it's possible for the user to leave some entity names
469  // blank. We can't write those to the XDA file.
470  largest_id_type n_subdomain_names = 0;
471  for (const auto & pr : subdomain_map)
472  if (!pr.second.empty())
473  {
474  n_subdomain_names++;
475  subdomain_ids.push_back(pr.first);
476  subdomain_names.push_back(pr.second);
477  }
478 
479  io.data(n_subdomain_names, "# subdomain id to name map");
480  // Write out the ids and names in two vectors
481  if (n_subdomain_names)
482  {
483  io.data(subdomain_ids);
484  io.data(subdomain_names);
485  }
486  }
487 }
488 
489 
490 
492  const std::set<const Node *> & nodeset) const
493 {
494  largest_id_type n_nodes_here = nodeset.size();
495 
496  io.data(n_nodes_here, "# n_nodes on proc");
497 
498  // Will hold the node id and pid
499  std::vector<largest_id_type> id_pid(2);
500 
501  // For the coordinates
502  std::vector<Real> coords(LIBMESH_DIM);
503 
504  for (const auto & node : nodeset)
505  {
506  id_pid[0] = node->id();
507  id_pid[1] = node->processor_id();
508 
509  io.data_stream(id_pid.data(), 2, 2);
510 
511 #ifdef LIBMESH_ENABLE_UNIQUE_ID
512  largest_id_type unique_id = node->unique_id();
513 
514  io.data(unique_id, "# unique id");
515 #endif
516 
517  coords[0] = (*node)(0);
518 
519 #if LIBMESH_DIM > 1
520  coords[1] = (*node)(1);
521 #endif
522 
523 #if LIBMESH_DIM > 2
524  coords[2] = (*node)(2);
525 #endif
526 
527  io.data_stream(coords.data(), LIBMESH_DIM, 3);
528  }
529 }
530 
531 
532 
534  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
535 {
536  libmesh_assert (io.writing());
537 
538  // Put these out here to reduce memory churn
539  // id type pid subdomain_id parent_id
540  std::vector<largest_id_type> elem_data(6);
541  std::vector<largest_id_type> conn_data;
542 
543  largest_id_type n_elems_here = elements.size();
544 
545  io.data(n_elems_here, "# number of elements");
546 
547  for (const auto & elem : elements)
548  {
549  unsigned int n_nodes = elem->n_nodes();
550 
551  elem_data[0] = elem->id();
552  elem_data[1] = elem->type();
553  elem_data[2] = elem->processor_id();
554  elem_data[3] = elem->subdomain_id();
555 
556 #ifdef LIBMESH_ENABLE_AMR
557  if (elem->parent() != nullptr)
558  {
559  elem_data[4] = elem->parent()->id();
560  elem_data[5] = elem->parent()->which_child_am_i(elem);
561  }
562  else
563 #endif
564  {
565  elem_data[4] = static_cast<largest_id_type>(-1);
566  elem_data[5] = static_cast<largest_id_type>(-1);
567  }
568 
569  conn_data.resize(n_nodes);
570 
571  for (unsigned int i=0; i<n_nodes; i++)
572  conn_data[i] = elem->node_id(i);
573 
574  io.data_stream(elem_data.data(),
575  cast_int<unsigned int>(elem_data.size()),
576  cast_int<unsigned int>(elem_data.size()));
577 
578 #ifdef LIBMESH_ENABLE_UNIQUE_ID
579  largest_id_type unique_id = elem->unique_id();
580 
581  io.data(unique_id, "# unique id");
582 #endif
583 
584 #ifdef LIBMESH_ENABLE_AMR
585  uint16_t p_level = cast_int<uint16_t>(elem->p_level());
586  io.data(p_level, "# p_level");
587 
588  uint16_t rflag = elem->refinement_flag();
589  io.data(rflag, "# rflag");
590 
591  uint16_t pflag = elem->p_refinement_flag();
592  io.data(pflag, "# pflag");
593 #endif
594  io.data_stream(conn_data.data(),
595  cast_int<unsigned int>(conn_data.size()),
596  cast_int<unsigned int>(conn_data.size()));
597  }
598 }
599 
600 
602  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
603 {
604  libmesh_assert (io.writing());
605 
606  // Find the remote_elem neighbor and child links
607  std::vector<largest_id_type> elem_ids, parent_ids;
608  std::vector<uint16_t> elem_sides, child_numbers;
609 
610  for (const auto & elem : elements)
611  {
612  for (auto n : elem->side_index_range())
613  {
614  const Elem * neigh = elem->neighbor_ptr(n);
615  if (neigh == remote_elem ||
616  (neigh && !elements.count(neigh)))
617  {
618  elem_ids.push_back(elem->id());
619  elem_sides.push_back(n);
620  }
621  }
622 
623 #ifdef LIBMESH_ENABLE_AMR
624  if (elem->has_children())
625  {
626  for (unsigned short c = 0,
627  nc = cast_int<unsigned short>(elem->n_children());
628  c != nc; ++c)
629  {
630  const Elem * child = elem->child_ptr(c);
631  if (child == remote_elem ||
632  (child && !elements.count(child)))
633  {
634  parent_ids.push_back(elem->id());
635  child_numbers.push_back(c);
636  }
637  }
638  }
639 #endif
640  }
641 
642  io.data(elem_ids, "# remote neighbor elem_ids");
643  io.data(elem_sides, "# remote neighbor elem_sides");
644  io.data(parent_ids, "# remote child parent_ids");
645  io.data(child_numbers, "# remote child_numbers");
646 }
647 
648 
649 
651  const std::set<const Elem *, CompareElemIdsByLevel> & elements,
652  const std::vector<std::tuple<dof_id_type, unsigned short int, boundary_id_type>> & bc_triples) const
653 {
654  libmesh_assert (io.writing());
655 
656  // Build a list of (elem, side, bc) tuples.
657  std::size_t bc_size = bc_triples.size();
658 
659  std::vector<largest_id_type> element_id_list;
660  std::vector<uint16_t> side_list;
661  std::vector<largest_id_type> bc_id_list;
662 
663  element_id_list.reserve(bc_size);
664  side_list.reserve(bc_size);
665  bc_id_list.reserve(bc_size);
666 
667  std::unordered_set<dof_id_type> elems;
668  for (auto & e : elements)
669  elems.insert(e->id());
670 
671  for (const auto & t : bc_triples)
672  if (elems.count(std::get<0>(t)))
673  {
674  element_id_list.push_back(std::get<0>(t));
675  side_list.push_back(std::get<1>(t));
676  bc_id_list.push_back(std::get<2>(t));
677  }
678 
679 
680  io.data(element_id_list, "# element ids for bcs");
681  io.data(side_list, "# sides of elements for bcs");
682  io.data(bc_id_list, "# bc ids");
683 }
684 
685 
686 
688  const std::set<const Node *> & nodeset,
689  const std::vector<std::tuple<dof_id_type, boundary_id_type>> & bc_tuples) const
690 {
691  libmesh_assert (io.writing());
692 
693  // convenient reference to our mesh
695 
696  // Build a list of (node, bc) tuples
697  std::size_t nodeset_size = bc_tuples.size();
698 
699  std::vector<largest_id_type> node_id_list;
700  std::vector<largest_id_type> bc_id_list;
701 
702  node_id_list.reserve(nodeset_size);
703  bc_id_list.reserve(nodeset_size);
704 
705  for (const auto & t : bc_tuples)
706  if (nodeset.count(mesh.node_ptr(std::get<0>(t))))
707  {
708  node_id_list.push_back(std::get<0>(t));
709  bc_id_list.push_back(std::get<1>(t));
710  }
711 
712  io.data(node_id_list, "# node id list");
713  io.data(bc_id_list, "# nodeset bc id list");
714 }
715 
716 
717 
718 void CheckpointIO::write_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
719 {
720  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
722 
723  std::vector<largest_id_type> boundary_ids; boundary_ids.reserve(boundary_map.size());
724  std::vector<std::string> boundary_names; boundary_names.reserve(boundary_map.size());
725 
726  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
727  // return writable references in boundary_info, it's possible for the user to leave some entity names
728  // blank. We can't write those to the XDA file.
729  largest_id_type n_boundary_names = 0;
730  for (const auto & pr : boundary_map)
731  if (!pr.second.empty())
732  {
733  n_boundary_names++;
734  boundary_ids.push_back(pr.first);
735  boundary_names.push_back(pr.second);
736  }
737 
738  if (is_sideset)
739  io.data(n_boundary_names, "# sideset id to name map");
740  else
741  io.data(n_boundary_names, "# nodeset id to name map");
742  // Write out the ids and names in two vectors
743  if (n_boundary_names)
744  {
745  io.data(boundary_ids);
746  io.data(boundary_names);
747  }
748 }
749 
750 void CheckpointIO::read (const std::string & input_name)
751 {
752  LOG_SCOPE("read()","CheckpointIO");
753 
755 
756  libmesh_assert(!mesh.n_elem());
757 
758  header_id_type data_size;
759  processor_id_type input_n_procs = select_split_config(input_name, data_size);
760  auto header_name = header_file(input_name, input_n_procs);
761  bool input_parallel = input_n_procs > 0;
762 
763  // If this is a serial read then we're going to only read the mesh
764  // on processor 0, then broadcast it
765  if ((input_parallel && !mesh.is_replicated()) || mesh.processor_id() == 0)
766  {
767  // If we're trying to read a parallel checkpoint file on a
768  // replicated mesh, we'll read every file on processor 0 so we
769  // can broadcast it later. If we're on a distributed mesh then
770  // we'll read every id to it's own processor and we'll "wrap
771  // around" with any ids that exceed our processor count.
772  const processor_id_type begin_proc_id =
773  (input_parallel && !mesh.is_replicated()) ?
774  mesh.processor_id() : 0;
775  const processor_id_type stride =
776  (input_parallel && !mesh.is_replicated()) ?
777  mesh.n_processors() : 1;
778 
779  for (processor_id_type proc_id = begin_proc_id; proc_id < input_n_procs;
780  proc_id = cast_int<processor_id_type>(proc_id + stride))
781  {
782  auto file_name = split_file(input_name, input_n_procs, proc_id);
783 
784  {
785  std::ifstream in (file_name.c_str());
786 
787  if (!in.good())
788  libmesh_error_msg("ERROR: cannot locate specified file:\n\t" << file_name);
789  }
790 
791  // Do we expect all our files' remote_elem entries to really
792  // be remote? Only if we're not reading multiple input
793  // files on the same processor.
794  const bool expect_all_remote =
795  (input_n_procs <= mesh.n_processors() &&
796  !mesh.is_replicated());
797 
798  Xdr io (file_name, this->binary() ? DECODE : READ);
799 
800  switch (data_size) {
801  case 2:
802  this->read_subfile<uint16_t>(io, expect_all_remote);
803  break;
804  case 4:
805  this->read_subfile<uint32_t>(io, expect_all_remote);
806  break;
807  case 8:
808  this->read_subfile<uint64_t>(io, expect_all_remote);
809  break;
810  default:
811  libmesh_error();
812  }
813 
814  io.close();
815  }
816  }
817 
818  // If the mesh was only read on processor 0 then we need to broadcast it
819  if (mesh.is_replicated())
821  // If the mesh is really distributed then we need to make sure it
822  // knows that
823  else if (mesh.n_processors() > 1)
825 }
826 
827 
828 
829 template <typename file_id_type>
830 file_id_type CheckpointIO::read_header (const std::string & name)
831 {
833 
834  // Hack for codes which don't look at all elem dimensions
835  uint16_t mesh_dimension;
836 
837  // Will this be a parallel input file? With how many processors? Stay tuned!
838  uint16_t input_parallel;
839  file_id_type input_n_procs;
840 
841  // We'll write a header file from processor 0 and broadcast.
842  if (this->processor_id() == 0)
843  {
844  Xdr io (name, this->binary() ? DECODE : READ);
845 
846  // read the version, but don't care about it
847  std::string input_version;
848  io.data(input_version);
849 
850  // read the data type, don't care about it this time
851  header_id_type data_size;
852  io.data (data_size);
853 
854  // read the dimension
855  io.data (mesh_dimension);
856 
857  // Read whether or not this is a parallel file
858  io.data(input_parallel);
859 
860  // With how many processors?
861  if (input_parallel)
862  io.data(input_n_procs);
863 
864  // read subdomain names
865  this->read_subdomain_names<file_id_type>(io);
866 
867  // read boundary names
868  BoundaryInfo & boundary_info = mesh.get_boundary_info();
869 
870  this->read_bc_names<file_id_type>(io, boundary_info, true); // sideset names
871  this->read_bc_names<file_id_type>(io, boundary_info, false); // nodeset names
872  }
873 
874  // broadcast data from processor 0, set values everywhere
875  this->comm().broadcast(mesh_dimension);
876  mesh.set_mesh_dimension(cast_int<unsigned char>(mesh_dimension));
877 
878  this->comm().broadcast(input_parallel);
879 
880  if (input_parallel)
881  this->comm().broadcast(input_n_procs);
882  else
883  input_n_procs = 1;
884 
885  std::map<subdomain_id_type, std::string> & subdomain_map =
887  this->comm().broadcast(subdomain_map);
888 
889  BoundaryInfo & boundary_info = mesh.get_boundary_info();
890  this->comm().broadcast(boundary_info.set_sideset_name_map());
891  this->comm().broadcast(boundary_info.set_nodeset_name_map());
892 
893  return input_parallel ? input_n_procs : 0;
894 }
895 
896 
897 
// Read one per-processor split file. The five sections below must be
// consumed in exactly the order write() emitted them: nodes,
// connectivity, remote_elem links, side bcs, nodesets.
// 'expect_all_remote' is forwarded to read_remote_elem(); see read()
// for how it is chosen.
template <typename file_id_type>
void CheckpointIO::read_subfile (Xdr & io, bool expect_all_remote)
{
  // read the nodal locations
  this->read_nodes<file_id_type> (io);

  // read connectivity
  this->read_connectivity<file_id_type> (io);

  // read remote_elem connectivity
  this->read_remote_elem<file_id_type> (io, expect_all_remote);

  // read the boundary conditions
  this->read_bcs<file_id_type> (io);

  // read the nodesets
  this->read_nodesets<file_id_type> (io);
}
916 
917 
918 
919 template <typename file_id_type>
921 {
923 
924  std::map<subdomain_id_type, std::string> & subdomain_map =
926 
927  std::vector<file_id_type> subdomain_ids;
928  subdomain_ids.reserve(subdomain_map.size());
929 
930  std::vector<std::string> subdomain_names;
931  subdomain_names.reserve(subdomain_map.size());
932 
933  file_id_type n_subdomain_names = 0;
934  io.data(n_subdomain_names, "# subdomain id to name map");
935 
936  if (n_subdomain_names)
937  {
938  io.data(subdomain_ids);
939  io.data(subdomain_names);
940 
941  for (auto i : index_range(subdomain_ids))
942  subdomain_map[cast_int<subdomain_id_type>(subdomain_ids[i])] =
943  subdomain_names[i];
944  }
945 }
946 
947 
948 
949 template <typename file_id_type>
951 {
952  // convenient reference to our mesh
954 
955  file_id_type n_nodes_here;
956  io.data(n_nodes_here, "# n_nodes on proc");
957 
958  // Will hold the node id and pid
959  std::vector<file_id_type> id_pid(2);
960 
961  // For the coordinates
962  std::vector<Real> coords(LIBMESH_DIM);
963 
964  for (unsigned int i=0; i<n_nodes_here; i++)
965  {
966  io.data_stream(id_pid.data(), 2, 2);
967 
968 #ifdef LIBMESH_ENABLE_UNIQUE_ID
969  file_id_type unique_id = 0;
970  io.data(unique_id, "# unique id");
971 #endif
972 
973  io.data_stream(coords.data(), LIBMESH_DIM, LIBMESH_DIM);
974 
975  Point p;
976  p(0) = coords[0];
977 
978 #if LIBMESH_DIM > 1
979  p(1) = coords[1];
980 #endif
981 
982 #if LIBMESH_DIM > 2
983  p(2) = coords[2];
984 #endif
985 
986  const dof_id_type id = cast_int<dof_id_type>(id_pid[0]);
987 
988  // "Wrap around" if we see more processors than we're using.
989  processor_id_type pid =
990  cast_int<processor_id_type>(id_pid[1] % mesh.n_processors());
991 
992  // If we already have this node (e.g. from another file, when
993  // reading multiple distributed CheckpointIO files into a
994  // ReplicatedMesh) then we don't want to add it again (because
995  // ReplicatedMesh can't handle that) but we do want to assert
996  // consistency between what we're reading and what we have.
997  const Node * old_node = mesh.query_node_ptr(id);
998 
999  if (old_node)
1000  {
1001  libmesh_assert_equal_to(pid, old_node->processor_id());
1002 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1003  libmesh_assert_equal_to(unique_id, old_node->unique_id());
1004 #endif
1005  }
1006  else
1007  {
1008 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1009  Node * node =
1010 #endif
1011  mesh.add_point(p, id, pid);
1012 
1013 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1014  node->set_unique_id() = unique_id;
1015 #endif
1016  }
1017  }
1018 }
1019 
1020 
1021 
1022 template <typename file_id_type>
1024 {
1025  // convenient reference to our mesh
1027 
1028  file_id_type n_elems_here;
1029  io.data(n_elems_here);
1030 
1031  // Keep track of the highest dimensional element we've added to the mesh
1032  unsigned int highest_elem_dim = 1;
1033 
1034  // RHS: Originally we used invalid_processor_id as a "no parent" tag
1035  // number, because I'm an idiot. Let's try to support broken files
1036  // as much as possible.
1037  bool file_is_broken = false;
1038 
1039  for (unsigned int i=0; i<n_elems_here; i++)
1040  {
1041  // id type pid subdomain_id parent_id
1042  std::vector<file_id_type> elem_data(6);
1043  io.data_stream
1044  (elem_data.data(), cast_int<unsigned int>(elem_data.size()),
1045  cast_int<unsigned int>(elem_data.size()));
1046 
1047 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1048  file_id_type unique_id = 0;
1049  io.data(unique_id, "# unique id");
1050 #endif
1051 
1052 #ifdef LIBMESH_ENABLE_AMR
1053  uint16_t p_level = 0;
1054  io.data(p_level, "# p_level");
1055 
1056  uint16_t rflag, pflag;
1057  io.data(rflag, "# rflag");
1058  io.data(pflag, "# pflag");
1059 #endif
1060 
1061  unsigned int n_nodes = Elem::type_to_n_nodes_map[elem_data[1]];
1062 
1063  // Snag the node ids this element was connected to
1064  std::vector<file_id_type> conn_data(n_nodes);
1065  io.data_stream
1066  (conn_data.data(), cast_int<unsigned int>(conn_data.size()),
1067  cast_int<unsigned int>(conn_data.size()));
1068 
1069  const dof_id_type id =
1070  cast_int<dof_id_type> (elem_data[0]);
1071  const ElemType elem_type =
1072  static_cast<ElemType> (elem_data[1]);
1073  const processor_id_type proc_id =
1074  cast_int<processor_id_type>
1075  (elem_data[2] % mesh.n_processors());
1076  const subdomain_id_type subdomain_id =
1077  cast_int<subdomain_id_type>(elem_data[3]);
1078 
1079  // Old broken files used processsor_id_type(-1)...
1080  // But we *know* our first element will be level 0
1081  if (i == 0 && elem_data[4] == 65535)
1082  file_is_broken = true;
1083 
1084  // On a broken file we can't tell whether a parent of 65535 is a
1085  // null parent or an actual parent of 65535. Assuming the
1086  // former will cause less breakage.
1087  Elem * parent =
1088  (elem_data[4] == static_cast<largest_id_type>(-1) ||
1089  (file_is_broken && elem_data[4] == 65535)) ?
1090  nullptr : mesh.elem_ptr(cast_int<dof_id_type>(elem_data[4]));
1091  const unsigned short int child_num =
1092  (elem_data[5] == static_cast<largest_id_type>(-1) ||
1093  (file_is_broken && elem_data[5] == 65535)) ?
1094  static_cast<unsigned short>(-1) :
1095  cast_int<unsigned short>(elem_data[5]);
1096 
1097  if (!parent)
1098  libmesh_assert_equal_to
1099  (child_num, static_cast<unsigned short>(-1));
1100 
1101  Elem * old_elem = mesh.query_elem_ptr(id);
1102 
1103  // If we already have this element (e.g. from another file,
1104  // when reading multiple distributed CheckpointIO files into
1105  // a ReplicatedMesh) then we don't want to add it again
1106  // (because ReplicatedMesh can't handle that) but we do want
1107  // to assert consistency between what we're reading and what
1108  // we have.
1109  if (old_elem)
1110  {
1111  libmesh_assert_equal_to(elem_type, old_elem->type());
1112  libmesh_assert_equal_to(proc_id, old_elem->processor_id());
1113  libmesh_assert_equal_to(subdomain_id, old_elem->subdomain_id());
1114  if (parent)
1115  libmesh_assert_equal_to(parent, old_elem->parent());
1116  else
1117  libmesh_assert(!old_elem->parent());
1118 
1119  libmesh_assert_equal_to(old_elem->n_nodes(), conn_data.size());
1120 
1121  for (unsigned int n=0,
1122  n_conn = cast_int<unsigned int>(conn_data.size());
1123  n != n_conn; n++)
1124  libmesh_assert_equal_to
1125  (old_elem->node_id(n),
1126  cast_int<dof_id_type>(conn_data[n]));
1127  }
1128  else
1129  {
1130  // Create the element
1131  Elem * elem = Elem::build(elem_type, parent).release();
1132 
1133 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1134  elem->set_unique_id() = unique_id;
1135 #endif
1136 
1137  if (elem->dim() > highest_elem_dim)
1138  highest_elem_dim = elem->dim();
1139 
1140  elem->set_id() = id;
1141  elem->processor_id() = proc_id;
1142  elem->subdomain_id() = subdomain_id;
1143 
1144 #ifdef LIBMESH_ENABLE_AMR
1145  elem->hack_p_level(p_level);
1146 
1147  elem->set_refinement_flag (cast_int<Elem::RefinementState>(rflag));
1148  elem->set_p_refinement_flag(cast_int<Elem::RefinementState>(pflag));
1149 
1150  // Set parent connections
1151  if (parent)
1152  {
1153  // We must specify a child_num, because we will have
1154  // skipped adding any preceding remote_elem children
1155  parent->add_child(elem, child_num);
1156  }
1157 #endif
1158 
1159  libmesh_assert(elem->n_nodes() == conn_data.size());
1160 
1161  // Connect all the nodes to this element
1162  for (unsigned int n=0,
1163  n_conn = cast_int<unsigned int>(conn_data.size());
1164  n != n_conn; n++)
1165  elem->set_node(n) =
1166  mesh.node_ptr(cast_int<dof_id_type>(conn_data[n]));
1167 
1168  mesh.add_elem(elem);
1169  }
1170  }
1171 
1172  mesh.set_mesh_dimension(cast_int<unsigned char>(highest_elem_dim));
1173 }
1174 
1175 
1176 template <typename file_id_type>
1177 void CheckpointIO::read_remote_elem (Xdr & io, bool libmesh_dbg_var(expect_all_remote))
1178 {
1179  // convenient reference to our mesh
1181 
1182  // Find the remote_elem neighbor links
1183  std::vector<file_id_type> elem_ids;
1184  std::vector<uint16_t> elem_sides;
1185 
1186  io.data(elem_ids, "# remote neighbor elem_ids");
1187  io.data(elem_sides, "# remote neighbor elem_sides");
1188 
1189  libmesh_assert_equal_to(elem_ids.size(), elem_sides.size());
1190 
1191  for (auto i : index_range(elem_ids))
1192  {
1193  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(elem_ids[i]));
1194  if (!elem.neighbor_ptr(elem_sides[i]))
1195  elem.set_neighbor(elem_sides[i],
1196  const_cast<RemoteElem *>(remote_elem));
1197  else
1198  libmesh_assert(!expect_all_remote);
1199  }
1200 
1201  // Find the remote_elem children links
1202  std::vector<file_id_type> parent_ids;
1203  std::vector<uint16_t> child_numbers;
1204 
1205  io.data(parent_ids, "# remote child parent_ids");
1206  io.data(child_numbers, "# remote child_numbers");
1207 
1208 #ifdef LIBMESH_ENABLE_AMR
1209  for (auto i : index_range(parent_ids))
1210  {
1211  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(parent_ids[i]));
1212 
1213  // We'd like to assert that no child pointer already exists to
1214  // be overwritten by remote_elem, but Elem doesn't actually have
1215  // an API that will return a child pointer without asserting
1216  // that it isn't nullptr.
1217  const Elem * child = elem.raw_child_ptr(child_numbers[i]);
1218 
1219  if (!child)
1220  elem.add_child(const_cast<RemoteElem *>(remote_elem),
1221  child_numbers[i]);
1222  else
1223  libmesh_assert(!expect_all_remote);
1224  }
1225 #endif
1226 }
1227 
1228 
1229 
1230 template <typename file_id_type>
1232 {
1233  // convenient reference to our mesh
1235 
1236  // and our boundary info object
1237  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1238 
1239  std::vector<file_id_type> element_id_list;
1240  std::vector<uint16_t> side_list;
1241  std::vector<file_id_type> bc_id_list;
1242 
1243  io.data(element_id_list, "# element ids for bcs");
1244  io.data(side_list, "# sides of elements for bcs");
1245  io.data(bc_id_list, "# bc ids");
1246 
1247  for (auto i : index_range(element_id_list))
1248  boundary_info.add_side
1249  (cast_int<dof_id_type>(element_id_list[i]), side_list[i],
1250  cast_int<boundary_id_type>(bc_id_list[i]));
1251 }
1252 
1253 
1254 
1255 template <typename file_id_type>
1257 {
1258  // convenient reference to our mesh
1260 
1261  // and our boundary info object
1262  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1263 
1264  std::vector<file_id_type> node_id_list;
1265  std::vector<file_id_type> bc_id_list;
1266 
1267  io.data(node_id_list, "# node id list");
1268  io.data(bc_id_list, "# nodeset bc id list");
1269 
1270  for (auto i : index_range(node_id_list))
1271  boundary_info.add_node
1272  (cast_int<dof_id_type>(node_id_list[i]),
1273  cast_int<boundary_id_type>(bc_id_list[i]));
1274 }
1275 
1276 
1277 
1278 template <typename file_id_type>
1279 void CheckpointIO::read_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
1280 {
1281  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1283 
1284  std::vector<file_id_type> boundary_ids;
1285  std::vector<std::string> boundary_names;
1286 
1287  file_id_type n_boundary_names = 0;
1288 
1289  if (is_sideset)
1290  io.data(n_boundary_names, "# sideset id to name map");
1291  else
1292  io.data(n_boundary_names, "# nodeset id to name map");
1293 
1294  if (n_boundary_names)
1295  {
1296  io.data(boundary_ids);
1297  io.data(boundary_names);
1298  }
1299 
1300  // Add them back into the map
1301  for (auto i : index_range(boundary_ids))
1302  boundary_map[cast_int<boundary_id_type>(boundary_ids[i])] =
1303  boundary_names[i];
1304 }
1305 
1306 
1309 {
1310  unsigned int max_level = 0;
1311 
1312  for (const auto & elem : as_range(begin, end))
1313  max_level = std::max(elem->level(), max_level);
1314 
1315  return max_level + 1;
1316 }
1317 
1318 } // namespace libMesh
std::string name(const ElemQuality q)
Definition: elem_quality.C:42
const MT & mesh() const
Definition: mesh_output.h:234
bool writing() const
Definition: xdr_cxx.h:117
void data(T &a, const char *comment="")
Definition: xdr_cxx.C:761
unique_id_type & set_unique_id()
Definition: dof_object.h:685
virtual void write(const std::string &name) override
const Elem * parent() const
Definition: elem.h:2479
virtual Node *& set_node(const unsigned int i)
Definition: elem.h:2024
A geometric point in (x,y,z) space associated with a DOF.
Definition: node.h:52
void write_remote_elem(Xdr &io, const std::set< const Elem *, CompareElemIdsByLevel > &elements) const
void write_connectivity(Xdr &io, const std::set< const Elem *, CompareElemIdsByLevel > &elements) const
processor_id_type size() const
Definition: communicator.h:175
virtual void read(const std::string &input_name) override
void close()
Definition: xdr_cxx.C:273
const std::map< boundary_id_type, std::string > & get_sideset_name_map() const
The base class for all geometric element types.
Definition: elem.h:100
uint64_t largest_id_type
Definition: id_types.h:139
MeshBase & mesh
IntRange< std::size_t > index_range(const std::vector< T > &vec)
Definition: int_range.h:104
uint8_t processor_id_type
Definition: id_types.h:99
void add_child(Elem *elem)
Definition: elem.C:1461
void set_refinement_flag(const RefinementState rflag)
Definition: elem.h:2646
const Parallel::Communicator & comm() const
int mkdir(const char *pathname)
Definition: utility.C:140
IterBase * end
void write_bcs(Xdr &io, const std::set< const Elem *, CompareElemIdsByLevel > &elements, const std::vector< std::tuple< dof_id_type, unsigned short int, boundary_id_type >> &bc_triples) const
void read_bc_names(Xdr &io, BoundaryInfo &info, bool is_sideset)
void read_remote_elem(Xdr &io, bool expect_all_remote)
const BoundaryInfo & get_boundary_info() const
Definition: mesh_base.h:131
unsigned int n_active_levels_in(MeshBase::const_element_iterator begin, MeshBase::const_element_iterator end) const
long double max(long double a, double b)
unsigned int max_level(const MeshBase &mesh)
virtual Node * add_point(const Point &p, const dof_id_type id=DofObject::invalid_id, const processor_id_type proc_id=DofObject::invalid_processor_id)=0
static const unsigned int type_to_n_nodes_map[INVALID_ELEM]
Definition: elem.h:589
processor_id_type select_split_config(const std::string &input_name, header_id_type &data_size)
Base class for Mesh.
Definition: mesh_base.h:77
dof_id_type & set_id()
Definition: dof_object.h:664
void build_side_list(std::vector< dof_id_type > &element_id_list, std::vector< unsigned short int > &side_list, std::vector< boundary_id_type > &bc_id_list) const
virtual void set_distributed()
Definition: mesh_base.h:169
virtual void partition(const unsigned int n_parts)
Definition: mesh_base.C:426
virtual element_iterator elements_begin()=0
std::vector< processor_id_type > _my_processor_ids
std::map< boundary_id_type, std::string > & set_sideset_name_map()
processor_id_type n_processors() const
virtual bool is_serial() const
Definition: mesh_base.h:154
void build_node_list(std::vector< dof_id_type > &node_id_list, std::vector< boundary_id_type > &bc_id_list) const
void add_node(const Node *node, const boundary_id_type id)
const dof_id_type n_nodes
Definition: tecplot_io.C:68
const std::map< boundary_id_type, std::string > & get_nodeset_name_map() const
void reconnect_nodes(const std::set< const Elem *, CompareElemIdsByLevel > &connected_elements, std::set< const Node *> &connected_nodes)
virtual SimpleRange< element_iterator > element_ptr_range()=0
const std::map< subdomain_id_type, std::string > & get_subdomain_name_map() const
Definition: mesh_base.h:1337
processor_id_type rank() const
Definition: communicator.h:173
void write_subdomain_names(Xdr &io) const
virtual unsigned int n_nodes() const =0
static const processor_id_type invalid_processor_id
Definition: dof_object.h:358
processor_id_type _my_n_processors
virtual Elem * add_elem(Elem *e)=0
static std::unique_ptr< Elem > build(const ElemType type, Elem *p=nullptr)
Definition: elem.C:245
void connect_families(std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
virtual element_iterator elements_end()=0
virtual const Node * query_node_ptr(const dof_id_type i) const =0
CheckpointIO(MeshBase &, const bool=false)
Used by the Mesh to keep track of boundary nodes and elements.
Definition: boundary_info.h:57
SimpleRange< I > as_range(const std::pair< I, I > &p)
Definition: simple_range.h:57
static void cleanup(const std::string &input_name, processor_id_type n_procs)
void write_bc_names(Xdr &io, const BoundaryInfo &info, bool is_sideset) const
void set_mesh_dimension(unsigned char d)
Definition: mesh_base.h:213
std::map< subdomain_id_type, std::string > & set_subdomain_name_map()
Definition: mesh_base.h:1335
void set_neighbor(const unsigned int i, Elem *n)
Definition: elem.h:2083
An object whose state is distributed along a set of processors.
file_id_type read_header(const std::string &name)
C++ interface for the XDR (eXternal Data Representation) format.
Definition: xdr_cxx.h:66
virtual const Elem * elem_ptr(const dof_id_type i) const =0
void query_ghosting_functors(const MeshBase &mesh, processor_id_type pid, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
tbb::split split
Definition: threads_tbb.h:79
void read_subdomain_names(Xdr &io)
void write_nodesets(Xdr &io, const std::set< const Node *> &nodeset, const std::vector< std::tuple< dof_id_type, boundary_id_type >> &bc_tuples) const
void read_bcs(Xdr &io)
const Elem * neighbor_ptr(unsigned int i) const
Definition: elem.h:2050
std::unique_ptr< CheckpointIO > split_mesh(MeshBase &mesh, processor_id_type nsplits)
const Elem * raw_child_ptr(unsigned int i) const
Definition: elem.h:2569
void read_subfile(Xdr &io, bool expect_all_remote)
virtual const Elem * query_elem_ptr(const dof_id_type i) const =0
subdomain_id_type subdomain_id() const
Definition: elem.h:2034
void read_nodesets(Xdr &io)
void read_nodes(Xdr &io)
virtual unsigned short dim() const =0
std::map< boundary_id_type, std::string > & set_nodeset_name_map()
virtual bool is_replicated() const
Definition: mesh_base.h:176
void broadcast(MeshBase &) const
void add_side(const dof_id_type elem, const unsigned short int side, const boundary_id_type id)
virtual const Elem & elem_ref(const dof_id_type i) const
Definition: mesh_base.h:504
unsigned int mesh_dimension() const
Definition: mesh_base.C:126
void write_nodes(Xdr &io, const std::set< const Node *> &nodeset) const
void set_p_refinement_flag(const RefinementState pflag)
Definition: elem.h:2662
void connect_children(const MeshBase &mesh, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
virtual dof_id_type n_elem() const =0
virtual const Node * node_ptr(const dof_id_type i) const =0
processor_id_type processor_id() const
void read_connectivity(Xdr &io)
void data_stream(T *val, const unsigned int len, const unsigned int line_break=libMesh::invalid_uint)
Definition: xdr_cxx.C:825
void hack_p_level(const unsigned int p)
Definition: elem.h:2739
processor_id_type processor_id() const
Definition: dof_object.h:717
virtual ElemType type() const =0
A geometric point in (x,y,z) space.
Definition: point.h:38
dof_id_type node_id(const unsigned int i) const
Definition: elem.h:1914
void broadcast(T &data, const unsigned int root_id=0) const
const Elem * child_ptr(unsigned int i) const
Definition: elem.h:2578
uint8_t dof_id_type
Definition: id_types.h:64
const RemoteElem * remote_elem
Definition: remote_elem.C:57