libMesh::XdrIO Class Reference

#include <xdr_io.h>

Inheritance diagram for libMesh::XdrIO:

Public Types

typedef largest_id_type xdr_id_type
 
typedef uint32_t old_header_id_type
 
typedef uint64_t new_header_id_type
 

Public Member Functions

 XdrIO (MeshBase &, const bool=false)
 
 XdrIO (const MeshBase &, const bool=false)
 
virtual ~XdrIO ()
 
virtual void read (const std::string &) override
 
virtual void write (const std::string &) override
 
bool binary () const
 
bool & binary ()
 
bool legacy () const
 
bool & legacy ()
 
bool write_parallel () const
 
void set_write_parallel (bool do_parallel=true)
 
void set_auto_parallel ()
 
const std::string & version () const
 
std::string & version ()
 
const std::string & boundary_condition_file_name () const
 
std::string & boundary_condition_file_name ()
 
const std::string & partition_map_file_name () const
 
std::string & partition_map_file_name ()
 
const std::string & subdomain_map_file_name () const
 
std::string & subdomain_map_file_name ()
 
const std::string & polynomial_level_file_name () const
 
std::string & polynomial_level_file_name ()
 
bool version_at_least_0_9_2 () const
 
bool version_at_least_0_9_6 () const
 
bool version_at_least_1_1_0 () const
 
bool version_at_least_1_3_0 () const
 
virtual void write_equation_systems (const std::string &, const EquationSystems &, const std::set< std::string > *system_names=nullptr)
 
virtual void write_discontinuous_equation_systems (const std::string &, const EquationSystems &, const std::set< std::string > *system_names=nullptr)
 
virtual void write_nodal_data (const std::string &, const std::vector< Number > &, const std::vector< std::string > &)
 
virtual void write_nodal_data (const std::string &, const NumericVector< Number > &, const std::vector< std::string > &)
 
virtual void write_nodal_data_discontinuous (const std::string &, const std::vector< Number > &, const std::vector< std::string > &)
 
unsigned int & ascii_precision ()
 
const Parallel::Communicator & comm () const
 
processor_id_type n_processors () const
 
processor_id_type processor_id () const
 

Protected Member Functions

MeshBase & mesh ()
 
void set_n_partitions (unsigned int n_parts)
 
void skip_comment_lines (std::istream &in, const char comment_start)
 
const MeshBase & mesh () const
 

Protected Attributes

std::vector< bool > elems_of_dimension
 
const bool _is_parallel_format
 
const bool _serial_only_needed_on_proc_0
 
const Parallel::Communicator & _communicator
 

Private Member Functions

void write_serialized_subdomain_names (Xdr &io) const
 
void write_serialized_connectivity (Xdr &io, const dof_id_type n_elem) const
 
void write_serialized_nodes (Xdr &io, const dof_id_type n_nodes) const
 
void write_serialized_bcs_helper (Xdr &io, const new_header_id_type n_side_bcs, const std::string bc_type) const
 
void write_serialized_side_bcs (Xdr &io, const new_header_id_type n_side_bcs) const
 
void write_serialized_edge_bcs (Xdr &io, const new_header_id_type n_edge_bcs) const
 
void write_serialized_shellface_bcs (Xdr &io, const new_header_id_type n_shellface_bcs) const
 
void write_serialized_nodesets (Xdr &io, const new_header_id_type n_nodesets) const
 
void write_serialized_bc_names (Xdr &io, const BoundaryInfo &info, bool is_sideset) const
 
template<typename T >
void read_header (Xdr &io, std::vector< T > &meta_data)
 
void read_serialized_subdomain_names (Xdr &io)
 
template<typename T >
void read_serialized_connectivity (Xdr &io, const dof_id_type n_elem, std::vector< new_header_id_type > &sizes, T)
 
void read_serialized_nodes (Xdr &io, const dof_id_type n_nodes)
 
template<typename T >
void read_serialized_bcs_helper (Xdr &io, T, const std::string bc_type)
 
template<typename T >
void read_serialized_side_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_edge_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_shellface_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_nodesets (Xdr &io, T)
 
void read_serialized_bc_names (Xdr &io, BoundaryInfo &info, bool is_sideset)
 
void pack_element (std::vector< xdr_id_type > &conn, const Elem *elem, const dof_id_type parent_id=DofObject::invalid_id, const dof_id_type parent_pid=DofObject::invalid_id) const
 

Private Attributes

bool _binary
 
bool _legacy
 
bool _write_serial
 
bool _write_parallel
 
bool _write_unique_id
 
unsigned int _field_width
 
std::string _version
 
std::string _bc_file_name
 
std::string _partition_map_file
 
std::string _subdomain_map_file
 
std::string _p_level_file
 

Static Private Attributes

static const std::size_t io_blksize = 128000
 

Detailed Description

MeshIO class used for writing XDR (eXternal Data Representation) and XDA mesh files. XDR/XDA is libMesh's internal data format, and allows the full refinement tree structure of the mesh to be written to file.

Author
Benjamin Kirk
John Peterson
Date
2004

Definition at line 51 of file xdr_io.h.
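
A minimal usage sketch follows; the file name, the 4x4 build_square() call, and the binary flag choice are illustrative only, not part of XdrIO itself:

#include "libmesh/libmesh.h"
#include "libmesh/mesh.h"
#include "libmesh/mesh_generation.h"
#include "libmesh/xdr_io.h"

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init (argc, argv);

  // Build a small example mesh on the unit square.
  libMesh::Mesh mesh (init.comm());
  libMesh::MeshTools::Generation::build_square (mesh, 4, 4);

  // Write the mesh in binary XDR format ...
  libMesh::XdrIO xdr_out (mesh, /*binary=*/true);
  xdr_out.write ("example.xdr");

  // ... and read it back into a second mesh.
  libMesh::Mesh mesh2 (init.comm());
  libMesh::XdrIO xdr_in (mesh2, /*binary=*/true);
  xdr_in.read ("example.xdr");
  mesh2.prepare_for_use ();

  return 0;
}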

Member Typedef Documentation

◆ new_header_id_type

Definition at line 63 of file xdr_io.h.

◆ old_header_id_type

Definition at line 60 of file xdr_io.h.

◆ xdr_id_type

Definition at line 57 of file xdr_io.h.

Constructor & Destructor Documentation

◆ XdrIO() [1/2]

libMesh::XdrIO::XdrIO ( MeshBase &  mesh,
const bool  binary_in = false 
)
explicit

Constructor. Takes a writable reference to a mesh object. This is the constructor required to read a mesh. The optional parameter binary_in can be used to switch between ASCII (false, the default) and binary (true) files.

Definition at line 129 of file xdr_io.C.

129  :
130  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
131  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
133  _binary (binary_in),
134  _legacy (false),
135  _write_serial (false),
136  _write_parallel (false),
137 #ifdef LIBMESH_ENABLE_UNIQUE_ID
138  _write_unique_id (true),
139 #else
140  _write_unique_id (false),
141 #endif
142  _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
143  _version ("libMesh-1.3.0"),
144  _bc_file_name ("n/a"),
145  _partition_map_file ("n/a"),
146  _subdomain_map_file ("n/a"),
147  _p_level_file ("n/a")
148 {
149 }

◆ XdrIO() [2/2]

libMesh::XdrIO::XdrIO ( const MeshBase &  mesh,
const bool  binary_in = false 
)
explicit

Constructor. Takes a reference to a constant mesh object. This constructor will only allow us to write the mesh. The optional parameter binary_in can be used to switch between ASCII (false, the default) and binary (true) files.

Definition at line 153 of file xdr_io.C.

153  :
154  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
156  _binary (binary_in)
157 {
158 }

◆ ~XdrIO()

libMesh::XdrIO::~XdrIO ( )
virtual

Destructor.

Definition at line 162 of file xdr_io.C.

163 {
164 }

Member Function Documentation

◆ ascii_precision()

unsigned int & libMesh::MeshOutput< MeshBase >::ascii_precision ( )
inline inherited

Return/set the precision to use when writing ASCII files.

By default we use numeric_limits<Real>::digits10 + 2, which should be enough to write out to ASCII and get the exact same Real back when reading in.

Definition at line 244 of file mesh_output.h.

Referenced by libMesh::TecplotIO::write_ascii(), libMesh::GMVIO::write_ascii_new_impl(), and libMesh::GMVIO::write_ascii_old_impl().

245 {
246  return _ascii_precision;
247 }
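
For example, the precision can be overridden before writing an ASCII (XDA) file. A brief sketch, assuming mesh is an existing MeshBase; the value 17 is just an illustration:

  libMesh::XdrIO xda_out (mesh);        // ASCII (XDA) output by default
  xda_out.ascii_precision() = 17;       // explicitly request 17 significant digits
  xda_out.write ("example.xda");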

◆ binary() [1/2]

bool libMesh::XdrIO::binary ( ) const
inline

Get/Set the flag indicating if we should read/write binary.

Definition at line 103 of file xdr_io.h.

References _binary.

Referenced by libMesh::NameBasedIO::read(), read(), and write().

103 { return _binary; }

◆ binary() [2/2]

bool& libMesh::XdrIO::binary ( )
inline

Definition at line 104 of file xdr_io.h.

References _binary.

104 { return _binary; }

◆ boundary_condition_file_name() [1/2]

const std::string& libMesh::XdrIO::boundary_condition_file_name ( ) const
inline

Get/Set the boundary condition file name.

Definition at line 146 of file xdr_io.h.

References _bc_file_name.

Referenced by read_header(), read_serialized_bcs_helper(), read_serialized_nodesets(), and write().

146 { return _bc_file_name; }

◆ boundary_condition_file_name() [2/2]

std::string& libMesh::XdrIO::boundary_condition_file_name ( )
inline

Definition at line 147 of file xdr_io.h.

References _bc_file_name.

147 { return _bc_file_name; }

◆ comm()

const Parallel::Communicator& libMesh::ParallelObject::comm ( ) const
inline inherited
Returns
A reference to the Parallel::Communicator object used by this mesh.

Definition at line 89 of file parallel_object.h.

References libMesh::ParallelObject::_communicator.

Referenced by libMesh::__libmesh_petsc_diff_solver_jacobian(), libMesh::__libmesh_petsc_diff_solver_monitor(), libMesh::__libmesh_petsc_diff_solver_residual(), libMesh::__libmesh_tao_equality_constraints(), libMesh::__libmesh_tao_equality_constraints_jacobian(), libMesh::__libmesh_tao_gradient(), libMesh::__libmesh_tao_hessian(), libMesh::__libmesh_tao_inequality_constraints(), libMesh::__libmesh_tao_inequality_constraints_jacobian(), libMesh::__libmesh_tao_objective(), libMesh::MeshRefinement::_coarsen_elements(), libMesh::ExactSolution::_compute_error(), libMesh::UniformRefinementEstimator::_estimate_error(), libMesh::BoundaryInfo::_find_id_maps(), libMesh::SlepcEigenSolver< T >::_petsc_shell_matrix_get_diagonal(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_get_diagonal(), libMesh::SlepcEigenSolver< T >::_petsc_shell_matrix_mult(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_mult(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_mult_add(), libMesh::EquationSystems::_read_impl(), libMesh::MeshRefinement::_refine_elements(), libMesh::MeshRefinement::_smooth_flags(), libMesh::PetscDMWrapper::add_dofs_helper(), libMesh::PetscDMWrapper::add_dofs_to_section(), libMesh::ImplicitSystem::add_matrix(), libMesh::System::add_vector(), libMesh::UnstructuredMesh::all_second_order(), libMesh::MeshTools::Modification::all_tri(), libMesh::LaplaceMeshSmoother::allgather_graph(), libMesh::FEMSystem::assemble_qoi(), libMesh::MeshCommunication::assign_global_indices(), libMesh::DofMap::attach_matrix(), libMesh::MeshTools::Generation::build_extrusion(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::EquationSystems::build_parallel_elemental_solution_vector(), libMesh::EquationSystems::build_parallel_solution_vector(), libMesh::PetscDMWrapper::build_section(), libMesh::PetscDMWrapper::build_sf(), libMesh::MeshBase::cache_elem_dims(), libMesh::System::calculate_norm(), libMesh::DofMap::check_dirichlet_bcid_consistency(), libMesh::PetscDMWrapper::check_section_n_dofs(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), libMesh::Problem_Interface::computeF(), libMesh::Problem_Interface::computeJacobian(), libMesh::Problem_Interface::computePreconditioner(), libMesh::ExodusII_IO::copy_elemental_solution(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshTools::create_bounding_box(), libMesh::MeshTools::create_nodal_bounding_box(), libMesh::MeshRefinement::create_parent_error_vector(), libMesh::MeshTools::create_processor_bounding_box(), libMesh::MeshTools::create_subdomain_bounding_box(), libMesh::MeshCommunication::delete_remote_elements(), libMesh::DofMap::distribute_dofs(), DMlibMeshFunction(), DMlibMeshJacobian(), DMlibMeshSetSystem_libMesh(), DMVariableBounds_libMesh(), libMesh::MeshRefinement::eliminate_unrefined_patches(), libMesh::EpetraVector< T >::EpetraVector(), libMesh::WeightedPatchRecoveryErrorEstimator::estimate_error(), libMesh::PatchRecoveryErrorEstimator::estimate_error(), libMesh::JumpErrorEstimator::estimate_error(), libMesh::AdjointRefinementEstimator::estimate_error(), libMesh::ExactErrorEstimator::estimate_error(), libMesh::MeshRefinement::flag_elements_by_elem_fraction(), libMesh::MeshRefinement::flag_elements_by_error_fraction(), libMesh::MeshRefinement::flag_elements_by_nelem_target(), libMesh::CondensedEigenSystem::get_eigenpair(), libMesh::DofMap::get_info(), libMesh::ImplicitSystem::get_linear_solver(), 
libMesh::LocationMap< T >::init(), libMesh::TimeSolver::init(), libMesh::SystemSubsetBySubdomain::init(), libMesh::PetscDMWrapper::init_and_attach_petscdm(), libMesh::EigenSystem::init_data(), libMesh::EigenSystem::init_matrices(), libMesh::OptimizationSystem::initialize_equality_constraints_storage(), libMesh::OptimizationSystem::initialize_inequality_constraints_storage(), libMesh::MeshTools::libmesh_assert_consistent_distributed(), libMesh::MeshTools::libmesh_assert_consistent_distributed_nodes(), libMesh::MeshTools::libmesh_assert_contiguous_dof_ids(), libMesh::MeshTools::libmesh_assert_parallel_consistent_new_node_procids(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_dof_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_flags(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_object_ids(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_p_levels(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::MeshTools::libmesh_assert_valid_unique_ids(), libMesh::libmesh_petsc_snes_fd_residual(), libMesh::libmesh_petsc_snes_jacobian(), libMesh::libmesh_petsc_snes_mffd_residual(), libMesh::libmesh_petsc_snes_postcheck(), libMesh::libmesh_petsc_snes_residual(), libMesh::libmesh_petsc_snes_residual_helper(), libMesh::MeshRefinement::limit_level_mismatch_at_edge(), libMesh::MeshRefinement::limit_level_mismatch_at_node(), libMesh::MeshRefinement::limit_overrefined_boundary(), libMesh::MeshRefinement::limit_underrefined_boundary(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshCommunication::make_elems_parallel_consistent(), libMesh::MeshRefinement::make_flags_parallel_consistent(), libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_new_nodes_parallel_consistent(), libMesh::MeshCommunication::make_node_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_unique_ids_parallel_consistent(), libMesh::MeshCommunication::make_nodes_parallel_consistent(), libMesh::MeshCommunication::make_p_levels_parallel_consistent(), libMesh::MeshRefinement::make_refinement_compatible(), libMesh::FEMSystem::mesh_position_set(), libMesh::DistributedMesh::n_active_elem(), libMesh::MeshTools::n_active_levels(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::CondensedEigenSystem::n_global_non_condensed_dofs(), libMesh::MeshTools::n_levels(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::MeshTools::n_p_levels(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::DistributedMesh::parallel_max_elem_id(), libMesh::DistributedMesh::parallel_max_node_id(), libMesh::ReplicatedMesh::parallel_max_unique_id(), libMesh::DistributedMesh::parallel_max_unique_id(), libMesh::DistributedMesh::parallel_n_elem(), libMesh::DistributedMesh::parallel_n_nodes(), libMesh::SparsityPattern::Build::parallel_sync(), libMesh::MeshTools::paranoid_n_levels(), libMesh::petsc_auto_fieldsplit(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::MeshBase::prepare_for_use(), libMesh::Nemesis_IO::read(), read(), 
libMesh::CheckpointIO::read_header(), read_header(), libMesh::System::read_header(), libMesh::System::read_legacy_data(), libMesh::System::read_SCALAR_dofs(), read_serialized_bc_names(), read_serialized_bcs_helper(), libMesh::System::read_serialized_blocked_dof_objects(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::System::read_serialized_vector(), libMesh::MeshBase::recalculate_n_partitions(), libMesh::MeshRefinement::refine_and_coarsen_elements(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::CheckpointIO::select_split_config(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::PetscDMWrapper::set_point_range_in_section(), libMesh::PetscDiffSolver::setup_petsc_data(), libMesh::LaplaceMeshSmoother::smooth(), libMesh::split_mesh(), libMesh::MeshBase::subdomain_ids(), libMesh::BoundaryInfo::sync(), libMesh::MeshRefinement::test_level_one(), libMesh::MeshRefinement::test_unflagged(), libMesh::MeshTools::total_weight(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::NameBasedIO::write(), write(), libMesh::System::write_SCALAR_dofs(), write_serialized_bcs_helper(), libMesh::System::write_serialized_blocked_dof_objects(), write_serialized_connectivity(), write_serialized_nodes(), and write_serialized_nodesets().

90  { return _communicator; }

◆ legacy() [1/2]

bool libMesh::XdrIO::legacy ( ) const
inline

Get/Set the flag indicating if we should read/write legacy.

Definition at line 109 of file xdr_io.h.

References _legacy.

Referenced by libMesh::NameBasedIO::read(), read(), and write().

109 { return _legacy; }

◆ legacy() [2/2]

bool& libMesh::XdrIO::legacy ( )
inline

Definition at line 110 of file xdr_io.h.

References _legacy.

110 { return _legacy; }

◆ mesh() [1/2]

MeshBase & libMesh::MeshInput< MeshBase >::mesh ( )
inline protected inherited
Returns
The object as a writable reference.

Definition at line 169 of file mesh_input.h.

Referenced by libMesh::GMVIO::_read_one_cell(), libMesh::VTKIO::cells_to_vtk(), libMesh::TetGenIO::element_in(), libMesh::UNVIO::elements_in(), libMesh::UNVIO::elements_out(), libMesh::UNVIO::groups_in(), libMesh::TetGenIO::node_in(), libMesh::UNVIO::nodes_in(), libMesh::UNVIO::nodes_out(), libMesh::VTKIO::nodes_to_vtk(), libMesh::Nemesis_IO::prepare_to_write_nodal_data(), libMesh::Nemesis_IO::read(), libMesh::ExodusII_IO::read(), libMesh::GMVIO::read(), read(), libMesh::CheckpointIO::read(), libMesh::VTKIO::read(), libMesh::CheckpointIO::read_bcs(), libMesh::CheckpointIO::read_connectivity(), libMesh::CheckpointIO::read_header(), read_header(), libMesh::UCDIO::read_implementation(), libMesh::UNVIO::read_implementation(), libMesh::GmshIO::read_mesh(), libMesh::CheckpointIO::read_nodes(), libMesh::CheckpointIO::read_nodesets(), libMesh::CheckpointIO::read_remote_elem(), read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::OFFIO::read_stream(), libMesh::MatlabIO::read_stream(), libMesh::CheckpointIO::read_subdomain_names(), libMesh::TetGenIO::write(), libMesh::Nemesis_IO::write(), libMesh::ExodusII_IO::write(), write(), libMesh::CheckpointIO::write(), libMesh::GMVIO::write_ascii_new_impl(), libMesh::GMVIO::write_ascii_old_impl(), libMesh::GMVIO::write_binary(), libMesh::GMVIO::write_discontinuous_gmv(), libMesh::Nemesis_IO::write_element_data(), libMesh::ExodusII_IO::write_element_data(), libMesh::UCDIO::write_header(), libMesh::UCDIO::write_implementation(), libMesh::UCDIO::write_interior_elems(), libMesh::GmshIO::write_mesh(), libMesh::VTKIO::write_nodal_data(), libMesh::UCDIO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data_common(), libMesh::ExodusII_IO::write_nodal_data_discontinuous(), libMesh::UCDIO::write_nodes(), libMesh::CheckpointIO::write_nodesets(), write_parallel(), libMesh::GmshIO::write_post(), write_serialized_bcs_helper(), write_serialized_connectivity(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_subdomain_names(), libMesh::UCDIO::write_soln(), and libMesh::CheckpointIO::write_subdomain_names().

170 {
171  if (_obj == nullptr)
172  libmesh_error_msg("ERROR: _obj should not be nullptr!");
173  return *_obj;
174 }

◆ mesh() [2/2]

◆ n_processors()

processor_id_type libMesh::ParallelObject::n_processors ( ) const
inline inherited
Returns
The number of processors in the group.

Definition at line 95 of file parallel_object.h.

References libMesh::ParallelObject::_communicator, and libMesh::Parallel::Communicator::size().

Referenced by libMesh::BoundaryInfo::_find_id_maps(), libMesh::PetscDMWrapper::add_dofs_to_section(), libMesh::DistributedMesh::add_elem(), libMesh::DistributedMesh::add_node(), libMesh::LaplaceMeshSmoother::allgather_graph(), libMesh::FEMSystem::assembly(), libMesh::AztecLinearSolver< T >::AztecLinearSolver(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::EquationSystems::build_parallel_elemental_solution_vector(), libMesh::DistributedMesh::clear(), libMesh::Nemesis_IO_Helper::compute_border_node_ids(), libMesh::Nemesis_IO_Helper::construct_nemesis_filename(), libMesh::UnstructuredMesh::create_pid_mesh(), libMesh::MeshTools::create_processor_bounding_box(), libMesh::DofMap::distribute_dofs(), libMesh::DofMap::distribute_local_dofs_node_major(), libMesh::DofMap::distribute_local_dofs_var_major(), libMesh::EnsightIO::EnsightIO(), libMesh::MeshBase::get_info(), libMesh::SystemSubsetBySubdomain::init(), libMesh::PetscDMWrapper::init_and_attach_petscdm(), libMesh::Nemesis_IO_Helper::initialize(), libMesh::DistributedMesh::insert_elem(), libMesh::MeshTools::libmesh_assert_contiguous_dof_ids(), libMesh::MeshTools::libmesh_assert_parallel_consistent_new_node_procids(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_dof_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::DofMap::local_variable_indices(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshBase::n_active_elem_on_proc(), libMesh::MeshBase::n_elem_on_proc(), libMesh::MeshBase::n_nodes_on_proc(), libMesh::MeshBase::partition(), libMesh::PetscLinearSolver< T >::PetscLinearSolver(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::NameBasedIO::read(), libMesh::Nemesis_IO::read(), libMesh::CheckpointIO::read(), libMesh::CheckpointIO::read_connectivity(), read_header(), libMesh::CheckpointIO::read_nodes(), libMesh::System::read_parallel_data(), libMesh::System::read_SCALAR_dofs(), libMesh::System::read_serialized_blocked_dof_objects(), libMesh::System::read_serialized_vector(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::PetscDMWrapper::set_point_range_in_section(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::DistributedMesh::update_parallel_id_counts(), libMesh::GMVIO::write_binary(), libMesh::GMVIO::write_discontinuous_gmv(), libMesh::System::write_parallel_data(), libMesh::System::write_SCALAR_dofs(), write_serialized_bcs_helper(), libMesh::System::write_serialized_blocked_dof_objects(), write_serialized_connectivity(), write_serialized_nodes(), and write_serialized_nodesets().

96  { return cast_int<processor_id_type>(_communicator.size()); }

◆ pack_element()

void libMesh::XdrIO::pack_element ( std::vector< xdr_id_type > &  conn,
const Elem *  elem,
const dof_id_type  parent_id = DofObject::invalid_id,
const dof_id_type  parent_pid = DofObject::invalid_id 
) const
private

Pack an element into a transfer buffer for parallel communication.

Definition at line 2120 of file xdr_io.C.

References libMesh::DofObject::invalid_id, libMesh::Elem::n_nodes(), libMesh::Elem::node_id(), libMesh::Elem::node_index_range(), libMesh::Elem::p_level(), libMesh::DofObject::processor_id(), libMesh::Elem::subdomain_id(), libMesh::Elem::type(), libMesh::Elem::type_to_n_nodes_map, and libMesh::DofObject::unique_id().

Referenced by write_serialized_connectivity().

2122 {
2123  libmesh_assert(elem);
2124  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2125 
2126  conn.push_back(elem->n_nodes());
2127 
2128  conn.push_back (elem->type());
2129 
2130  // In version 0.7.0+ "id" is stored but it not used. In version 0.9.2+
2131  // we will store unique_id instead, therefore there is no need to
2132  // check for the older version when writing the unique_id.
2133  conn.push_back (elem->unique_id());
2134 
2135  if (parent_id != DofObject::invalid_id)
2136  {
2137  conn.push_back (parent_id);
2138  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2139  conn.push_back (parent_pid);
2140  }
2141 
2142  conn.push_back (elem->processor_id());
2143  conn.push_back (elem->subdomain_id());
2144 
2145 #ifdef LIBMESH_ENABLE_AMR
2146  conn.push_back (elem->p_level());
2147 #endif
2148 
2149  for (auto n : elem->node_index_range())
2150  conn.push_back (elem->node_id(n));
2151 }
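
For reference, the per-element layout appended to conn by the body above is summarized below; this is only a restatement of the code, not a separate API:

  // Entries appended to `conn` for one element, in order:
  //   n_nodes
  //   element type (ElemType value)
  //   unique_id
  //   parent_id, parent_pid      (only when parent_id != DofObject::invalid_id)
  //   processor_id
  //   subdomain_id
  //   p_level                    (only when LIBMESH_ENABLE_AMR is defined)
  //   node_id(0) ... node_id(n_nodes-1)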

◆ partition_map_file_name() [1/2]

const std::string& libMesh::XdrIO::partition_map_file_name ( ) const
inline

Get/Set the partitioning file name.

Definition at line 152 of file xdr_io.h.

References _partition_map_file.

Referenced by read_header(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

152 { return _partition_map_file; }

◆ partition_map_file_name() [2/2]

std::string& libMesh::XdrIO::partition_map_file_name ( )
inline

Definition at line 153 of file xdr_io.h.

References _partition_map_file.

153 { return _partition_map_file; }

◆ polynomial_level_file_name() [1/2]

const std::string& libMesh::XdrIO::polynomial_level_file_name ( ) const
inline

Get/Set the polynomial degree file name.

Definition at line 164 of file xdr_io.h.

References _p_level_file.

Referenced by read_header(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

164 { return _p_level_file; }

◆ polynomial_level_file_name() [2/2]

std::string& libMesh::XdrIO::polynomial_level_file_name ( )
inline

Definition at line 165 of file xdr_io.h.

References _p_level_file.

165 { return _p_level_file; }

◆ processor_id()

processor_id_type libMesh::ParallelObject::processor_id ( ) const
inline inherited
Returns
The rank of this processor in the group.

Definition at line 101 of file parallel_object.h.

References libMesh::ParallelObject::_communicator, and libMesh::Parallel::Communicator::rank().

Referenced by libMesh::BoundaryInfo::_find_id_maps(), libMesh::EquationSystems::_read_impl(), libMesh::PetscDMWrapper::add_dofs_to_section(), libMesh::DistributedMesh::add_elem(), libMesh::BoundaryInfo::add_elements(), libMesh::DofMap::add_neighbors_to_send_list(), libMesh::DistributedMesh::add_node(), libMesh::UnstructuredMesh::all_second_order(), libMesh::MeshTools::Modification::all_tri(), libMesh::FEMSystem::assembly(), libMesh::EquationSystems::build_discontinuous_solution_vector(), libMesh::Nemesis_IO_Helper::build_element_and_node_maps(), libMesh::InfElemBuilder::build_inf_elem(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::EquationSystems::build_parallel_elemental_solution_vector(), libMesh::DistributedMesh::clear(), libMesh::ExodusII_IO_Helper::close(), libMesh::Nemesis_IO_Helper::compute_border_node_ids(), libMesh::Nemesis_IO_Helper::compute_communication_map_parameters(), libMesh::Nemesis_IO_Helper::compute_internal_and_border_elems_and_internal_nodes(), libMesh::Nemesis_IO_Helper::compute_node_communication_maps(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), libMesh::Nemesis_IO_Helper::construct_nemesis_filename(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::ExodusII_IO_Helper::create(), libMesh::DistributedMesh::delete_elem(), libMesh::DistributedMesh::delete_node(), libMesh::MeshCommunication::delete_remote_elements(), libMesh::DofMap::distribute_dofs(), libMesh::DofMap::distribute_local_dofs_node_major(), libMesh::DofMap::distribute_local_dofs_var_major(), libMesh::DistributedMesh::DistributedMesh(), libMesh::DofMap::end_dof(), libMesh::DofMap::end_old_dof(), libMesh::EnsightIO::EnsightIO(), libMesh::MeshFunction::find_element(), libMesh::MeshFunction::find_elements(), libMesh::UnstructuredMesh::find_neighbors(), libMesh::DofMap::first_dof(), libMesh::DofMap::first_old_dof(), libMesh::Nemesis_IO_Helper::get_cmap_params(), libMesh::Nemesis_IO_Helper::get_eb_info_global(), libMesh::Nemesis_IO_Helper::get_elem_cmap(), libMesh::Nemesis_IO_Helper::get_elem_map(), libMesh::MeshBase::get_info(), libMesh::DofMap::get_info(), libMesh::Nemesis_IO_Helper::get_init_global(), libMesh::Nemesis_IO_Helper::get_init_info(), libMesh::Nemesis_IO_Helper::get_loadbal_param(), libMesh::Nemesis_IO_Helper::get_node_cmap(), libMesh::Nemesis_IO_Helper::get_node_map(), libMesh::Nemesis_IO_Helper::get_ns_param_global(), libMesh::Nemesis_IO_Helper::get_ss_param_global(), libMesh::SparsityPattern::Build::handle_vi_vj(), libMesh::SystemSubsetBySubdomain::init(), libMesh::ExodusII_IO_Helper::initialize(), libMesh::ExodusII_IO_Helper::initialize_element_variables(), libMesh::ExodusII_IO_Helper::initialize_global_variables(), libMesh::ExodusII_IO_Helper::initialize_nodal_variables(), libMesh::DistributedMesh::insert_elem(), libMesh::DofMap::is_evaluable(), libMesh::SparsityPattern::Build::join(), libMesh::DofMap::last_dof(), libMesh::MeshTools::libmesh_assert_consistent_distributed(), libMesh::MeshTools::libmesh_assert_consistent_distributed_nodes(), libMesh::MeshTools::libmesh_assert_contiguous_dof_ids(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_object_ids(), libMesh::DofMap::local_variable_indices(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshBase::n_active_local_elem(), 
libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::DofMap::n_local_dofs(), libMesh::System::n_local_dofs(), libMesh::MeshBase::n_local_elem(), libMesh::MeshBase::n_local_nodes(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::SparsityPattern::Build::operator()(), libMesh::DistributedMesh::own_node(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::Nemesis_IO_Helper::put_cmap_params(), libMesh::Nemesis_IO_Helper::put_elem_cmap(), libMesh::Nemesis_IO_Helper::put_elem_map(), libMesh::Nemesis_IO_Helper::put_loadbal_param(), libMesh::Nemesis_IO_Helper::put_node_cmap(), libMesh::Nemesis_IO_Helper::put_node_map(), libMesh::NameBasedIO::read(), libMesh::Nemesis_IO::read(), read(), libMesh::CheckpointIO::read(), libMesh::ExodusII_IO_Helper::read_elem_num_map(), libMesh::ExodusII_IO_Helper::read_global_values(), libMesh::CheckpointIO::read_header(), read_header(), libMesh::System::read_header(), libMesh::System::read_legacy_data(), libMesh::ExodusII_IO_Helper::read_node_num_map(), libMesh::System::read_parallel_data(), libMesh::System::read_SCALAR_dofs(), read_serialized_bc_names(), read_serialized_bcs_helper(), libMesh::System::read_serialized_blocked_dof_objects(), read_serialized_connectivity(), libMesh::System::read_serialized_data(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::System::read_serialized_vector(), libMesh::System::read_serialized_vectors(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::CheckpointIO::select_split_config(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::PetscDMWrapper::set_point_range_in_section(), libMesh::LaplaceMeshSmoother::smooth(), libMesh::MeshTools::total_weight(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::Parallel::Packing< T >::unpack(), libMesh::DistributedMesh::update_parallel_id_counts(), libMesh::NameBasedIO::write(), write(), libMesh::CheckpointIO::write(), libMesh::EquationSystems::write(), libMesh::GMVIO::write_discontinuous_gmv(), libMesh::ExodusII_IO::write_element_data(), libMesh::ExodusII_IO_Helper::write_element_values(), libMesh::ExodusII_IO_Helper::write_elements(), libMesh::ExodusII_IO::write_global_data(), libMesh::ExodusII_IO_Helper::write_global_values(), libMesh::System::write_header(), libMesh::ExodusII_IO::write_information_records(), libMesh::ExodusII_IO_Helper::write_information_records(), libMesh::ExodusII_IO_Helper::write_nodal_coordinates(), libMesh::UCDIO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data_discontinuous(), libMesh::ExodusII_IO_Helper::write_nodal_values(), libMesh::Nemesis_IO_Helper::write_nodesets(), libMesh::ExodusII_IO_Helper::write_nodesets(), libMesh::System::write_parallel_data(), libMesh::System::write_SCALAR_dofs(), write_serialized_bc_names(), write_serialized_bcs_helper(), libMesh::System::write_serialized_blocked_dof_objects(), write_serialized_connectivity(), libMesh::System::write_serialized_data(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_subdomain_names(), libMesh::System::write_serialized_vector(), libMesh::System::write_serialized_vectors(), libMesh::Nemesis_IO_Helper::write_sidesets(), libMesh::ExodusII_IO_Helper::write_sidesets(), libMesh::ExodusII_IO::write_timestep(), libMesh::ExodusII_IO_Helper::write_timestep(), and libMesh::ExodusII_IO::write_timestep_discontinuous().

102  { return cast_int<processor_id_type>(_communicator.rank()); }

◆ read()

void libMesh::XdrIO::read ( const std::string &  name)
override virtual

This method implements reading a mesh from a specified file.

We are future proofing the layout of this file by adding in size information for all stored types. TODO: All types are stored as the same size. Use the size information to pack things efficiently. For now we will assume that "type size" is how the entire file will be encoded.

Implements libMesh::MeshInput< MeshBase >.

Definition at line 1250 of file xdr_io.C.

References _field_width, binary(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::DECODE, legacy(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::ParallelObject::processor_id(), libMesh::READ, read_header(), read_serialized_connectivity(), read_serialized_edge_bcs(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_shellface_bcs(), read_serialized_side_bcs(), read_serialized_subdomain_names(), libMesh::Partitioner::set_node_processor_ids(), value, version(), version_at_least_0_9_2(), version_at_least_1_1_0(), and version_at_least_1_3_0().

Referenced by libMesh::NameBasedIO::read().

1251 {
1252  LOG_SCOPE("read()","XdrIO");
1253 
1254  // Only open the file on processor 0 -- this is especially important because
1255  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
1256  // calls will produce a race condition.
1257  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);
1258 
1259  // convenient reference to our mesh
1260  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1261 
1262  // get the version string.
1263  if (this->processor_id() == 0)
1264  io.data (this->version());
1265  this->comm().broadcast (this->version());
1266 
1267  // note that for "legacy" files the first entry is an
1268  // integer -- not a string at all.
1269  this->legacy() = !(this->version().find("libMesh") < this->version().size());
1270 
1271  // Check for a legacy version format.
1272  if (this->legacy())
1273  libmesh_error_msg("We no longer support reading files in the legacy format.");
1274 
1275  // Read headers with the old id type if they're pre-1.3.0, or with
1276  // the new id type if they're post-1.3.0
1277  std::vector<new_header_id_type> meta_data(10, sizeof(xdr_id_type));
1278  if (this->version_at_least_1_3_0())
1279  {
1280  this->read_header(io, meta_data);
1281  }
1282  else
1283  {
1284  std::vector<old_header_id_type> old_data(10, sizeof(xdr_id_type));
1285 
1286  this->read_header(io, old_data);
1287 
1288  meta_data.assign(old_data.begin(), old_data.end());
1289  }
1290 
1291  const new_header_id_type & n_elem = meta_data[0];
1292  const new_header_id_type & n_nodes = meta_data[1];
1293 
1299  if (version_at_least_0_9_2())
1300  _field_width = cast_int<unsigned int>(meta_data[2]);
1301 
1302  // On systems where uint64_t==unsigned long, we were previously
1303  // writing 64-bit unsigned integers via xdr_u_long(), a function
1304  // which is literally less suited for that task than abort() would
1305  // have been, because at least abort() would have *known* it
1306  // couldn't write rather than truncating writes to 32 bits.
1307  //
1308  // If we have files with version < 1.3.0, then we'll continue to use
1309  // 32 bit field width, regardless of whether the file thinks we
1310  // should, whenever we're on a system where the problem would have
1311  // occurred.
1312  if ((_field_width == 4) ||
1313  (!version_at_least_1_3_0() &&
1315  {
1316  uint32_t type_size = 0;
1317 
1318  // read subdomain names
1319  this->read_serialized_subdomain_names (io);
1320 
1321  // read connectivity
1322  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1323 
1324  // read the nodal locations
1325  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes));
1326 
1327  // read the side boundary conditions
1328  this->read_serialized_side_bcs (io, type_size);
1329 
1330  if (version_at_least_0_9_2())
1331  // read the nodesets
1332  this->read_serialized_nodesets (io, type_size);
1333 
1334  if (version_at_least_1_1_0())
1335  {
1336  // read the edge boundary conditions
1337  this->read_serialized_edge_bcs (io, type_size);
1338 
1339  // read the "shell face" boundary conditions
1340  this->read_serialized_shellface_bcs (io, type_size);
1341  }
1342  }
1343  else if (_field_width == 8)
1344  {
1345  uint64_t type_size = 0;
1346 
1347  // read subdomain names
1348  this->read_serialized_subdomain_names (io);
1349 
1350  // read connectivity
1351  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1352 
1353  // read the nodal locations
1354  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes));
1355 
1356  // read the boundary conditions
1357  this->read_serialized_side_bcs (io, type_size);
1358 
1359  if (version_at_least_0_9_2())
1360  // read the nodesets
1361  this->read_serialized_nodesets (io, type_size);
1362 
1363  if (version_at_least_1_1_0())
1364  {
1365  // read the edge boundary conditions
1366  this->read_serialized_edge_bcs (io, type_size);
1367 
1368  // read the "shell face" boundary conditions
1369  this->read_serialized_shellface_bcs (io, type_size);
1370  }
1371  }
1372 
1373  // set the node processor ids
1374  Partitioner::set_node_processor_ids(mesh);
1375 }

◆ read_header()

template<typename T >
void libMesh::XdrIO::read_header ( Xdr &  io,
std::vector< T > &  meta_data 
)
private

Read header information - templated to handle old (4-byte) or new (8-byte) header id types.

We are future proofing the layout of this file by adding in size information for all stored types. TODO: All types are stored as the same size. Use the size information to pack things efficiently. For now we will assume that "type size" is how the entire file will be encoded.

Definition at line 1380 of file xdr_io.C.

References _field_width, boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::ParallelObject::n_processors(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::MeshBase::reserve_elem(), libMesh::MeshBase::reserve_nodes(), libMesh::MeshInput< MeshBase >::set_n_partitions(), subdomain_map_file_name(), and version_at_least_0_9_2().

Referenced by read().

1381 {
1382  LOG_SCOPE("read_header()","XdrIO");
1383 
1384  // convenient reference to our mesh
1385  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1386 
1387  if (this->processor_id() == 0)
1388  {
1389  unsigned int pos=0;
1390 
1391  io.data (meta_data[pos++]);
1392  io.data (meta_data[pos++]);
1393  io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
1394  io.data (this->subdomain_map_file_name()); // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
1395  io.data (this->partition_map_file_name()); // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
1396  io.data (this->polynomial_level_file_name()); // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;
1397 
1398  if (version_at_least_0_9_2())
1399  {
1400  io.data (meta_data[pos++], "# type size");
1401  io.data (meta_data[pos++], "# uid size");
1402  io.data (meta_data[pos++], "# pid size");
1403  io.data (meta_data[pos++], "# sid size");
1404  io.data (meta_data[pos++], "# p-level size");
1405  // Boundary Condition sizes
1406  io.data (meta_data[pos++], "# eid size"); // elem id
1407  io.data (meta_data[pos++], "# side size"); // side number
1408  io.data (meta_data[pos++], "# bid size"); // boundary id
1409  }
1410  }
1411 
1412  // broadcast the n_elems, n_nodes, and size information
1413  this->comm().broadcast (meta_data);
1414 
1415  this->comm().broadcast (this->boundary_condition_file_name());
1416  this->comm().broadcast (this->subdomain_map_file_name());
1417  this->comm().broadcast (this->partition_map_file_name());
1418  this->comm().broadcast (this->polynomial_level_file_name());
1419 
1420  // Tell the mesh how many nodes/elements to expect. Depending on the mesh type,
1421  // this may allow for efficient adding of nodes/elements.
1422  const T & n_elem = meta_data[0];
1423  const T & n_nodes = meta_data[1];
1424 
1425  mesh.reserve_elem(cast_int<dof_id_type>(n_elem));
1426  mesh.reserve_nodes(cast_int<dof_id_type>(n_nodes));
1427 
1428  // Our mesh is pre-partitioned as it's created
1429  this->set_n_partitions(this->n_processors());
1430 
1436  if (version_at_least_0_9_2())
1437  _field_width = cast_int<unsigned int>(meta_data[2]);
1438 }
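
Summarizing the code above: after the version string (handled in read()), the header read on processor 0 contains the following fields in order; the quoted labels are the comment strings passed to io.data(), and the exact on-disk formatting is handled by Xdr:

  // Header fields, in the order they are read above:
  //   n_elem
  //   n_nodes
  //   boundary condition file name       ("n/a" if not present)
  //   subdomain map file name
  //   partition map file name
  //   polynomial (p-)level file name
  //   // 0.9.2+ files only: per-field size information
  //   "# type size", "# uid size", "# pid size", "# sid size",
  //   "# p-level size", "# eid size", "# side size", "# bid size"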

◆ read_serialized_bc_names()

void libMesh::XdrIO::read_serialized_bc_names ( Xdr &  io,
BoundaryInfo &  info,
bool  is_sideset 
)
private

Read boundary names information (sideset and nodeset) - NEW in 0.9.2 format

Definition at line 2061 of file xdr_io.C.

References libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::ParallelObject::processor_id(), libMesh::BoundaryInfo::set_nodeset_name_map(), libMesh::BoundaryInfo::set_sideset_name_map(), version_at_least_0_9_2(), and version_at_least_1_3_0().

Referenced by read_serialized_bcs_helper(), and read_serialized_nodesets().

2062 {
2063  const bool read_entity_info = version_at_least_0_9_2();
2064  const bool use_new_header_type (this->version_at_least_1_3_0());
2065  if (read_entity_info)
2066  {
2067  new_header_id_type n_boundary_names = 0;
2068  std::vector<new_header_id_type> boundary_ids;
2069  std::vector<std::string> boundary_names;
2070 
2071  // Read the sideset names
2072  if (this->processor_id() == 0)
2073  {
2074  if (use_new_header_type)
2075  io.data(n_boundary_names);
2076  else
2077  {
2078  old_header_id_type temp;
2079  io.data(temp);
2080  n_boundary_names = temp;
2081  }
2082 
2083  boundary_names.resize(n_boundary_names);
2084 
2085  if (n_boundary_names)
2086  {
2087  if (use_new_header_type)
2088  io.data(boundary_ids);
2089  else
2090  {
2091  std::vector<old_header_id_type> temp(n_boundary_names);
2092  io.data(temp);
2093  boundary_ids.assign(temp.begin(), temp.end());
2094  }
2095  io.data(boundary_names);
2096  }
2097  }
2098 
2099  // Broadcast the boundary names to all processors
2100  this->comm().broadcast(n_boundary_names);
2101  if (n_boundary_names == 0)
2102  return;
2103 
2104  boundary_ids.resize(n_boundary_names);
2105  boundary_names.resize(n_boundary_names);
2106  this->comm().broadcast(boundary_ids);
2107  this->comm().broadcast(boundary_names);
2108 
2109  // Reassemble the named boundary information
2110  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2111  info.set_sideset_name_map() : info.set_nodeset_name_map();
2112 
2113  for (unsigned int i=0; i<n_boundary_names; ++i)
2114  boundary_map.insert(std::make_pair(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]));
2115  }
2116 }

◆ read_serialized_bcs_helper()

template<typename T >
void libMesh::XdrIO::read_serialized_bcs_helper ( Xdr &  io,
T ,
const std::string  bc_type 
)
private

Helper function used in read_serialized_side_bcs, read_serialized_edge_bcs, and read_serialized_shellface_bcs.

Definition at line 1841 of file xdr_io.C.

References libMesh::BoundaryInfo::add_edge(), libMesh::BoundaryInfo::add_shellface(), libMesh::BoundaryInfo::add_side(), libMesh::as_range(), boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), data, libMesh::Xdr::data_stream(), libMesh::MeshBase::get_boundary_info(), libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::MeshBase::level_elements_begin(), libMesh::MeshBase::level_elements_end(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), std::min(), libMesh::ParallelObject::processor_id(), read_serialized_bc_names(), libMesh::Xdr::reading(), and version_at_least_1_3_0().

Referenced by read_serialized_edge_bcs(), read_serialized_shellface_bcs(), and read_serialized_side_bcs().

1842 {
1843  if (this->boundary_condition_file_name() == "n/a") return;
1844 
1845  libmesh_assert (io.reading());
1846 
1847  // convenient reference to our mesh
1848  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1849 
1850  // and our boundary info object
1851  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1852 
1853  // Version 0.9.2+ introduces unique ids
1854  read_serialized_bc_names(io, boundary_info, true); // sideset names
1855 
1856  std::vector<DofBCData> dof_bc_data;
1857  std::vector<T> input_buffer;
1858 
1859  new_header_id_type n_bcs=0;
1860  if (this->processor_id() == 0)
1861  {
1862  if (this->version_at_least_1_3_0())
1863  io.data (n_bcs);
1864  else
1865  {
1866  old_header_id_type temp;
1867  io.data (temp);
1868  n_bcs = temp;
1869  }
1870  }
1871  this->comm().broadcast (n_bcs);
1872 
1873  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
1874  {
1875  first_bc = blk*io_blksize;
1876  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
1877 
1878  input_buffer.resize (3*(last_bc - first_bc));
1879 
1880  if (this->processor_id() == 0)
1881  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
1882  cast_int<unsigned int>(input_buffer.size()));
1883 
1884  this->comm().broadcast (input_buffer);
1885  dof_bc_data.clear();
1886  dof_bc_data.reserve (input_buffer.size()/3);
1887 
1888  // convert the input_buffer to DofBCData to facilitate searching
1889  for (std::size_t idx=0; idx<input_buffer.size(); idx+=3)
1890  dof_bc_data.push_back
1891  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
1892  cast_int<unsigned short>(input_buffer[idx+1]),
1893  cast_int<boundary_id_type>(input_buffer[idx+2])));
1894  input_buffer.clear();
1895  // note that while the files *we* write should already be sorted by
1896  // element id this is not necessarily guaranteed.
1897  std::sort (dof_bc_data.begin(), dof_bc_data.end());
1898 
1899  // Look for BCs in this block for all the level-0 elements we have
1900  // (not just local ones). Do this by finding all the entries
1901  // in dof_bc_data whose elem_id match the ID of the current element.
1902  // We cannot rely on nullptr neighbors at this point since the neighbor
1903  // data structure has not been initialized.
1904  for (const auto & elem :
1905  as_range(mesh.level_elements_begin(0),
1906  mesh.level_elements_end(0)))
1907  {
1908  auto bounds =
1909  std::equal_range (dof_bc_data.begin(),
1910  dof_bc_data.end(),
1911  elem->id()
1912 #if defined(__SUNPRO_CC) || defined(__PGI)
1913  , CompareIntDofBCData()
1914 #endif
1915  );
1916 
1917  for (const auto & data : as_range(bounds))
1918  {
1919  libmesh_assert_equal_to (data.dof_id, elem->id());
1920 
1921  if (bc_type == "side")
1922  {
1923  libmesh_assert_less (data.side, elem->n_sides());
1924  boundary_info.add_side (elem, data.side, data.bc_id);
1925  }
1926  else if (bc_type == "edge")
1927  {
1928  libmesh_assert_less (data.side, elem->n_edges());
1929  boundary_info.add_edge (elem, data.side, data.bc_id);
1930  }
1931  else if (bc_type == "shellface")
1932  {
1933  // Shell face IDs can only be 0 or 1.
1934  libmesh_assert_less(data.side, 2);
1935 
1936  boundary_info.add_shellface (elem, data.side, data.bc_id);
1937  }
1938  else
1939  {
1940  libmesh_error_msg("bc_type not recognized: " + bc_type);
1941  }
1942  }
1943  }
1944  }
1945 }
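
As the loop above shows, each boundary-condition record in the file is a triplet; this summary restates the code, it is not a separate format specification:

  // Each record in input_buffer consists of three consecutive values:
  //   element id
  //   side / edge / shellface index   (interpretation depends on bc_type)
  //   boundary id
  // Blocks of up to io_blksize records are read on processor 0, broadcast
  // to all ranks, and then matched against the level-0 elements.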

◆ read_serialized_connectivity()

template<typename T >
void libMesh::XdrIO::read_serialized_connectivity ( Xdr &  io,
const dof_id_type  n_elem,
std::vector< new_header_id_type > &  sizes,
T  
)
private

Read the connectivity for a parallel, distributed mesh

Definition at line 1504 of file xdr_io.C.

References libMesh::Elem::add_child(), libMesh::MeshBase::add_elem(), libMesh::MeshBase::add_point(), libMesh::Parallel::Communicator::broadcast(), libMesh::Elem::build(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::Elem::dim(), libMesh::MeshBase::elem_ptr(), libMesh::MeshInput< MeshBase >::elems_of_dimension, libMesh::Elem::hack_p_level(), libMesh::Elem::INACTIVE, libMesh::DofObject::invalid_id, io_blksize, libMesh::Elem::JUST_REFINED, libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshBase::mesh_dimension(), std::min(), libMesh::MeshTools::n_elem(), libMesh::Elem::n_nodes(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::DofObject::processor_id(), libMesh::Xdr::reading(), libMesh::DofObject::set_id(), libMesh::MeshBase::set_mesh_dimension(), libMesh::Elem::set_node(), libMesh::Elem::set_refinement_flag(), libMesh::DofObject::set_unique_id(), libMesh::Elem::subdomain_id(), subdomain_map_file_name(), libMesh::Elem::type_to_n_nodes_map, and version_at_least_0_9_2().

Referenced by read().

1505 {
1506  libmesh_assert (io.reading());
1507 
1508  if (!n_elem) return;
1509 
1510  const bool
1511  read_p_level = ("." == this->polynomial_level_file_name()),
1512  read_partitioning = ("." == this->partition_map_file_name()),
1513  read_subdomain_id = ("." == this->subdomain_map_file_name());
1514 
1515  // convenient reference to our mesh
1516  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1517 
1518  // Keep track of what kinds of elements this file contains
1519  elems_of_dimension.clear();
1520  elems_of_dimension.resize(4, false);
1521 
1522  std::vector<T> conn, input_buffer(100 /* oversized ! */);
1523 
1524  int level=-1;
1525 
1526  // Version 0.9.2+ introduces unique ids
1527  const size_t unique_id_size_index = 3;
1528 
1529  const bool read_unique_id =
1530  (version_at_least_0_9_2()) &&
1531  sizes[unique_id_size_index];
1532 
1533  T n_elem_at_level=0, n_processed_at_level=0;
1534  for (dof_id_type blk=0, first_elem=0, last_elem=0;
1535  last_elem<n_elem; blk++)
1536  {
1537  first_elem = cast_int<dof_id_type>(blk*io_blksize);
1538  last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
1539  cast_int<std::size_t>(n_elem)));
1540 
1541  conn.clear();
1542 
1543  if (this->processor_id() == 0)
1544  for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
1545  {
1546  if (n_processed_at_level == n_elem_at_level)
1547  {
1548  // get the number of elements to read at this level
1549  io.data (n_elem_at_level);
1550  n_processed_at_level = 0;
1551  level++;
1552  }
1553 
1554  unsigned int pos = 0;
1555  // get the element type,
1556  io.data_stream (&input_buffer[pos++], 1);
1557 
1558  if (read_unique_id)
1559  io.data_stream (&input_buffer[pos++], 1);
1560  // Older versions won't have this field at all (no increment on pos)
1561 
1562  // maybe the parent
1563  if (level)
1564  io.data_stream (&input_buffer[pos++], 1);
1565  else
1566  // We can't always fit DofObject::invalid_id in an
1567  // xdr_id_type
1568  input_buffer[pos++] = static_cast<T>(-1);
1569 
1570  // maybe the processor id
1571  if (read_partitioning)
1572  io.data_stream (&input_buffer[pos++], 1);
1573  else
1574  input_buffer[pos++] = 0;
1575 
1576  // maybe the subdomain id
1577  if (read_subdomain_id)
1578  io.data_stream (&input_buffer[pos++], 1);
1579  else
1580  input_buffer[pos++] = 0;
1581 
1582  // maybe the p level
1583  if (read_p_level)
1584  io.data_stream (&input_buffer[pos++], 1);
1585  else
1586  input_buffer[pos++] = 0;
1587 
1588  // and all the nodes
1589  libmesh_assert_less (pos+Elem::type_to_n_nodes_map[input_buffer[0]], input_buffer.size());
1590  io.data_stream (&input_buffer[pos], Elem::type_to_n_nodes_map[input_buffer[0]]);
1591  conn.insert (conn.end(),
1592  input_buffer.begin(),
1593  input_buffer.begin() + pos + Elem::type_to_n_nodes_map[input_buffer[0]]);
1594  }
1595 
1596  std::size_t conn_size = conn.size();
1597  this->comm().broadcast(conn_size);
1598  conn.resize (conn_size);
1599  this->comm().broadcast (conn);
1600 
1601  // All processors now have the connectivity for this block.
1602  typename std::vector<T>::const_iterator it = conn.begin();
1603  for (dof_id_type e=first_elem; e<last_elem; e++)
1604  {
1605  const ElemType elem_type = static_cast<ElemType>(*it); ++it;
1606 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1607  // We are on all processors here, so we can easily assign
1608  // consistent unique ids if the file doesn't specify them
1609  // later.
1610  unique_id_type unique_id = e;
1611 #endif
1612  if (read_unique_id)
1613  {
1614 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1615  unique_id = cast_int<unique_id_type>(*it);
1616 #endif
1617  ++it;
1618  }
1619  const dof_id_type parent_id =
1620  (*it == static_cast<T>(-1)) ?
1621  DofObject::invalid_id :
1622  cast_int<dof_id_type>(*it);
1623  ++it;
1624  const processor_id_type proc_id =
1625  cast_int<processor_id_type>(*it);
1626  ++it;
1627  const subdomain_id_type subdomain_id =
1628  cast_int<subdomain_id_type>(*it);
1629  ++it;
1630 #ifdef LIBMESH_ENABLE_AMR
1631  const unsigned int p_level =
1632  cast_int<unsigned int>(*it);
1633 #endif
1634  ++it;
1635 
1636  Elem * parent = (parent_id == DofObject::invalid_id) ?
1637  nullptr : mesh.elem_ptr(parent_id);
1638 
1639  Elem * elem = Elem::build (elem_type, parent).release();
1640 
1641  elem->set_id() = e;
1642 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1643  elem->set_unique_id() = unique_id;
1644 #endif
1645  elem->processor_id() = proc_id;
1646  elem->subdomain_id() = subdomain_id;
1647 #ifdef LIBMESH_ENABLE_AMR
1648  elem->hack_p_level(p_level);
1649 
1650  if (parent)
1651  {
1652  parent->add_child(elem);
1653  parent->set_refinement_flag (Elem::INACTIVE);
1654  elem->set_refinement_flag (Elem::JUST_REFINED);
1655  }
1656 #endif
1657 
1658  for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
1659  n++, ++it)
1660  {
1661  const dof_id_type global_node_number =
1662  cast_int<dof_id_type>(*it);
1663 
1664  elem->set_node(n) =
1665  mesh.add_point (Point(), global_node_number);
1666  }
1667 
1668  elems_of_dimension[elem->dim()] = true;
1669  mesh.add_elem(elem);
1670  }
1671  }
1672 
1673  // Set the mesh dimension to the largest encountered for an element
1674  for (unsigned char i=0; i!=4; ++i)
1675  if (elems_of_dimension[i])
1676  mesh.set_mesh_dimension(i);
1677 
1678 #if LIBMESH_DIM < 3
1679  if (mesh.mesh_dimension() > LIBMESH_DIM)
1680  libmesh_error_msg("Cannot open dimension " \
1681  << mesh.mesh_dimension() \
1682  << " mesh file when configured without " \
1683  << mesh.mesh_dimension() \
1684  << "D support.");
1685 #endif
1686 }
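
This helper is not called directly by user code; it runs inside read(). A hedged usage sketch of the public entry point (the mesh type and file name are assumptions, not part of this class's documentation):

#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/xdr_io.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init (argc, argv);

  ReplicatedMesh mesh (init.comm());

  // read() pulls in the header and then the serialized connectivity,
  // node, and boundary-condition blocks documented on this page.
  XdrIO xdr (mesh);
  xdr.read ("mesh.xdr");     // assumed input file name

  mesh.prepare_for_use ();
  return 0;
}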

◆ read_serialized_edge_bcs()

template<typename T >
void libMesh::XdrIO::read_serialized_edge_bcs ( Xdr &  io,
T  value 
)
private

Read the edge boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Returns
The number of bcs read

Definition at line 1958 of file xdr_io.C.

References read_serialized_bcs_helper(), and value.

Referenced by read().

1959 {
1960  read_serialized_bcs_helper(io, value, "edge");
1961 }

◆ read_serialized_nodes()

void libMesh::XdrIO::read_serialized_nodes ( Xdr &  io,
const dof_id_type  n_nodes 
)
private

Read the nodal locations for a parallel, distributed mesh

Definition at line 1690 of file xdr_io.C.

References _field_width, libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::libmesh_isnan(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), std::min(), n_nodes, libMesh::MeshBase::n_nodes(), libMesh::MeshBase::node_ptr_range(), libMesh::MeshBase::node_ref(), libMesh::ParallelObject::processor_id(), libMesh::Xdr::reading(), libMesh::DofObject::set_unique_id(), and version_at_least_0_9_6().

Referenced by read().

1691 {
1692  libmesh_assert (io.reading());
1693 
1694  // convenient reference to our mesh
1695  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1696 
1697  if (!mesh.n_nodes()) return;
1698 
1699  // At this point the elements have been read from file and placeholder nodes
1700  // have been assigned. These nodes, however, do not have the proper (x,y,z)
1701  // locations or unique_id values. This method will read all the
1702  // nodes from disk, and each processor can then grab the individual
1703  // values it needs.
1704 
1705  // If the file includes unique ids for nodes (as indicated by a
1706  // flag in 0.9.6+ files), those will be read next.
1707 
1708  // build up a list of the nodes contained in our local mesh. These are the nodes
1709  // stored on the local processor whose (x,y,z) and unique_id values
1710  // need to be corrected.
1711  std::vector<dof_id_type> needed_nodes; needed_nodes.reserve (mesh.n_nodes());
1712  {
1713  for (auto & node : mesh.node_ptr_range())
1714  needed_nodes.push_back(node->id());
1715 
1716  std::sort (needed_nodes.begin(), needed_nodes.end());
1717 
1718  // We should not have any duplicate node->id()s
1719  libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
1720  }
1721 
1722  // Get the nodes in blocks.
1723  std::vector<Real> coords;
1724  std::pair<std::vector<dof_id_type>::iterator,
1725  std::vector<dof_id_type>::iterator> pos;
1726  pos.first = needed_nodes.begin();
1727 
1728  // Broadcast node coordinates
1729  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1730  {
1731  first_node = blk*io_blksize;
1732  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1733 
1734  coords.resize(3*(last_node - first_node));
1735 
1736  if (this->processor_id() == 0)
1737  io.data_stream (coords.empty() ? nullptr : coords.data(),
1738  cast_int<unsigned int>(coords.size()));
1739 
1740  // For large numbers of processors the majority of processors at any given
1741  // block may not actually need these data. It may be worth profiling this,
1742  // although it is expected that disk IO will be the bottleneck
1743  this->comm().broadcast (coords);
1744 
1745  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
1746  {
1747  // first see if we need this node. use pos.first as a smart lower
1748  // bound, this will ensure that the size of the searched range
1749  // decreases as we match nodes.
1750  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1751 
1752  if (pos.first != pos.second) // we need this node.
1753  {
1754  libmesh_assert_equal_to (*pos.first, n);
1755  libmesh_assert(!libmesh_isnan(coords[idx+0]));
1756  libmesh_assert(!libmesh_isnan(coords[idx+1]));
1757  libmesh_assert(!libmesh_isnan(coords[idx+2]));
1758  mesh.node_ref(cast_int<dof_id_type>(n)) =
1759  Point (coords[idx+0],
1760  coords[idx+1],
1761  coords[idx+2]);
1762 
1763  }
1764  }
1765  }
1766 
1767  if (version_at_least_0_9_6())
1768  {
1769  // Check for node unique ids
1770  unsigned short read_unique_ids;
1771 
1772  if (this->processor_id() == 0)
1773  io.data (read_unique_ids);
1774 
1775  this->comm().broadcast (read_unique_ids);
1776 
1777  // If no unique ids are in the file, we're done.
1778  if (!read_unique_ids)
1779  return;
1780 
1781  std::vector<uint32_t> unique_32;
1782  std::vector<uint64_t> unique_64;
1783 
1784  // We're starting over from node 0 again
1785  pos.first = needed_nodes.begin();
1786 
1787  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1788  {
1789  first_node = blk*io_blksize;
1790  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1791 
1792  libmesh_assert((_field_width == 8) || (_field_width == 4));
1793 
1794  if (_field_width == 8)
1795  unique_64.resize(last_node - first_node);
1796  else
1797  unique_32.resize(last_node - first_node);
1798 
1799  if (this->processor_id() == 0)
1800  {
1801  if (_field_width == 8)
1802  io.data_stream (unique_64.empty() ? nullptr : unique_64.data(),
1803  cast_int<unsigned int>(unique_64.size()));
1804  else
1805  io.data_stream (unique_32.empty() ? nullptr : unique_32.data(),
1806  cast_int<unsigned int>(unique_32.size()));
1807  }
1808 
1809 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1810  if (_field_width == 8)
1811  this->comm().broadcast (unique_64);
1812  else
1813  this->comm().broadcast (unique_32);
1814 
1815  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
1816  {
1817  // first see if we need this node. use pos.first as a smart lower
1818  // bound, this will ensure that the size of the searched range
1819  // decreases as we match nodes.
1820  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1821 
1822  if (pos.first != pos.second) // we need this node.
1823  {
1824  libmesh_assert_equal_to (*pos.first, n);
1825  if (_field_width == 8)
1826  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1827  = unique_64[idx];
1828  else
1829  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1830  = unique_32[idx];
1831  }
1832  }
1833 #endif // LIBMESH_ENABLE_UNIQUE_ID
1834  }
1835  }
1836 }
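
Each block above follows the same read-on-processor-0-then-broadcast idiom. A standalone, hedged sketch of that pattern (the helper name is illustrative, not libMesh API):

#include "libmesh/libmesh_common.h"   // Real
#include "libmesh/parallel.h"         // Parallel::Communicator
#include <vector>

using namespace libMesh;

// Processor 0 is assumed to have filled `coords` from disk; every other
// processor receives a copy and then picks out the entries it needs.
void broadcast_block (const Parallel::Communicator & comm,
                      std::vector<Real> & coords)
{
  std::size_t n = coords.size();
  comm.broadcast (n);        // agree on the block size first
  coords.resize (n);
  comm.broadcast (coords);   // then ship the payload from processor 0
}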

◆ read_serialized_nodesets()

template<typename T >
void libMesh::XdrIO::read_serialized_nodesets ( Xdr &  io,
T 
)
private

Read the nodeset conditions for a parallel, distributed mesh

Returns
The number of nodesets read

Definition at line 1974 of file xdr_io.C.

References libMesh::BoundaryInfo::add_node(), libMesh::as_range(), boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), data, libMesh::Xdr::data_stream(), libMesh::MeshBase::get_boundary_info(), libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), std::min(), libMesh::MeshBase::node_ptr_range(), libMesh::ParallelObject::processor_id(), read_serialized_bc_names(), libMesh::Xdr::reading(), and version_at_least_1_3_0().

Referenced by read().

1975 {
1976  if (this->boundary_condition_file_name() == "n/a") return;
1977 
1978  libmesh_assert (io.reading());
1979 
1980  // convenient reference to our mesh
1981  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1982 
1983  // and our boundary info object
1984  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1985 
1986  // Version 0.9.2+ introduces entity names
1987  read_serialized_bc_names(io, boundary_info, false); // nodeset names
1988 
1989  // TODO: Make a data object that works with both the element and nodal bcs
1990  std::vector<DofBCData> node_bc_data;
1991  std::vector<T> input_buffer;
1992 
1993  new_header_id_type n_nodesets=0;
1994  if (this->processor_id() == 0)
1995  {
1996  if (this->version_at_least_1_3_0())
1997  io.data (n_nodesets);
1998  else
1999  {
2000  old_header_id_type temp;
2001  io.data (temp);
2002  n_nodesets = temp;
2003  }
2004  }
2005  this->comm().broadcast (n_nodesets);
2006 
2007  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
2008  {
2009  first_bc = blk*io_blksize;
2010  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
2011 
2012  input_buffer.resize (2*(last_bc - first_bc));
2013 
2014  if (this->processor_id() == 0)
2015  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2016  cast_int<unsigned int>(input_buffer.size()));
2017 
2018  this->comm().broadcast (input_buffer);
2019  node_bc_data.clear();
2020  node_bc_data.reserve (input_buffer.size()/2);
2021 
2022  // convert the input_buffer to DofBCData to facilitate searching
2023  for (std::size_t idx=0; idx<input_buffer.size(); idx+=2)
2024  node_bc_data.push_back
2025  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
2026  0,
2027  cast_int<boundary_id_type>(input_buffer[idx+1])));
2028  input_buffer.clear();
2029  // note that while the files *we* write should already be sorted by
2030  // node id this is not necessarily guaranteed.
2031  std::sort (node_bc_data.begin(), node_bc_data.end());
2032 
2033  // Look for BCs in this block for all nodes we have
2034  // (not just local ones). Do this by finding all the entries
2035  // in node_bc_data whose dof_id(node_id) match the ID of the current node.
2036  for (auto & node : mesh.node_ptr_range())
2037  {
2038  std::pair<std::vector<DofBCData>::iterator,
2039  std::vector<DofBCData>::iterator> bounds =
2040  std::equal_range (node_bc_data.begin(),
2041  node_bc_data.end(),
2042  node->id()
2043 #if defined(__SUNPRO_CC) || defined(__PGI)
2044  , CompareIntDofBCData()
2045 #endif
2046  );
2047 
2048  for (const auto & data : as_range(bounds))
2049  {
2050  // Note: dof_id from DofBCData is being used to hold node_id here
2051  libmesh_assert_equal_to (data.dof_id, node->id());
2052 
2053  boundary_info.add_node (node, data.bc_id);
2054  }
2055  }
2056  }
2057 }
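
Once read() has finished, the nodeset names restored by this helper can be inspected through the mesh's BoundaryInfo object. A hedged sketch (the output format is illustrative):

#include "libmesh/boundary_info.h"
#include "libmesh/libmesh.h"       // libMesh::out
#include "libmesh/mesh_base.h"

using namespace libMesh;

void print_nodeset_names (const MeshBase & mesh)
{
  const BoundaryInfo & bi = mesh.get_boundary_info();

  // get_nodeset_name_map() pairs each boundary id with its stored name.
  for (const auto & pr : bi.get_nodeset_name_map())
    libMesh::out << "nodeset " << pr.first << " -> " << pr.second << "\n";
}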

◆ read_serialized_shellface_bcs()

template<typename T >
void libMesh::XdrIO::read_serialized_shellface_bcs ( Xdr &  io,
T  value 
)
private

Read the "shell face" boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Returns
The number of bcs read

Definition at line 1966 of file xdr_io.C.

References read_serialized_bcs_helper(), and value.

Referenced by read().

1967 {
1968  read_serialized_bcs_helper(io, value, "shellface");
1969 }

◆ read_serialized_side_bcs()

template<typename T >
void libMesh::XdrIO::read_serialized_side_bcs ( Xdr &  io,
T  value 
)
private

Read the side boundary conditions for a parallel, distributed mesh

Returns
The number of bcs read

Definition at line 1950 of file xdr_io.C.

References read_serialized_bcs_helper(), and value.

Referenced by read().

1951 {
1952  read_serialized_bcs_helper(io, value, "side");
1953 }

◆ read_serialized_subdomain_names()

void libMesh::XdrIO::read_serialized_subdomain_names ( Xdr &  io)
private

Read subdomain name information - NEW in 0.9.2 format

Definition at line 1442 of file xdr_io.C.

References libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::ParallelObject::processor_id(), libMesh::MeshBase::set_subdomain_name_map(), version_at_least_0_9_2(), and version_at_least_1_3_0().

Referenced by read().

1443 {
1444  const bool read_entity_info = version_at_least_0_9_2();
1445  const bool use_new_header_type (this->version_at_least_1_3_0());
1446  if (read_entity_info)
1447  {
1448  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1449 
1450  new_header_id_type n_subdomain_names = 0;
1451  std::vector<new_header_id_type> subdomain_ids;
1452  std::vector<std::string> subdomain_names;
1453 
1454  // Read the subdomain names
1455  if (this->processor_id() == 0)
1456  {
1457  if (use_new_header_type)
1458  io.data(n_subdomain_names);
1459  else
1460  {
1461  old_header_id_type temp;
1462  io.data(temp);
1463  n_subdomain_names = temp;
1464  }
1465 
1466  subdomain_ids.resize(n_subdomain_names);
1467  subdomain_names.resize(n_subdomain_names);
1468 
1469  if (n_subdomain_names)
1470  {
1471  if (use_new_header_type)
1472  io.data(subdomain_ids);
1473  else
1474  {
1475  std::vector<old_header_id_type> temp;
1476  io.data(temp);
1477  subdomain_ids.assign(temp.begin(), temp.end());
1478  }
1479 
1480  io.data(subdomain_names);
1481  }
1482  }
1483 
1484  // Broadcast the subdomain names to all processors
1485  this->comm().broadcast(n_subdomain_names);
1486  if (n_subdomain_names == 0)
1487  return;
1488 
1489  subdomain_ids.resize(n_subdomain_names);
1490  subdomain_names.resize(n_subdomain_names);
1491  this->comm().broadcast(subdomain_ids);
1492  this->comm().broadcast(subdomain_names);
1493 
1494  // Reassemble the named subdomain information
1495  std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();
1496 
1497  for (unsigned int i=0; i<n_subdomain_names; ++i)
1498  subdomain_map.insert(std::make_pair(subdomain_ids[i], subdomain_names[i]));
1499  }
1500 }
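
The recovered names end up in the mesh's subdomain name map. A hedged inspection sketch, assuming a read-only accessor get_subdomain_name_map() that mirrors the set_subdomain_name_map() call used above:

#include "libmesh/libmesh.h"
#include "libmesh/mesh_base.h"

using namespace libMesh;

void print_subdomain_names (const MeshBase & mesh)
{
  // Assumption: get_subdomain_name_map() is the const counterpart of
  // set_subdomain_name_map() used in the code above.
  for (const auto & pr : mesh.get_subdomain_name_map())
    libMesh::out << "subdomain " << pr.first << " -> " << pr.second << "\n";
}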

◆ set_auto_parallel()

void libMesh::XdrIO::set_auto_parallel ( )
inline

Insist that we should write parallel files if and only if the mesh is an already distributed DistributedMesh.

Definition at line 389 of file xdr_io.h.

References _write_parallel, and _write_serial.

390 {
391  this->_write_serial = false;
392  this->_write_parallel = false;
393 }
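
A hedged sketch of the three ways to choose the output mode before calling write(); the file names are assumptions:

#include "libmesh/mesh_base.h"
#include "libmesh/xdr_io.h"

using namespace libMesh;

void write_three_ways (MeshBase & mesh)
{
  XdrIO xdr (mesh);

  xdr.set_write_parallel (true);    // insist on parallel output
  xdr.write ("forced_parallel.xdr");

  xdr.set_write_parallel (false);   // insist on serialized output
  xdr.write ("forced_serial.xdr");

  xdr.set_auto_parallel ();         // decide from whether the mesh is distributed
  xdr.write ("auto.xdr");
}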

◆ set_n_partitions()

void libMesh::MeshInput< MeshBase >::set_n_partitions ( unsigned int  n_parts)
inlineprotectedinherited

Sets the number of partitions in the mesh. Typically this gets done by the partitioner, but some parallel file formats begin "pre-partitioned".

Definition at line 91 of file mesh_input.h.

References libMesh::MeshInput< MT >::mesh().

Referenced by libMesh::Nemesis_IO::read(), and read_header().

91 { this->mesh().set_n_partitions() = n_parts; }

◆ set_write_parallel()

void libMesh::XdrIO::set_write_parallel ( bool  do_parallel = true)
inline

Insist that we should/shouldn't write parallel files.

Definition at line 379 of file xdr_io.h.

References _write_parallel, and _write_serial.

380 {
381  this->_write_parallel = do_parallel;
382 
383  this->_write_serial = !do_parallel;
384 }

◆ skip_comment_lines()

void libMesh::MeshInput< MeshBase >::skip_comment_lines ( std::istream &  in,
const char  comment_start 
)
protectedinherited

Reads input from in, skipping all the lines that start with the character comment_start.

Definition at line 179 of file mesh_input.h.

Referenced by libMesh::TetGenIO::read(), and libMesh::UCDIO::read_implementation().

181 {
182  char c, line[256];
183 
184  while (in.get(c), c==comment_start)
185  in.getline (line, 255);
186 
187  // put back first character of
188  // first non-comment line
189  in.putback (c);
190 }
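
A self-contained sketch of the same idiom for '#'-prefixed comments, using std::getline instead of the fixed 256-character buffer above:

#include <istream>
#include <string>

void skip_comments (std::istream & in, const char comment_start = '#')
{
  char c;
  std::string line;

  // Discard whole lines for as long as they begin with the comment character.
  while (in.get(c), c == comment_start)
    std::getline (in, line);

  // Put back the first character of the first non-comment line.
  in.putback (c);
}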

◆ subdomain_map_file_name() [1/2]

const std::string& libMesh::XdrIO::subdomain_map_file_name ( ) const
inline

Get/Set the subdomain file name.

Definition at line 158 of file xdr_io.h.

References _subdomain_map_file.

Referenced by read_header(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

158 { return _subdomain_map_file; }

◆ subdomain_map_file_name() [2/2]

std::string& libMesh::XdrIO::subdomain_map_file_name ( )
inline

Definition at line 159 of file xdr_io.h.

References _subdomain_map_file.

159 { return _subdomain_map_file; }

◆ version() [1/2]

const std::string& libMesh::XdrIO::version ( ) const
inline

Get/Set the version string. Valid version strings:

* "libMesh-0.7.0+"
* "libMesh-0.7.0+ parallel"

If "libMesh" is not detected in the version string the LegacyXdrIO class will be used to read older (pre version 0.7.0) mesh files.

Definition at line 140 of file xdr_io.h.

References _version.

Referenced by read(), version_at_least_0_9_2(), version_at_least_0_9_6(), version_at_least_1_1_0(), version_at_least_1_3_0(), and write().

140 { return _version; }
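
A hedged sketch of selecting the format before a write; the version string and file name are assumptions (the string must contain "libMesh", and "1.3.0" satisfies the newest version gate documented below):

#include "libmesh/mesh_base.h"
#include "libmesh/xdr_io.h"

using namespace libMesh;

void write_newest_format (MeshBase & mesh)
{
  XdrIO xdr (mesh);

  xdr.version () = "libMesh-1.3.0";  // assumed valid version string
  xdr.binary ()  = true;             // binary XDR rather than ASCII XDA

  xdr.write ("mesh_out.xdr");        // assumed output file name
}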

◆ version() [2/2]

std::string& libMesh::XdrIO::version ( )
inline

Definition at line 141 of file xdr_io.h.

References _version.

141 { return _version; }

◆ version_at_least_0_9_2()

bool libMesh::XdrIO::version_at_least_0_9_2 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 0.9.2.

As of this version we encode integer field widths, nodesets, subdomain names, boundary names, and element unique_id values (if they exist) into our files.

Definition at line 2153 of file xdr_io.C.

References version().

Referenced by read(), read_header(), read_serialized_bc_names(), read_serialized_connectivity(), and read_serialized_subdomain_names().

2154 {
2155  return
2156  (this->version().find("0.9.2") != std::string::npos) ||
2157  (this->version().find("0.9.6") != std::string::npos) ||
2158  (this->version().find("1.1.0") != std::string::npos) ||
2159  (this->version().find("1.3.0") != std::string::npos);
2160 }

◆ version_at_least_0_9_6()

bool libMesh::XdrIO::version_at_least_0_9_6 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 0.9.6.

In this version we add node unique_id values to our files, if they exist.

Definition at line 2162 of file xdr_io.C.

References version().

Referenced by read_serialized_nodes().

2163 {
2164  return
2165  (this->version().find("0.9.6") != std::string::npos) ||
2166  (this->version().find("1.1.0") != std::string::npos) ||
2167  (this->version().find("1.3.0") != std::string::npos);
2168 }

◆ version_at_least_1_1_0()

bool libMesh::XdrIO::version_at_least_1_1_0 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 1.1.0.

In this version we add edge and shellface boundary conditions to our files.

Definition at line 2170 of file xdr_io.C.

References version().

Referenced by read().

2171 {
2172  return
2173  (this->version().find("1.1.0") != std::string::npos) ||
2174  (this->version().find("1.3.0") != std::string::npos);
2175 }

◆ version_at_least_1_3_0()

bool libMesh::XdrIO::version_at_least_1_3_0 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 1.3.0.

In this version we fix handling of uint64_t binary values on Linux, which were previously miswritten as 32 bit via xdr_long.

Definition at line 2177 of file xdr_io.C.

References version().

Referenced by read(), read_serialized_bc_names(), read_serialized_bcs_helper(), read_serialized_nodesets(), and read_serialized_subdomain_names().

2178 {
2179  return
2180  (this->version().find("1.3.0") != std::string::npos);
2181 }
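
A hedged sketch of querying the version gates after a read() to see which optional records the file may carry:

#include "libmesh/libmesh.h"
#include "libmesh/xdr_io.h"

using namespace libMesh;

void report_format_features (const XdrIO & xdr)
{
  if (xdr.version_at_least_0_9_6 ())
    libMesh::out << "file may store node unique_id values\n";
  if (xdr.version_at_least_1_1_0 ())
    libMesh::out << "file may store edge and shellface boundary conditions\n";
  if (xdr.version_at_least_1_3_0 ())
    libMesh::out << "file uses the fixed 64-bit header encoding\n";
}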

◆ write()

void libMesh::XdrIO::write ( const std::string &  name)
overridevirtual

This method implements writing a mesh to a specified file.

Implements libMesh::MeshOutput< MeshBase >.

Definition at line 168 of file xdr_io.C.

References _write_unique_id, libMesh::Parallel::Communicator::barrier(), binary(), boundary_condition_file_name(), libMesh::Xdr::close(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::ENCODE, libMesh::MeshBase::get_boundary_info(), legacy(), libMesh::MeshBase::max_node_id(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::MeshBase::n_elem(), libMesh::MeshTools::n_elem(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::MeshTools::n_p_levels(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::MeshBase::n_subdomains(), libMesh::out, partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), subdomain_map_file_name(), version(), libMesh::WRITE, write_parallel(), write_serialized_connectivity(), write_serialized_edge_bcs(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_shellface_bcs(), write_serialized_side_bcs(), and write_serialized_subdomain_names().

Referenced by libMesh::ErrorVector::plot_error(), and libMesh::NameBasedIO::write().

169 {
170  if (this->legacy())
171  libmesh_error_msg("We don't support writing parallel files in the legacy format.");
172 
173  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);
174 
175  START_LOG("write()","XdrIO");
176 
177  // convenient reference to our mesh
178  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
179 
180  new_header_id_type n_elem = mesh.n_elem();
181  new_header_id_type max_node_id = mesh.max_node_id();
182 
183  new_header_id_type n_side_bcs = mesh.get_boundary_info().n_boundary_conds();
184  new_header_id_type n_edge_bcs = mesh.get_boundary_info().n_edge_conds();
185  new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
186  new_header_id_type n_nodesets = mesh.get_boundary_info().n_nodeset_conds();
187  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);
188 
189  bool write_parallel_files = this->write_parallel();
190 
191  //-------------------------------------------------------------
192  // For all the optional files -- the default file name is "n/a".
193  // However, the user may specify an optional external file.
194 
195  // If there are BCs and the user has not already provided a
196  // file name then write to "."
197  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
198  this->boundary_condition_file_name() == "n/a")
199  this->boundary_condition_file_name() = ".";
200 
201  // If there is more than one subdomain and the user has not specified an
202  // external file then write the subdomain mapping to the default file "."
203  if ((mesh.n_subdomains() > 0) &&
204  (this->subdomain_map_file_name() == "n/a"))
205  this->subdomain_map_file_name() = ".";
206 
207  // In general we don't write the partition information.
208 
209  // If we have p levels and the user has not already provided
210  // a file name then write to "."
211  if ((n_p_levels > 1) &&
212  (this->polynomial_level_file_name() == "n/a"))
213  this->polynomial_level_file_name() = ".";
214 
215  // write the header
216  if (this->processor_id() == 0)
217  {
218  std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
219  io.data (full_ver);
220 
221  io.data (n_elem, "# number of elements");
222  io.data (max_node_id, "# number of nodes"); // We'll write invalid coords into gaps
223 
224  io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
225  io.data (this->subdomain_map_file_name(), "# subdomain id specification file");
226  io.data (this->partition_map_file_name(), "# processor id specification file");
227  io.data (this->polynomial_level_file_name(), "# p-level specification file");
228 
229  // Version 0.9.2+ introduces sizes for each type
230  new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;
231 
232  const bool
233  write_p_level = ("." == this->polynomial_level_file_name()),
234  write_partitioning = ("." == this->partition_map_file_name()),
235  write_subdomain_id = ("." == this->subdomain_map_file_name()),
236  write_bcs = ("." == this->boundary_condition_file_name());
237 
238  io.data (write_size, "# type size");
239  io.data (_write_unique_id ? write_size : zero_size, "# uid size");
240  io.data (write_partitioning ? write_size : zero_size, "# pid size");
241  io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
242  io.data (write_p_level ? write_size : zero_size, "# p-level size");
243  // Boundary Condition sizes
244  io.data (write_bcs ? write_size : zero_size, "# eid size"); // elem id
245  io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
246  io.data (write_bcs ? write_size : zero_size, "# bid size"); // boundary id
247  }
248 
249  if (write_parallel_files)
250  {
251  // Parallel xdr mesh files aren't implemented yet; until they
252  // are we'll just warn the user and write a serial file.
253  libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
254  libMesh::out << "Writing a serialized file instead." << std::endl;
255 
256  // write subdomain names
257  this->write_serialized_subdomain_names(io);
258 
259  // write connectivity
260  this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem));
261 
262  // write the nodal locations
263  this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id));
264 
265  // write the side boundary condition information
266  this->write_serialized_side_bcs (io, n_side_bcs);
267 
268  // write the nodeset information
269  this->write_serialized_nodesets (io, n_nodesets);
270 
271  // write the edge boundary condition information
272  this->write_serialized_edge_bcs (io, n_edge_bcs);
273 
274  // write the "shell face" boundary condition information
275  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
276  }
277  else
278  {
279  // write subdomain names
280  this->write_serialized_subdomain_names(io);
281 
282  // write connectivity
283  this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem));
284 
285  // write the nodal locations
286  this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id));
287 
288  // write the side boundary condition information
289  this->write_serialized_side_bcs (io, n_side_bcs);
290 
291  // write the nodeset information
292  this->write_serialized_nodesets (io, n_nodesets);
293 
294  // write the edge boundary condition information
295  this->write_serialized_edge_bcs (io, n_edge_bcs);
296 
297  // write the "shell face" boundary condition information
298  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
299  }
300 
301  STOP_LOG("write()","XdrIO");
302 
303  // pause all processes until the writing ends -- this will
304  // protect against the pathological case where a write is
305  // followed immediately by a read. The write must be
306  // guaranteed to complete first.
307  io.close();
308  this->comm().barrier();
309 }
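
As the header logic above suggests, the optional per-field file names behave as switches: "." stores that data inline in the main file and "n/a" omits it. A hedged sketch (the file names are assumptions, and the constructor's bool argument is assumed to request binary output):

#include "libmesh/mesh_base.h"
#include "libmesh/xdr_io.h"

using namespace libMesh;

void write_with_options (MeshBase & mesh)
{
  XdrIO xdr (mesh, /*binary=*/true);

  xdr.boundary_condition_file_name () = ".";  // keep BC data inline in the main file
  xdr.partition_map_file_name ()      = ".";  // also record processor ids, skipped by default

  xdr.write ("mesh_out.xdr");
}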

◆ write_discontinuous_equation_systems()

void libMesh::MeshOutput< MeshBase >::write_discontinuous_equation_systems ( const std::string &  fname,
const EquationSystems &  es,
const std::set< std::string > *  system_names = nullptr 
)
virtualinherited

This method implements writing a mesh with discontinuous data to a specified file where the data is taken from the EquationSystems object.

Definition at line 92 of file mesh_output.C.

References libMesh::EquationSystems::build_discontinuous_solution_vector(), libMesh::EquationSystems::build_variable_names(), libMesh::EquationSystems::get_mesh(), and libMesh::out.

Referenced by libMesh::ExodusII_IO::write_timestep_discontinuous().

95 {
96  LOG_SCOPE("write_discontinuous_equation_systems()", "MeshOutput");
97 
98  // We may need to gather and/or renumber a DistributedMesh to output
99  // it, making that const qualifier in our constructor a dirty lie
100  MT & my_mesh = const_cast<MT &>(*_obj);
101 
102  // If we're asked to write data that's associated with a different
103  // mesh, output files full of garbage are the result.
104  libmesh_assert_equal_to(&es.get_mesh(), _obj);
105 
106  // A non-renumbered mesh may not have a contiguous numbering, and
107  // that needs to be fixed before we can build a solution vector.
108  if (my_mesh.max_elem_id() != my_mesh.n_elem() ||
109  my_mesh.max_node_id() != my_mesh.n_nodes())
110  {
111  // If we were allowed to renumber then we should have already
112  // been properly renumbered...
113  libmesh_assert(!my_mesh.allow_renumbering());
114 
115  libmesh_do_once(libMesh::out <<
116  "Warning: This MeshOutput subclass only supports meshes which are contiguously renumbered!"
117  << std::endl;);
118 
119  my_mesh.allow_renumbering(true);
120 
121  my_mesh.renumber_nodes_and_elements();
122 
123  // Not sure what good going back to false will do here, the
124  // renumbering horses have already left the barn...
125  my_mesh.allow_renumbering(false);
126  }
127 
128  MeshSerializer serialize(const_cast<MT &>(*_obj), !_is_parallel_format, _serial_only_needed_on_proc_0);
129 
130  // Build the list of variable names that will be written.
131  std::vector<std::string> names;
132  es.build_variable_names (names, nullptr, system_names);
133 
134  if (!_is_parallel_format)
135  {
136  // Build the nodal solution values & get the variable
137  // names from the EquationSystems object
138  std::vector<Number> soln;
139  es.build_discontinuous_solution_vector (soln, system_names);
140 
141  this->write_nodal_data_discontinuous (fname, soln, names);
142  }
143  else // _is_parallel_format
144  {
145  libmesh_not_implemented();
146  }
147 }

◆ write_equation_systems()

void libMesh::MeshOutput< MeshBase >::write_equation_systems ( const std::string &  fname,
const EquationSystems &  es,
const std::set< std::string > *  system_names = nullptr 
)
virtualinherited

This method implements writing a mesh with data to a specified file where the data is taken from the EquationSystems object.

Reimplemented in libMesh::NameBasedIO.

Definition at line 31 of file mesh_output.C.

References libMesh::EquationSystems::build_parallel_solution_vector(), libMesh::EquationSystems::build_solution_vector(), libMesh::EquationSystems::build_variable_names(), libMesh::EquationSystems::get_mesh(), and libMesh::out.

Referenced by libMesh::Nemesis_IO::write_timestep(), and libMesh::ExodusII_IO::write_timestep().

34 {
35  LOG_SCOPE("write_equation_systems()", "MeshOutput");
36 
37  // We may need to gather and/or renumber a DistributedMesh to output
38  // it, making that const qualifier in our constructor a dirty lie
39  MT & my_mesh = const_cast<MT &>(*_obj);
40 
41  // If we're asked to write data that's associated with a different
42  // mesh, output files full of garbage are the result.
43  libmesh_assert_equal_to(&es.get_mesh(), _obj);
44 
45  // A non-renumbered mesh may not have a contiguous numbering, and
46  // that needs to be fixed before we can build a solution vector.
47  if (my_mesh.max_elem_id() != my_mesh.n_elem() ||
48  my_mesh.max_node_id() != my_mesh.n_nodes())
49  {
50  // If we were allowed to renumber then we should have already
51  // been properly renumbered...
52  libmesh_assert(!my_mesh.allow_renumbering());
53 
54  libmesh_do_once(libMesh::out <<
55  "Warning: This MeshOutput subclass only supports meshes which are contiguously renumbered!"
56  << std::endl;);
57 
58  my_mesh.allow_renumbering(true);
59 
60  my_mesh.renumber_nodes_and_elements();
61 
62  // Not sure what good going back to false will do here, the
63  // renumbering horses have already left the barn...
64  my_mesh.allow_renumbering(false);
65  }
66 
67  MeshSerializer serialize(const_cast<MT &>(*_obj), !_is_parallel_format, _serial_only_needed_on_proc_0);
68 
69  // Build the list of variable names that will be written.
70  std::vector<std::string> names;
71  es.build_variable_names (names, nullptr, system_names);
72 
73  if (!_is_parallel_format)
74  {
75  // Build the nodal solution values & get the variable
76  // names from the EquationSystems object
77  std::vector<Number> soln;
78  es.build_solution_vector (soln, system_names);
79 
80  this->write_nodal_data (fname, soln, names);
81  }
82  else // _is_parallel_format
83  {
84  std::unique_ptr<NumericVector<Number>> parallel_soln =
85  es.build_parallel_solution_vector(system_names);
86 
87  this->write_nodal_data (fname, *parallel_soln, names);
88  }
89 }

◆ write_nodal_data() [1/2]

virtual void libMesh::MeshOutput< MeshBase >::write_nodal_data ( const std::string &  ,
const std::vector< Number > &  ,
const std::vector< std::string > &   
)
inlinevirtualinherited

This method implements writing a mesh with nodal data to a specified file where the nodal data and variable names are provided.

Reimplemented in libMesh::ExodusII_IO, libMesh::Nemesis_IO, libMesh::UCDIO, libMesh::NameBasedIO, libMesh::GmshIO, libMesh::GMVIO, libMesh::VTKIO, libMesh::MEDITIO, libMesh::GnuPlotIO, and libMesh::TecplotIO.

Definition at line 105 of file mesh_output.h.

108  { libmesh_not_implemented(); }

◆ write_nodal_data() [2/2]

void libMesh::MeshOutput< MeshBase >::write_nodal_data ( const std::string &  fname,
const NumericVector< Number > &  parallel_soln,
const std::vector< std::string > &  names 
)
virtualinherited

This method should be overridden by "parallel" output formats for writing nodal data. Instead of getting a localized copy of the nodal solution vector, it is passed a NumericVector of type=PARALLEL which is in node-major order i.e. (u0,v0,w0, u1,v1,w1, u2,v2,w2, u3,v3,w3, ...) and contains n_nodes*n_vars total entries. Then, it is up to the individual I/O class to extract the required solution values from this vector and write them in parallel.

If not implemented, localizes the parallel vector into a std::vector and calls the other version of this function.

Reimplemented in libMesh::Nemesis_IO.

Definition at line 150 of file mesh_output.C.

References libMesh::NumericVector< T >::localize().

153 {
154  // This is the fallback implementation for parallel I/O formats that
155  // do not yet implement proper writing in parallel, and instead rely
156  // on the full solution vector being available on all processors.
157  std::vector<Number> soln;
158  parallel_soln.localize(soln);
159  this->write_nodal_data(fname, soln, names);
160 }
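
A one-function sketch of the node-major layout described above (the names are illustrative): the value of variable var at node n sits at index n*n_vars + var.

#include <cstddef>

// Index into a node-major solution vector laid out as (u0,v0,w0, u1,v1,w1, ...).
std::size_t soln_index (std::size_t node, std::size_t var, std::size_t n_vars)
{
  return node*n_vars + var;
}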

◆ write_nodal_data_discontinuous()

virtual void libMesh::MeshOutput< MeshBase >::write_nodal_data_discontinuous ( const std::string &  ,
const std::vector< Number > &  ,
const std::vector< std::string > &   
)
inlinevirtualinherited

This method implements writing a mesh with discontinuous data to a specified file where the nodal data and variables names are provided.

Reimplemented in libMesh::ExodusII_IO.

Definition at line 114 of file mesh_output.h.

117  { libmesh_not_implemented(); }

◆ write_parallel()

bool libMesh::XdrIO::write_parallel ( ) const
inline

Report whether we should write parallel files.

Definition at line 358 of file xdr_io.h.

References _write_parallel, _write_serial, libMesh::MeshBase::is_serial(), libMesh::MeshInput< MeshBase >::mesh(), and libMesh::MeshOutput< MT >::mesh().

Referenced by write().

359 {
360  // We can't insist on both serial and parallel
361  libmesh_assert (!this->_write_serial || !this->_write_parallel);
362 
363  // If we insisted on serial, do that
364  if (this->_write_serial)
365  return false;
366 
367  // If we insisted on parallel, do that
368  if (this->_write_parallel)
369  return true;
370 
371  // If we're doing things automatically, check the mesh
372  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
373  return !mesh.is_serial();
374 }

◆ write_serialized_bc_names()

void libMesh::XdrIO::write_serialized_bc_names ( Xdr &  io,
const BoundaryInfo &  info,
bool  is_sideset 
) const
private

Write boundary names information (sideset and nodeset) - NEW in 0.9.2 format

Definition at line 1210 of file xdr_io.C.

References libMesh::Xdr::data(), libMesh::BoundaryInfo::get_nodeset_name_map(), libMesh::BoundaryInfo::get_sideset_name_map(), and libMesh::ParallelObject::processor_id().

Referenced by write_serialized_bcs_helper(), and write_serialized_nodesets().

1211 {
1212  if (this->processor_id() == 0)
1213  {
1214  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1215  info.get_sideset_name_map() : info.get_nodeset_name_map();
1216 
1217  std::vector<new_header_id_type> boundary_ids;
1218  boundary_ids.reserve(boundary_map.size());
1219 
1220  std::vector<std::string> boundary_names;
1221  boundary_names.reserve(boundary_map.size());
1222 
1223  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
1224  // return writable references in boundary_info, it's possible for the user to leave some entity names
1225  // blank. We can't write those to the XDA file.
1226  new_header_id_type n_boundary_names = 0;
1227  for (const auto & pr : boundary_map)
1228  if (!pr.second.empty())
1229  {
1230  n_boundary_names++;
1231  boundary_ids.push_back(pr.first);
1232  boundary_names.push_back(pr.second);
1233  }
1234 
1235  if (is_sideset)
1236  io.data(n_boundary_names, "# sideset id to name map");
1237  else
1238  io.data(n_boundary_names, "# nodeset id to name map");
1239  // Write out the ids and names in two vectors
1240  if (n_boundary_names)
1241  {
1242  io.data(boundary_ids);
1243  io.data(boundary_names);
1244  }
1245  }
1246 }
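
Only non-empty names are written, so naming the boundary ids beforehand is what makes this record useful. A hedged sketch (the ids and strings are illustrative):

#include "libmesh/boundary_info.h"

using namespace libMesh;

void name_boundaries (BoundaryInfo & bi)
{
  // These writable accessors return the stored name string by reference.
  bi.sideset_name (1) = "inlet";
  bi.nodeset_name (2) = "clamped_nodes";
}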

◆ write_serialized_bcs_helper()

void libMesh::XdrIO::write_serialized_bcs_helper ( Xdr &  io,
const new_header_id_type  n_side_bcs,
const std::string  bc_type 
) const
private

Helper function used in write_serialized_side_bcs, write_serialized_edge_bcs, and write_serialized_shellface_bcs.

Definition at line 993 of file xdr_io.C.

References libMesh::as_range(), bc_id, libMesh::BoundaryInfo::boundary_ids(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::BoundaryInfo::edge_boundary_ids(), libMesh::Parallel::Communicator::gather(), libMesh::MeshBase::get_boundary_info(), libMesh::MeshTools::Generation::Private::idx(), libMesh::BoundaryInfo::invalid_id, libMesh::MeshBase::local_level_elements_begin(), libMesh::MeshBase::local_level_elements_end(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::ParallelObject::n_processors(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::BoundaryInfo::shellface_boundary_ids(), write_serialized_bc_names(), and libMesh::Xdr::writing().

Referenced by write_serialized_edge_bcs(), write_serialized_shellface_bcs(), and write_serialized_side_bcs().

994 {
995  libmesh_assert (io.writing());
996 
997  // convenient reference to our mesh
998  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
999 
1000  // and our boundary info object
1001  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1002 
1003  // Version 0.9.2+ introduces entity names
1004  write_serialized_bc_names(io, boundary_info, true); // sideset names
1005 
1006  new_header_id_type n_bcs_out = n_bcs;
1007  if (this->processor_id() == 0)
1008  {
1009  std::stringstream comment_string;
1010  comment_string << "# number of " << bc_type << " boundary conditions";
1011  io.data (n_bcs_out, comment_string.str().c_str());
1012  }
1013  n_bcs_out = 0;
1014 
1015  if (!n_bcs) return;
1016 
1017  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1018  std::vector<std::size_t> bc_sizes(this->n_processors());
1019 
1020  // Container to catch boundary IDs handed back by BoundaryInfo
1021  std::vector<boundary_id_type> bc_ids;
1022 
1023  // Boundary conditions are only specified for level-0 elements
1024  dof_id_type n_local_level_0_elem=0;
1025  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
1026  mesh.local_level_elements_end(0)))
1027  {
1028  if (bc_type == "side")
1029  {
1030  for (auto s : elem->side_index_range())
1031  {
1032  boundary_info.boundary_ids (elem, s, bc_ids);
1033  for (const auto & bc_id : bc_ids)
1034  if (bc_id != BoundaryInfo::invalid_id)
1035  {
1036  xfer_bcs.push_back (n_local_level_0_elem);
1037  xfer_bcs.push_back (s) ;
1038  xfer_bcs.push_back (bc_id);
1039  }
1040  }
1041  }
1042  else if (bc_type == "edge")
1043  {
1044  for (auto e : elem->edge_index_range())
1045  {
1046  boundary_info.edge_boundary_ids (elem, e, bc_ids);
1047  for (const auto & bc_id : bc_ids)
1048  if (bc_id != BoundaryInfo::invalid_id)
1049  {
1050  xfer_bcs.push_back (n_local_level_0_elem);
1051  xfer_bcs.push_back (e) ;
1052  xfer_bcs.push_back (bc_id);
1053  }
1054  }
1055  }
1056  else if (bc_type == "shellface")
1057  {
1058  for (unsigned short sf=0; sf<2; sf++)
1059  {
1060  boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
1061  for (const auto & bc_id : bc_ids)
1062  if (bc_id != BoundaryInfo::invalid_id)
1063  {
1064  xfer_bcs.push_back (n_local_level_0_elem);
1065  xfer_bcs.push_back (sf) ;
1066  xfer_bcs.push_back (bc_id);
1067  }
1068  }
1069  }
1070  else
1071  {
1072  libmesh_error_msg("bc_type not recognized: " + bc_type);
1073  }
1074 
1075  // Increment the level-0 element counter.
1076  n_local_level_0_elem++;
1077  }
1078 
1079  xfer_bcs.push_back(n_local_level_0_elem);
1080  std::size_t my_size = xfer_bcs.size();
1081  this->comm().gather (0, my_size, bc_sizes);
1082 
1083  // All processors send their xfer buffers to processor 0
1084  // Processor 0 will receive all buffers and write out the bcs
1085  if (this->processor_id() == 0)
1086  {
1087  dof_id_type elem_offset = 0;
1088  for (unsigned int pid=0; pid<this->n_processors(); pid++)
1089  {
1090  recv_bcs.resize(bc_sizes[pid]);
1091  if (pid == 0)
1092  recv_bcs = xfer_bcs;
1093  else
1094  this->comm().receive (pid, recv_bcs);
1095 
1096  const dof_id_type my_n_local_level_0_elem
1097  = cast_int<dof_id_type>(recv_bcs.back());
1098  recv_bcs.pop_back();
1099 
1100  for (std::size_t idx=0; idx<recv_bcs.size(); idx += 3, n_bcs_out++)
1101  recv_bcs[idx+0] += elem_offset;
1102 
1103  io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
1104  cast_int<unsigned int>(recv_bcs.size()), 3);
1105  elem_offset += my_n_local_level_0_elem;
1106  }
1107  libmesh_assert_equal_to (n_bcs, n_bcs_out);
1108  }
1109  else
1110  this->comm().send (0, xfer_bcs);
1111 }

◆ write_serialized_connectivity()

void libMesh::XdrIO::write_serialized_connectivity ( Xdr &  io,
const dof_id_type  n_elem 
) const
private

Write the connectivity for a parallel, distributed mesh

Definition at line 351 of file xdr_io.C.

References _write_unique_id, libMesh::as_range(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), data, libMesh::Xdr::data_stream(), libMesh::Parallel::Communicator::gather(), libMesh::MeshBase::local_level_elements_begin(), libMesh::MeshBase::local_level_elements_end(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::MeshTools::n_active_levels(), libMesh::MeshBase::n_elem(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::ParallelObject::n_processors(), pack_element(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::pull_parallel_vector_data(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), subdomain_map_file_name(), libMesh::Parallel::Communicator::sum(), and libMesh::Xdr::writing().

Referenced by write().

352 {
353  libmesh_assert (io.writing());
354 
355  const bool
356  write_p_level = ("." == this->polynomial_level_file_name()),
357  write_partitioning = ("." == this->partition_map_file_name()),
358  write_subdomain_id = ("." == this->subdomain_map_file_name());
359 
360  // convenient reference to our mesh
361  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
362  libmesh_assert_equal_to (n_elem, mesh.n_elem());
363 
364  // We will only write active elements and their parents.
365  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
366  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);
367 
368  // Find the number of local and global elements at each level
369 #ifndef NDEBUG
370  xdr_id_type tot_n_elem = 0;
371 #endif
372  for (unsigned int level=0; level<n_active_levels; level++)
373  {
374  n_global_elem_at_level[level] =
375  MeshTools::n_elem(mesh.local_level_elements_begin(level),
376  mesh.local_level_elements_end(level));
377 
378  this->comm().sum(n_global_elem_at_level[level]);
379 #ifndef NDEBUG
380  tot_n_elem += n_global_elem_at_level[level];
381 #endif
382  libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
383  libmesh_assert_less_equal (tot_n_elem, n_elem);
384  }
385 
386  std::vector<xdr_id_type>
387  xfer_conn, recv_conn;
388  std::vector<dof_id_type>
389  n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
390  std::vector<xdr_id_type> output_buffer;
391  std::vector<std::size_t>
392  xfer_buf_sizes(this->n_processors());
393 
394 #ifdef LIBMESH_ENABLE_AMR
395  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
396  id_map_type parent_id_map, child_id_map;
397 #endif
398 
399  dof_id_type my_next_elem=0, next_global_elem=0;
400 
401  //-------------------------------------------
402  // First write the level-0 elements directly.
403  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
404  mesh.local_level_elements_end(0)))
405  {
406  pack_element (xfer_conn, elem);
407 #ifdef LIBMESH_ENABLE_AMR
408  parent_id_map[elem->id()] = std::make_pair(this->processor_id(),
409  my_next_elem);
410 #endif
411  ++my_next_elem;
412  }
413  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.
414 
415  std::size_t my_size = xfer_conn.size();
416  this->comm().gather (0, my_next_elem, n_elem_on_proc);
417  this->comm().gather (0, my_size, xfer_buf_sizes);
418 
419  processor_offsets[0] = 0;
420  for (unsigned int pid=1; pid<this->n_processors(); pid++)
421  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
422 
423  // All processors send their xfer buffers to processor 0.
424  // Processor 0 will receive the data and write out the elements.
425  if (this->processor_id() == 0)
426  {
427  // Write the number of elements at this level.
428  {
429  std::string comment = "# n_elem at level 0", legend = ", [ type ";
430  if (_write_unique_id)
431  legend += "uid ";
432  if (write_partitioning)
433  legend += "pid ";
434  if (write_subdomain_id)
435  legend += "sid ";
436  if (write_p_level)
437  legend += "p_level ";
438  legend += "(n0 ... nN-1) ]";
439  comment += legend;
440  io.data (n_global_elem_at_level[0], comment.c_str());
441  }
442 
443  for (unsigned int pid=0; pid<this->n_processors(); pid++)
444  {
445  recv_conn.resize(xfer_buf_sizes[pid]);
446  if (pid == 0)
447  recv_conn = xfer_conn;
448  else
449  this->comm().receive (pid, recv_conn);
450 
451  // at a minimum, the buffer should contain the number of elements,
452  // which could be 0.
453  libmesh_assert (!recv_conn.empty());
454 
455  {
456  const xdr_id_type n_elem_received = recv_conn.back();
457  std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();
458 
459  for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
460  {
461  output_buffer.clear();
462 
463  // n. nodes
464  const xdr_id_type n_nodes = *recv_conn_iter;
465  ++recv_conn_iter;
466 
467  // type
468  output_buffer.push_back(*recv_conn_iter);
469  ++recv_conn_iter;
470 
471  // unique_id
472  if (_write_unique_id)
473  output_buffer.push_back(*recv_conn_iter);
474  ++recv_conn_iter;
475 
476  // processor id
477  if (write_partitioning)
478  output_buffer.push_back(*recv_conn_iter);
479  ++recv_conn_iter;
480 
481  // subdomain id
482  if (write_subdomain_id)
483  output_buffer.push_back(*recv_conn_iter);
484  ++recv_conn_iter;
485 
486 #ifdef LIBMESH_ENABLE_AMR
487  // p level
488  if (write_p_level)
489  output_buffer.push_back(*recv_conn_iter);
490  ++recv_conn_iter;
491 #endif
492  for (dof_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
493  output_buffer.push_back(*recv_conn_iter);
494 
495  io.data_stream
496  (output_buffer.data(),
497  cast_int<unsigned int>(output_buffer.size()),
498  cast_int<unsigned int>(output_buffer.size()));
499  }
500  }
501  }
502  }
503  else
504  this->comm().send (0, xfer_conn);
505 
506 #ifdef LIBMESH_ENABLE_AMR
507  //--------------------------------------------------------------------
508  // Next write the remaining elements indirectly through their parents.
509  // This will ensure that the children are written in the proper order
510  // so they can be reconstructed properly.
511  for (unsigned int level=1; level<n_active_levels; level++)
512  {
513  xfer_conn.clear();
514 
515  dof_id_type my_n_elem_written_at_level = 0;
516  for (const auto & parent : as_range(mesh.local_level_elements_begin(level-1),
517  mesh.local_level_elements_end(level-1)))
518  if (!parent->active()) // we only want the parent elements at this level, and
519  { // there is no direct iterator for this obscure use
520  id_map_type::iterator pos = parent_id_map.find(parent->id());
521  libmesh_assert (pos != parent_id_map.end());
522  const processor_id_type parent_pid = pos->second.first;
523  const dof_id_type parent_id = pos->second.second;
524  parent_id_map.erase(pos);
525 
526  for (auto & child : parent->child_ref_range())
527  {
528  pack_element (xfer_conn, &child, parent_id, parent_pid);
529 
530  // this approach introduces the possibility that we write
531  // non-local elements. These elements may well be parents
532  // at the next step
533  child_id_map[child.id()] = std::make_pair (child.processor_id(),
534  my_n_elem_written_at_level++);
535  my_next_elem++;
536  }
537  }
538  xfer_conn.push_back(my_n_elem_written_at_level);
539  my_size = xfer_conn.size();
540  this->comm().gather (0, my_size, xfer_buf_sizes);
541 
542  // Processor 0 will receive the data and write the elements.
543  if (this->processor_id() == 0)
544  {
545  // Write the number of elements at this level.
546  {
547  char buf[80];
548  std::sprintf(buf, "# n_elem at level %u", level);
549  std::string comment(buf), legend = ", [ type ";
550 
551  if (_write_unique_id)
552  legend += "uid ";
553  legend += "parent ";
554  if (write_partitioning)
555  legend += "pid ";
556  if (write_subdomain_id)
557  legend += "sid ";
558  if (write_p_level)
559  legend += "p_level ";
560  legend += "(n0 ... nN-1) ]";
561  comment += legend;
562  io.data (n_global_elem_at_level[level], comment.c_str());
563  }
564 
565  for (unsigned int pid=0; pid<this->n_processors(); pid++)
566  {
567  recv_conn.resize(xfer_buf_sizes[pid]);
568  if (pid == 0)
569  recv_conn = xfer_conn;
570  else
571  this->comm().receive (pid, recv_conn);
572 
573  // at a minimum, the buffer should contain the number of elements,
574  // which could be 0.
575  libmesh_assert (!recv_conn.empty());
576 
577  {
578  const xdr_id_type n_elem_received = recv_conn.back();
579  std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();
580 
581  for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
582  {
583  output_buffer.clear();
584 
585  // n. nodes
586  const xdr_id_type n_nodes = *recv_conn_iter;
587  ++recv_conn_iter;
588 
589  // type
590  output_buffer.push_back(*recv_conn_iter);
591  ++recv_conn_iter;
592 
593  // unique_id
594  if (_write_unique_id)
595  output_buffer.push_back(*recv_conn_iter);
596  ++recv_conn_iter;
597 
598  // parent local id
599  const xdr_id_type parent_local_id = *recv_conn_iter;
600  ++recv_conn_iter;
601 
602  // parent processor id
603  const xdr_id_type parent_pid = *recv_conn_iter;
604  ++recv_conn_iter;
605 
606  output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);
607 
608  // processor id
609  if (write_partitioning)
610  output_buffer.push_back(*recv_conn_iter);
611  ++recv_conn_iter;
612 
613  // subdomain id
614  if (write_subdomain_id)
615  output_buffer.push_back(*recv_conn_iter);
616  ++recv_conn_iter;
617 
618  // p level
619  if (write_p_level)
620  output_buffer.push_back(*recv_conn_iter);
621  ++recv_conn_iter;
622 
623  for (xdr_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
624  output_buffer.push_back(*recv_conn_iter);
625 
626  io.data_stream
627  (output_buffer.data(),
628  cast_int<unsigned int>(output_buffer.size()),
629  cast_int<unsigned int>(output_buffer.size()));
630  }
631  }
632  }
633  }
634  else
635  this->comm().send (0, xfer_conn);
636 
637  // update the processor_offsets
638  processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
639  this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
640  for (unsigned int pid=1; pid<this->n_processors(); pid++)
641  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
642 
643  // Now, at the next level we will again iterate over local parents. However,
644  // those parents may have been written by other processors (at this step),
645  // so we need to gather them into our *_id_maps.
646  {
647  std::map<processor_id_type, std::vector<dof_id_type>> requested_ids;
648 
649  for (const auto & elem : as_range(mesh.local_level_elements_begin(level),
650  mesh.local_level_elements_end(level)))
651  if (!child_id_map.count(elem->id()))
652  {
653  libmesh_assert_not_equal_to (elem->parent()->processor_id(), this->processor_id());
654  const processor_id_type pid = elem->parent()->processor_id();
655  if (pid != this->processor_id())
656  requested_ids[pid].push_back(elem->id());
657  }
658 
659  auto gather_functor =
660  [& child_id_map]
661  (processor_id_type libmesh_dbg_var(pid),
662  const std::vector<dof_id_type> & ids,
663  std::vector<dof_id_type> & data)
664  {
665  const std::size_t ids_size = ids.size();
666  data.resize(ids_size);
667 
668  // Fill those requests by overwriting the requested ids
669  for (std::size_t i=0; i != ids_size; i++)
670  {
671  libmesh_assert (child_id_map.count(ids[i]));
672  libmesh_assert_equal_to (child_id_map[ids[i]].first, pid);
673 
674  data[i] = child_id_map[ids[i]].second;
675  }
676  };
677 
678  auto action_functor =
679  [& child_id_map]
680  (processor_id_type pid,
681  const std::vector<dof_id_type> & ids,
682  const std::vector<dof_id_type> & data)
683  {
684  std::size_t data_size = data.size();
685 
686  for (std::size_t i=0; i != data_size; i++)
687  child_id_map[ids[i]] =
688  std::make_pair (pid, data[i]);
689  };
690 
691  // Trade ids back and forth
692  const dof_id_type * ex = nullptr;
693  Parallel::pull_parallel_vector_data
694  (this->comm(), requested_ids, gather_functor, action_functor, ex);
695 
696  // overwrite the parent_id_map with the child_id_map, but
697  // use std::map::swap() for efficiency.
698  parent_id_map.swap(child_id_map);
699  child_id_map.clear();
700  }
701  }
702 #endif // LIBMESH_ENABLE_AMR
703  if (this->processor_id() == 0)
704  libmesh_assert_equal_to (next_global_elem, n_elem);
705 
706 }

◆ write_serialized_edge_bcs()

void libMesh::XdrIO::write_serialized_edge_bcs ( Xdr io,
const new_header_id_type  n_edge_bcs 
) const
private

Write the edge boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Definition at line 1122 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by write().

1123 {
1124  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
1125 }

◆ write_serialized_nodes()

void libMesh::XdrIO::write_serialized_nodes ( Xdr io,
const dof_id_type  n_nodes 
) const
private

Write the nodal locations for a parallel, distributed mesh

Definition at line 710 of file xdr_io.C.

References libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::Parallel::Communicator::gather(), libMesh::Parallel::Communicator::get_unique_tag(), libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::MeshBase::local_node_ptr_range(), libMesh::MeshBase::max_node_id(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), std::min(), libMesh::ParallelObject::n_processors(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::wait().

Referenced by write().
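Node data is streamed in fixed-size chunks of io_blksize ids so that processor 0 never has to buffer the whole node set at once. The following is a stripped-down sketch of just the blocking loop under an assumed max_node_id; the gather and non-blocking-receive machinery of the real method is omitted.

#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
  const std::size_t io_blksize  = 128000;  // matches XdrIO::io_blksize
  const std::size_t max_node_id = 300000;  // assumed example mesh size

  for (std::size_t blk = 0, last_node = 0; last_node < max_node_id; blk++)
    {
      const std::size_t first_node = blk * io_blksize;
      last_node = std::min((blk + 1) * io_blksize, max_node_id);

      // Each processor would collect the coordinates of its local nodes
      // with ids in [first_node, last_node) and send them to processor 0,
      // which writes one contiguous chunk per block.
      std::cout << "block " << blk << ": ids [" << first_node
                << ", " << last_node << ")\n";
    }
  return 0;
}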

711 {
712  // convenient reference to our mesh
713  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
714  libmesh_assert_equal_to (max_node_id, mesh.max_node_id());
715 
716  std::vector<dof_id_type> xfer_ids;
717  std::vector<Real> xfer_coords;
718  std::vector<Real> & coords=xfer_coords;
719 
720  std::vector<std::vector<dof_id_type>> recv_ids (this->n_processors());
721  std::vector<std::vector<Real>> recv_coords(this->n_processors());
722 
723 #ifdef LIBMESH_ENABLE_UNIQUE_ID
724  std::vector<xdr_id_type> xfer_unique_ids;
725  std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
726  std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());
727 #endif // LIBMESH_ENABLE_UNIQUE_ID
728 
729  std::size_t n_written=0;
730 
731  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
732  {
733  const std::size_t first_node = blk*io_blksize;
734  last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
735 
736  const std::size_t tot_id_size = last_node - first_node;
737 
738  // Build up the xfer buffers on each processor
739  xfer_ids.clear();
740  xfer_coords.clear();
741 
742  for (const auto & node : mesh.local_node_ptr_range())
743  if ((node->id() >= first_node) && // node in [first_node, last_node)
744  (node->id() < last_node))
745  {
746  xfer_ids.push_back(node->id());
747  xfer_coords.push_back((*node)(0));
748 #if LIBMESH_DIM > 1
749  xfer_coords.push_back((*node)(1));
750 #endif
751 #if LIBMESH_DIM > 2
752  xfer_coords.push_back((*node)(2));
753 #endif
754  }
755 
756  //-------------------------------------
757  // Send the xfer buffers to processor 0
758  std::vector<std::size_t> ids_size;
759 
760  const std::size_t my_ids_size = xfer_ids.size();
761 
762  // explicitly gather ids_size
763  this->comm().gather (0, my_ids_size, ids_size);
764 
765  // We will have lots of simultaneous receives if we are
766  // processor 0, so let's use nonblocking receives.
767  std::vector<Parallel::Request>
768  id_request_handles(this->n_processors()-1),
769  coord_request_handles(this->n_processors()-1);
770 
771  Parallel::MessageTag
772  id_tag = mesh.comm().get_unique_tag(1234),
773  coord_tag = mesh.comm().get_unique_tag(1235);
774 
775  // Post the receives -- do this on processor 0 only.
776  if (this->processor_id() == 0)
777  {
778  for (unsigned int pid=0; pid<this->n_processors(); pid++)
779  {
780  recv_ids[pid].resize(ids_size[pid]);
781  recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);
782 
783  if (pid == 0)
784  {
785  recv_ids[0] = xfer_ids;
786  recv_coords[0] = xfer_coords;
787  }
788  else
789  {
790  this->comm().receive (pid, recv_ids[pid],
791  id_request_handles[pid-1],
792  id_tag);
793  this->comm().receive (pid, recv_coords[pid],
794  coord_request_handles[pid-1],
795  coord_tag);
796  }
797  }
798  }
799  else
800  {
801  // Send -- do this on all other processors.
802  this->comm().send(0, xfer_ids, id_tag);
803  this->comm().send(0, xfer_coords, coord_tag);
804  }
805 
806  // -------------------------------------------------------
807  // Receive the messages and write the output on processor 0.
808  if (this->processor_id() == 0)
809  {
810  // Wait for all the receives to complete. We have no
811  // need for the statuses since we already know the
812  // buffer sizes.
813  Parallel::wait (id_request_handles);
814  Parallel::wait (coord_request_handles);
815 
816  for (unsigned int pid=0; pid<this->n_processors(); pid++)
817  libmesh_assert_equal_to(recv_coords[pid].size(),
818  recv_ids[pid].size()*LIBMESH_DIM);
819 
820  // Write the coordinates in this block.
821  // Some of these coordinates may correspond to ids for which
822  // no node exists, if we have a discontiguous node
823  // numbering!
824 
825  // Write invalid values for unused node ids
826  coords.clear();
827  coords.resize (3*tot_id_size, std::numeric_limits<Real>::quiet_NaN());
828 
829  for (unsigned int pid=0; pid<this->n_processors(); pid++)
830  for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
831  {
832  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
833  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
834  libmesh_assert_less(local_idx, tot_id_size);
835 
836  libmesh_assert_less ((3*local_idx+2), coords.size());
837  libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());
838 
839  coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
840 #if LIBMESH_DIM > 1
841  coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
842 #else
843  coords[3*local_idx+1] = 0.;
844 #endif
845 #if LIBMESH_DIM > 2
846  coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
847 #else
848  coords[3*local_idx+2] = 0.;
849 #endif
850 
851  n_written++;
852  }
853 
854  io.data_stream (coords.empty() ? nullptr : coords.data(),
855  cast_int<unsigned int>(coords.size()), 3);
856  }
857  }
858 
859  if (this->processor_id() == 0)
860  libmesh_assert_less_equal (n_written, max_node_id);
861 
862 #ifdef LIBMESH_ENABLE_UNIQUE_ID
863  // XDR unsigned char doesn't work as anticipated
864  unsigned short write_unique_ids = 1;
865 #else
866  unsigned short write_unique_ids = 0;
867 #endif
868  if (this->processor_id() == 0)
869  io.data (write_unique_ids, "# presence of unique ids");
870 
871 #ifdef LIBMESH_ENABLE_UNIQUE_ID
872  n_written = 0;
873 
874  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
875  {
876  const std::size_t first_node = blk*io_blksize;
877  last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
878 
879  const std::size_t tot_id_size = last_node - first_node;
880 
881  // Build up the xfer buffers on each processor
882  xfer_ids.clear();
883  xfer_ids.reserve(tot_id_size);
884  xfer_unique_ids.clear();
885  xfer_unique_ids.reserve(tot_id_size);
886 
887  for (const auto & node : mesh.local_node_ptr_range())
888  if ((node->id() >= first_node) && // node in [first_node, last_node)
889  (node->id() < last_node))
890  {
891  xfer_ids.push_back(node->id());
892  xfer_unique_ids.push_back(node->unique_id());
893  }
894 
895  //-------------------------------------
896  // Send the xfer buffers to processor 0
897  std::vector<std::size_t> ids_size;
898 
899  const std::size_t my_ids_size = xfer_ids.size();
900 
901  // explicitly gather ids_size
902  this->comm().gather (0, my_ids_size, ids_size);
903 
904  // We will have lots of simultaneous receives if we are
905  // processor 0, so let's use nonblocking receives.
906  std::vector<Parallel::Request>
907  unique_id_request_handles(this->n_processors()-1),
908  id_request_handles(this->n_processors()-1);
909 
910  Parallel::MessageTag
911  unique_id_tag = mesh.comm().get_unique_tag(1236),
912  id_tag = mesh.comm().get_unique_tag(1237);
913 
914  // Post the receives -- do this on processor 0 only.
915  if (this->processor_id() == 0)
916  {
917  for (unsigned int pid=0; pid<this->n_processors(); pid++)
918  {
919  recv_ids[pid].resize(ids_size[pid]);
920  recv_unique_ids[pid].resize(ids_size[pid]);
921 
922  if (pid == 0)
923  {
924  recv_ids[0] = xfer_ids;
925  recv_unique_ids[0] = xfer_unique_ids;
926  }
927  else
928  {
929  this->comm().receive (pid, recv_ids[pid],
930  id_request_handles[pid-1],
931  id_tag);
932  this->comm().receive (pid, recv_unique_ids[pid],
933  unique_id_request_handles[pid-1],
934  unique_id_tag);
935  }
936  }
937  }
938  else
939  {
940  // Send -- do this on all other processors.
941  this->comm().send(0, xfer_ids, id_tag);
942  this->comm().send(0, xfer_unique_ids, unique_id_tag);
943  }
944 
945  // -------------------------------------------------------
946  // Receive the messages and write the output on processor 0.
947  if (this->processor_id() == 0)
948  {
949  // Wait for all the receives to complete. We have no
950  // need for the statuses since we already know the
951  // buffer sizes.
952  Parallel::wait (id_request_handles);
953  Parallel::wait (unique_id_request_handles);
954 
955  // Write the unique ids in this block.
956  for (unsigned int pid=0; pid<this->n_processors(); pid++)
957  {
958  libmesh_assert_equal_to
959  (recv_ids[pid].size(), recv_unique_ids[pid].size());
960  }
961 
962  libmesh_assert_less_equal
963  (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));
964 
965  unique_ids.clear();
966  unique_ids.resize(tot_id_size, unique_id_type(-1));
967 
968  for (unsigned int pid=0; pid<this->n_processors(); pid++)
969  for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
970  {
971  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
972  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
973  libmesh_assert_less (local_idx, unique_ids.size());
974 
975  unique_ids[local_idx] = recv_unique_ids[pid][idx];
976 
977  n_written++;
978  }
979 
980  io.data_stream (unique_ids.empty() ? nullptr : unique_ids.data(),
981  cast_int<unsigned int>(unique_ids.size()), 1);
982  }
983  }
984 
985  if (this->processor_id() == 0)
986  libmesh_assert_less_equal (n_written, max_node_id);
987 
988 #endif // LIBMESH_ENABLE_UNIQUE_ID
989 }

◆ write_serialized_nodesets()

void libMesh::XdrIO::write_serialized_nodesets ( Xdr io,
const new_header_id_type  n_nodesets 
) const
private

Write the nodeset (nodal boundary condition) information for a parallel, distributed mesh

Definition at line 1136 of file xdr_io.C.

References bc_id, libMesh::BoundaryInfo::boundary_ids(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::Parallel::Communicator::gather(), libMesh::MeshBase::get_boundary_info(), libMesh::MeshTools::Generation::Private::idx(), libMesh::BoundaryInfo::invalid_id, libMesh::MeshBase::local_node_ptr_range(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::ParallelObject::n_processors(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), write_serialized_bc_names(), and libMesh::Xdr::writing().

Referenced by write().
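Each processor's transfer buffer is a flat list of (node id, boundary id) pairs with the processor's local node count appended at the end, which processor 0 uses to offset node ids into the global ordering. A small sketch of assembling such a buffer with plain standard-library containers is shown here; the helper name and input types are assumptions, and the real method queries BoundaryInfo::boundary_ids() instead.

#include <cstdint>
#include <utility>
#include <vector>

// (node id, boundary id) pairs gathered for the local nodes; hypothetical input.
std::vector<std::uint64_t>
build_nodeset_buffer(const std::vector<std::pair<std::uint64_t, std::int16_t>> & local_pairs,
                     std::uint64_t n_local_nodes)
{
  std::vector<std::uint64_t> xfer_bcs;
  xfer_bcs.reserve(2 * local_pairs.size() + 1);

  for (const auto & p : local_pairs)
    {
      xfer_bcs.push_back(p.first);                               // node id
      xfer_bcs.push_back(static_cast<std::uint64_t>(p.second));  // boundary id
    }

  // The trailing entry carries the local node count so that processor 0
  // can accumulate a running node offset across processors.
  xfer_bcs.push_back(n_local_nodes);
  return xfer_bcs;
}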

1137 {
1138  libmesh_assert (io.writing());
1139 
1140  // convenient reference to our mesh
1141  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1142 
1143  // and our boundary info object
1144  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1145 
1146  // Version 0.9.2+ introduces entity names
1147  write_serialized_bc_names(io, boundary_info, false); // nodeset names
1148 
1149  new_header_id_type n_nodesets_out = n_nodesets;
1150  if (this->processor_id() == 0)
1151  io.data (n_nodesets_out, "# number of nodesets");
1152  n_nodesets_out = 0;
1153 
1154  if (!n_nodesets) return;
1155 
1156  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1157  std::vector<std::size_t> bc_sizes(this->n_processors());
1158 
1159  // Container to catch boundary IDs handed back by BoundaryInfo
1160  std::vector<boundary_id_type> nodeset_ids;
1161 
1162  dof_id_type n_node=0;
1163  for (const auto & node : mesh.local_node_ptr_range())
1164  {
1165  boundary_info.boundary_ids (node, nodeset_ids);
1166  for (const auto & bc_id : nodeset_ids)
1167  if (bc_id != BoundaryInfo::invalid_id)
1168  {
1169  xfer_bcs.push_back (node->id());
1170  xfer_bcs.push_back (bc_id);
1171  }
1172  }
1173 
1174  xfer_bcs.push_back(n_node);
1175  std::size_t my_size = xfer_bcs.size();
1176  this->comm().gather (0, my_size, bc_sizes);
1177 
1178  // All processors send their xfer buffers to processor 0
1179  // Processor 0 will receive all buffers and write out the bcs
1180  if (this->processor_id() == 0)
1181  {
1182  dof_id_type node_offset = 0;
1183  for (unsigned int pid=0; pid<this->n_processors(); pid++)
1184  {
1185  recv_bcs.resize(bc_sizes[pid]);
1186  if (pid == 0)
1187  recv_bcs = xfer_bcs;
1188  else
1189  this->comm().receive (pid, recv_bcs);
1190 
1191  const dof_id_type my_n_node =
1192  cast_int<dof_id_type>(recv_bcs.back());
1193  recv_bcs.pop_back();
1194 
1195  for (std::size_t idx=0; idx<recv_bcs.size(); idx += 2, n_nodesets_out++)
1196  recv_bcs[idx+0] += node_offset;
1197 
1198  io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
1199  cast_int<unsigned int>(recv_bcs.size()), 2);
1200  node_offset += my_n_node;
1201  }
1202  libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
1203  }
1204  else
1205  this->comm().send (0, xfer_bcs);
1206 }

◆ write_serialized_shellface_bcs()

void libMesh::XdrIO::write_serialized_shellface_bcs ( Xdr io,
const new_header_id_type  n_shellface_bcs 
) const
private

Write the "shell face" boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Definition at line 1129 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by write().

1130 {
1131  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
1132 }

◆ write_serialized_side_bcs()

void libMesh::XdrIO::write_serialized_side_bcs ( Xdr io,
const new_header_id_type  n_side_bcs 
) const
private

Write the side boundary conditions for a parallel, distributed mesh

Definition at line 1115 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by write().

1116 {
1117  write_serialized_bcs_helper(io, n_side_bcs, "side");
1118 }

◆ write_serialized_subdomain_names()

void libMesh::XdrIO::write_serialized_subdomain_names ( Xdr io) const
private

Write subdomain name information - NEW in 0.9.2 format

Definition at line 313 of file xdr_io.C.

References libMesh::Xdr::data(), libMesh::MeshBase::get_subdomain_name_map(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), and libMesh::ParallelObject::processor_id().

Referenced by write().
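The id-to-name map is flattened into two parallel vectors, skipping entries whose name was left blank, before being handed to Xdr::data(). A self-contained sketch of that filtering step with an assumed example map (not the actual Xdr calls) follows.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

int main()
{
  // Assumed example map; in XdrIO this comes from
  // MeshBase::get_subdomain_name_map().
  const std::map<std::uint16_t, std::string> subdomain_map =
    {{0, "fluid"}, {1, ""}, {2, "solid"}};

  std::vector<std::uint64_t> subdomain_ids;
  std::vector<std::string>   subdomain_names;

  // Only named subdomains are written; blank names are skipped.
  for (const auto & pr : subdomain_map)
    if (!pr.second.empty())
      {
        subdomain_ids.push_back(pr.first);
        subdomain_names.push_back(pr.second);
      }

  // Result: subdomain_ids = {0, 2}, subdomain_names = {"fluid", "solid"}
  return 0;
}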

314 {
315  if (this->processor_id() == 0)
316  {
317  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
318 
319  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
320 
321  std::vector<new_header_id_type> subdomain_ids;
322  subdomain_ids.reserve(subdomain_map.size());
323 
324  std::vector<std::string> subdomain_names;
325  subdomain_names.reserve(subdomain_map.size());
326 
327  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
328  // return writable references in mesh_base, it's possible for the user to leave some entity names
329  // blank. We can't write those to the XDA file.
330  new_header_id_type n_subdomain_names = 0;
331  for (const auto & pr : subdomain_map)
332  if (!pr.second.empty())
333  {
334  n_subdomain_names++;
335  subdomain_ids.push_back(pr.first);
336  subdomain_names.push_back(pr.second);
337  }
338 
339  io.data(n_subdomain_names, "# subdomain id to name map");
340  // Write out the ids and names in two vectors
341  if (n_subdomain_names)
342  {
343  io.data(subdomain_ids);
344  io.data(subdomain_names);
345  }
346  }
347 }

Member Data Documentation

◆ _bc_file_name

std::string libMesh::XdrIO::_bc_file_name
private

Definition at line 342 of file xdr_io.h.

Referenced by boundary_condition_file_name().

◆ _binary

bool libMesh::XdrIO::_binary
private

Definition at line 335 of file xdr_io.h.

Referenced by binary().

◆ _communicator

◆ _field_width

unsigned int libMesh::XdrIO::_field_width
private

Definition at line 340 of file xdr_io.h.

Referenced by read(), read_header(), and read_serialized_nodes().

◆ _is_parallel_format

const bool libMesh::MeshOutput< MeshBase >::_is_parallel_format
protectedinherited

Flag specifying whether this format is parallel-capable. If this is false (the default), I/O is only permitted when the mesh has been serialized.

Definition at line 159 of file mesh_output.h.

Referenced by libMesh::FroIO::write(), libMesh::PostscriptIO::write(), and libMesh::EnsightIO::write().

◆ _legacy

bool libMesh::XdrIO::_legacy
private

Definition at line 336 of file xdr_io.h.

Referenced by legacy().

◆ _p_level_file

std::string libMesh::XdrIO::_p_level_file
private

Definition at line 345 of file xdr_io.h.

Referenced by polynomial_level_file_name().

◆ _partition_map_file

std::string libMesh::XdrIO::_partition_map_file
private

Definition at line 343 of file xdr_io.h.

Referenced by partition_map_file_name().

◆ _serial_only_needed_on_proc_0

const bool libMesh::MeshOutput< MeshBase >::_serial_only_needed_on_proc_0
protectedinherited

Flag specifying whether this format can be written by serializing the mesh to processor zero only.

If this is false (the default), the mesh will be serialized to all processors.

Definition at line 168 of file mesh_output.h.

◆ _subdomain_map_file

std::string libMesh::XdrIO::_subdomain_map_file
private

Definition at line 344 of file xdr_io.h.

Referenced by subdomain_map_file_name().

◆ _version

std::string libMesh::XdrIO::_version
private

Definition at line 341 of file xdr_io.h.

Referenced by version().

◆ _write_parallel

bool libMesh::XdrIO::_write_parallel
private

Definition at line 338 of file xdr_io.h.

Referenced by set_auto_parallel(), set_write_parallel(), and write_parallel().

◆ _write_serial

bool libMesh::XdrIO::_write_serial
private

Definition at line 337 of file xdr_io.h.

Referenced by set_auto_parallel(), set_write_parallel(), and write_parallel().

◆ _write_unique_id

bool libMesh::XdrIO::_write_unique_id
private

Definition at line 339 of file xdr_io.h.

Referenced by write(), and write_serialized_connectivity().

◆ elems_of_dimension

◆ io_blksize

const std::size_t libMesh::XdrIO::io_blksize = 128000
staticprivate

Define the block size to use for chunked IO.

Definition at line 350 of file xdr_io.h.

Referenced by read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), and write_serialized_nodes().
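For example, with the default value of 128000, a mesh whose node ids run up to 1,000,000 would be handled in ceil(1,000,000 / 128,000) = 8 chunks of at most 128000 ids each; the read_serialized_*() routines chunk their input the same way.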


The documentation for this class was generated from the following files: