libMesh::XdrIO Class Reference

#include <xdr_io.h>

Inheritance diagram for libMesh::XdrIO:

Public Types

typedef largest_id_type xdr_id_type
 
typedef uint32_t header_id_type
 

Public Member Functions

 XdrIO (MeshBase &, const bool=false)
 
 XdrIO (const MeshBase &, const bool=false)
 
virtual ~XdrIO ()
 
virtual void read (const std::string &) libmesh_override
 
virtual void write (const std::string &) libmesh_override
 
bool binary () const
 
bool & binary ()
 
bool legacy () const
 
bool & legacy ()
 
bool write_parallel () const
 
void set_write_parallel (bool do_parallel=true)
 
void set_auto_parallel ()
 
const std::string & version () const
 
std::string & version ()
 
const std::string & boundary_condition_file_name () const
 
std::string & boundary_condition_file_name ()
 
const std::string & partition_map_file_name () const
 
std::string & partition_map_file_name ()
 
const std::string & subdomain_map_file_name () const
 
std::string & subdomain_map_file_name ()
 
const std::string & polynomial_level_file_name () const
 
std::string & polynomial_level_file_name ()
 
bool version_at_least_0_9_2 () const
 
bool version_at_least_0_9_6 () const
 
bool version_at_least_1_1_0 () const
 
virtual void write_equation_systems (const std::string &, const EquationSystems &, const std::set< std::string > *system_names=libmesh_nullptr)
 
virtual void write_nodal_data (const std::string &, const std::vector< Number > &, const std::vector< std::string > &)
 
virtual void write_nodal_data (const std::string &, const NumericVector< Number > &, const std::vector< std::string > &)
 
unsigned int & ascii_precision ()
 
const Parallel::Communicator & comm () const
 
processor_id_type n_processors () const
 
processor_id_type processor_id () const
 

Protected Member Functions

MeshBase & mesh ()
 
void set_n_partitions (unsigned int n_parts)
 
void skip_comment_lines (std::istream &in, const char comment_start)
 
const MeshBase & mesh () const
 

Protected Attributes

std::vector< bool > elems_of_dimension
 
const bool _is_parallel_format
 
const bool _serial_only_needed_on_proc_0
 
const Parallel::Communicator & _communicator
 

Private Member Functions

void write_serialized_subdomain_names (Xdr &io) const
 
void write_serialized_connectivity (Xdr &io, const dof_id_type n_elem) const
 
void write_serialized_nodes (Xdr &io, const dof_id_type n_nodes) const
 
void write_serialized_bcs_helper (Xdr &io, const header_id_type n_side_bcs, const std::string bc_type) const
 
void write_serialized_side_bcs (Xdr &io, const header_id_type n_side_bcs) const
 
void write_serialized_edge_bcs (Xdr &io, const header_id_type n_edge_bcs) const
 
void write_serialized_shellface_bcs (Xdr &io, const header_id_type n_shellface_bcs) const
 
void write_serialized_nodesets (Xdr &io, const header_id_type n_nodesets) const
 
void write_serialized_bc_names (Xdr &io, const BoundaryInfo &info, bool is_sideset) const
 
void read_serialized_subdomain_names (Xdr &io)
 
template<typename T >
void read_serialized_connectivity (Xdr &io, const dof_id_type n_elem, std::vector< header_id_type > &sizes, T)
 
void read_serialized_nodes (Xdr &io, const dof_id_type n_nodes)
 
template<typename T >
void read_serialized_bcs_helper (Xdr &io, T, const std::string bc_type)
 
template<typename T >
void read_serialized_side_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_edge_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_shellface_bcs (Xdr &io, T)
 
template<typename T >
void read_serialized_nodesets (Xdr &io, T)
 
void read_serialized_bc_names (Xdr &io, BoundaryInfo &info, bool is_sideset)
 
void pack_element (std::vector< xdr_id_type > &conn, const Elem *elem, const dof_id_type parent_id=DofObject::invalid_id, const dof_id_type parent_pid=DofObject::invalid_id) const
 

Private Attributes

bool _binary
 
bool _legacy
 
bool _write_serial
 
bool _write_parallel
 
bool _write_unique_id
 
header_id_type _field_width
 
std::string _version
 
std::string _bc_file_name
 
std::string _partition_map_file
 
std::string _subdomain_map_file
 
std::string _p_level_file
 

Static Private Attributes

static const std::size_t io_blksize = 128000
 

Detailed Description

MeshIO class used for writing XDR (eXternal Data Representation) and XDA mesh files. XDR/XDA is libmesh's internal data format, and allows the full refinement tree structure of the mesh to be written to file.

Author
Benjamin Kirk
John Peterson
Date
2004

Definition at line 51 of file xdr_io.h.

Member Typedef Documentation

Definition at line 60 of file xdr_io.h.

Definition at line 57 of file xdr_io.h.

Constructor & Destructor Documentation

libMesh::XdrIO::XdrIO ( MeshBase & mesh,
const bool  binary_in = false 
)
explicit

Constructor. Takes a writable reference to a mesh object. This is the constructor required to read a mesh. The optional parameter binary can be used to switch between ASCII (false, the default) or binary (true) files.

Definition at line 117 of file xdr_io.C.

117  :
118  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
119  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
121  _binary (binary_in),
122  _legacy (false),
123  _write_serial (false),
124  _write_parallel (false),
125 #ifdef LIBMESH_ENABLE_UNIQUE_ID
126  _write_unique_id (true),
127 #else
128  _write_unique_id (false),
129 #endif
130  _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
131  _version ("libMesh-1.1.0"),
132  _bc_file_name ("n/a"),
133  _partition_map_file ("n/a"),
134  _subdomain_map_file ("n/a"),
135  _p_level_file ("n/a")
136 {
137 }
ParallelObject(const Parallel::Communicator &comm_in)
bool _binary
Definition: xdr_io.h:305
bool _write_parallel
Definition: xdr_io.h:308
std::string _partition_map_file
Definition: xdr_io.h:313
std::string _bc_file_name
Definition: xdr_io.h:312
bool _write_unique_id
Definition: xdr_io.h:309
std::string _subdomain_map_file
Definition: xdr_io.h:314
header_id_type _field_width
Definition: xdr_io.h:310
std::string _p_level_file
Definition: xdr_io.h:315
bool _write_serial
Definition: xdr_io.h:307
std::string _version
Definition: xdr_io.h:311
bool _legacy
Definition: xdr_io.h:306
libMesh::XdrIO::XdrIO ( const MeshBase & mesh,
const bool  binary_in = false 
)
explicit

Constructor. Takes a reference to a constant mesh object. This constructor will only allow us to write the mesh. The optional parameter binary can be used to switch between ASCII (false, the default) or binary (true) files.

Definition at line 141 of file xdr_io.C.

141  :
142  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
144  _binary (binary_in)
145 {
146 }
ParallelObject(const Parallel::Communicator &comm_in)
bool _binary
Definition: xdr_io.h:305
libMesh::XdrIO::~XdrIO ( )
virtual

Destructor.

Definition at line 150 of file xdr_io.C.

151 {
152 }

Member Function Documentation

unsigned int& libMesh::MeshOutput< MeshBase >::ascii_precision ( )
inherited

Return/set the precision to use when writing ASCII files.

By default we use numeric_limits<Real>::digits10 + 2, which should be enough to write out to ASCII and get the exact same Real back when reading in.

Referenced by libMesh::TecplotIO::write_ascii(), libMesh::GMVIO::write_ascii_new_impl(), and libMesh::GMVIO::write_ascii_old_impl().

bool libMesh::XdrIO::binary ( ) const
inline

Get/Set the flag indicating if we should read/write binary.

Definition at line 100 of file xdr_io.h.

References _binary.

Referenced by libMesh::NameBasedIO::read(), read(), and write().

100 { return _binary; }
bool _binary
Definition: xdr_io.h:305
bool& libMesh::XdrIO::binary ( )
inline

Definition at line 101 of file xdr_io.h.

References _binary.

101 { return _binary; }
bool _binary
Definition: xdr_io.h:305
const std::string& libMesh::XdrIO::boundary_condition_file_name ( ) const
inline

Get/Set the boundary condition file name.

Definition at line 143 of file xdr_io.h.

References _bc_file_name.

Referenced by read(), read_serialized_bcs_helper(), read_serialized_nodesets(), and write().

143 { return _bc_file_name; }
std::string _bc_file_name
Definition: xdr_io.h:312
std::string& libMesh::XdrIO::boundary_condition_file_name ( )
inline

Definition at line 144 of file xdr_io.h.

References _bc_file_name.

144 { return _bc_file_name; }
std::string _bc_file_name
Definition: xdr_io.h:312
const Parallel::Communicator& libMesh::ParallelObject::comm ( ) const
inlineinherited
Returns
A reference to the Parallel::Communicator object used by this mesh.

Definition at line 87 of file parallel_object.h.

References libMesh::ParallelObject::_communicator.

Referenced by libMesh::__libmesh_petsc_diff_solver_monitor(), libMesh::__libmesh_petsc_diff_solver_residual(), libMesh::__libmesh_petsc_snes_jacobian(), libMesh::__libmesh_petsc_snes_postcheck(), libMesh::__libmesh_petsc_snes_residual(), libMesh::__libmesh_tao_equality_constraints(), libMesh::__libmesh_tao_equality_constraints_jacobian(), libMesh::__libmesh_tao_gradient(), libMesh::__libmesh_tao_hessian(), libMesh::__libmesh_tao_inequality_constraints(), libMesh::__libmesh_tao_inequality_constraints_jacobian(), libMesh::__libmesh_tao_objective(), libMesh::MeshRefinement::_coarsen_elements(), libMesh::ExactSolution::_compute_error(), libMesh::ParmetisPartitioner::_do_repartition(), libMesh::UniformRefinementEstimator::_estimate_error(), libMesh::BoundaryInfo::_find_id_maps(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_get_diagonal(), libMesh::SlepcEigenSolver< T >::_petsc_shell_matrix_get_diagonal(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_mult(), libMesh::SlepcEigenSolver< T >::_petsc_shell_matrix_mult(), libMesh::PetscLinearSolver< T >::_petsc_shell_matrix_mult_add(), libMesh::EquationSystems::_read_impl(), libMesh::MeshRefinement::_refine_elements(), libMesh::MeshRefinement::_smooth_flags(), libMesh::ImplicitSystem::add_matrix(), libMesh::System::add_vector(), libMesh::EigenSparseLinearSolver< T >::adjoint_solve(), libMesh::UnstructuredMesh::all_second_order(), libMesh::MeshTools::Modification::all_tri(), libMesh::LaplaceMeshSmoother::allgather_graph(), libMesh::FEMSystem::assemble_qoi(), libMesh::MeshCommunication::assign_global_indices(), libMesh::ParmetisPartitioner::assign_partitioning(), libMesh::DofMap::attach_matrix(), libMesh::Parallel::BinSorter< KeyType, IdxType >::binsort(), libMesh::Parallel::Sort< KeyType, IdxType >::binsort(), libMesh::MeshCommunication::broadcast(), libMesh::SparseMatrix< T >::build(), libMesh::MeshTools::Generation::build_extrusion(), libMesh::Parallel::Histogram< KeyType, IdxType >::build_histogram(), 
libMesh::PetscNonlinearSolver< T >::build_mat_null_space(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::EquationSystems::build_parallel_solution_vector(), libMesh::MeshBase::cache_elem_dims(), libMesh::System::calculate_norm(), libMesh::DofMap::check_dirichlet_bcid_consistency(), libMesh::DistributedVector< T >::clone(), libMesh::EigenSparseVector< T >::clone(), libMesh::LaspackVector< T >::clone(), libMesh::EpetraVector< T >::clone(), libMesh::PetscVector< T >::clone(), libMesh::EpetraVector< T >::close(), libMesh::Parallel::Sort< KeyType, IdxType >::communicate_bins(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), libMesh::Problem_Interface::computeF(), libMesh::Problem_Interface::computeJacobian(), libMesh::Problem_Interface::computePreconditioner(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshTools::create_bounding_box(), libMesh::MeshTools::create_nodal_bounding_box(), libMesh::MeshRefinement::create_parent_error_vector(), libMesh::MeshTools::create_processor_bounding_box(), libMesh::MeshTools::create_subdomain_bounding_box(), libMesh::MeshCommunication::delete_remote_elements(), libMesh::DofMap::distribute_dofs(), DMlibMeshFunction(), DMlibMeshJacobian(), DMlibMeshSetSystem_libMesh(), DMVariableBounds_libMesh(), libMesh::MeshRefinement::eliminate_unrefined_patches(), libMesh::EpetraVector< T >::EpetraVector(), libMesh::WeightedPatchRecoveryErrorEstimator::estimate_error(), libMesh::PatchRecoveryErrorEstimator::estimate_error(), libMesh::JumpErrorEstimator::estimate_error(), libMesh::AdjointRefinementEstimator::estimate_error(), libMesh::ExactErrorEstimator::estimate_error(), libMesh::MeshRefinement::flag_elements_by_elem_fraction(), libMesh::MeshRefinement::flag_elements_by_error_fraction(), libMesh::MeshRefinement::flag_elements_by_nelem_target(), libMesh::MeshCommunication::gather(), 
libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::CondensedEigenSystem::get_eigenpair(), libMesh::DofMap::get_info(), libMesh::ImplicitSystem::get_linear_solver(), libMesh::EquationSystems::get_solution(), libMesh::LocationMap< T >::init(), libMesh::PetscDiffSolver::init(), libMesh::TimeSolver::init(), libMesh::TopologyMap::init(), libMesh::TaoOptimizationSolver< T >::init(), libMesh::PetscNonlinearSolver< T >::init(), libMesh::DistributedVector< T >::init(), libMesh::EpetraVector< T >::init(), libMesh::PetscVector< T >::init(), libMesh::SystemSubsetBySubdomain::init(), libMesh::EigenSystem::init_data(), libMesh::EigenSystem::init_matrices(), libMesh::ParmetisPartitioner::initialize(), libMesh::OptimizationSystem::initialize_equality_constraints_storage(), libMesh::OptimizationSystem::initialize_inequality_constraints_storage(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_dof_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_flags(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_object_ids(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_p_levels(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::MeshTools::libmesh_assert_valid_unique_ids(), libMesh::MeshRefinement::limit_level_mismatch_at_edge(), libMesh::MeshRefinement::limit_level_mismatch_at_node(), libMesh::MeshRefinement::limit_overrefined_boundary(), libMesh::MeshRefinement::limit_underrefined_boundary(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshCommunication::make_elems_parallel_consistent(), libMesh::MeshRefinement::make_flags_parallel_consistent(), 
libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_new_nodes_parallel_consistent(), libMesh::MeshCommunication::make_node_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_unique_ids_parallel_consistent(), libMesh::MeshCommunication::make_nodes_parallel_consistent(), libMesh::MeshCommunication::make_p_levels_parallel_consistent(), libMesh::MeshRefinement::make_refinement_compatible(), libMesh::DistributedVector< T >::max(), libMesh::FEMSystem::mesh_position_set(), libMesh::MeshSerializer::MeshSerializer(), libMesh::DistributedVector< T >::min(), libMesh::DistributedMesh::n_active_elem(), libMesh::MeshTools::n_active_levels(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::CondensedEigenSystem::n_global_non_condensed_dofs(), libMesh::MeshTools::n_levels(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::MeshTools::n_p_levels(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::DistributedMesh::parallel_max_elem_id(), libMesh::DistributedMesh::parallel_max_node_id(), libMesh::ReplicatedMesh::parallel_max_unique_id(), libMesh::DistributedMesh::parallel_max_unique_id(), libMesh::DistributedMesh::parallel_n_elem(), libMesh::DistributedMesh::parallel_n_nodes(), libMesh::SparsityPattern::Build::parallel_sync(), libMesh::MeshTools::paranoid_n_levels(), libMesh::Partitioner::partition(), libMesh::MetisPartitioner::partition_range(), libMesh::Partitioner::partition_unpartitioned_elements(), libMesh::petsc_auto_fieldsplit(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::MeshBase::prepare_for_use(), libMesh::SparseMatrix< T >::print(), libMesh::Nemesis_IO::read(), libMesh::CheckpointIO::read(), read(), read_serialized_bc_names(), read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), 
read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::MeshBase::recalculate_n_partitions(), libMesh::MeshCommunication::redistribute(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::MeshCommunication::send_coarse_ghosts(), libMesh::Partitioner::set_node_processor_ids(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::Partitioner::set_parent_processor_ids(), libMesh::LaplaceMeshSmoother::smooth(), libMesh::Parallel::Sort< KeyType, IdxType >::sort(), libMesh::MeshBase::subdomain_ids(), libMesh::BoundaryInfo::sync(), libMesh::Parallel::sync_element_data_by_parent_id(), libMesh::Parallel::sync_node_data_by_element_id(), libMesh::MeshRefinement::test_level_one(), libMesh::MeshRefinement::test_unflagged(), libMesh::MeshTools::total_weight(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::NameBasedIO::write(), write(), write_serialized_bcs_helper(), write_serialized_connectivity(), write_serialized_nodes(), write_serialized_nodesets(), libMesh::DistributedVector< T >::zero_clone(), libMesh::LaspackVector< T >::zero_clone(), libMesh::EigenSparseVector< T >::zero_clone(), libMesh::EpetraVector< T >::zero_clone(), and libMesh::PetscVector< T >::zero_clone().

88  { return _communicator; }
const Parallel::Communicator & _communicator
bool libMesh::XdrIO::legacy ( ) const
inline

Get/Set the flag indicating if we should read/write legacy.

Definition at line 106 of file xdr_io.h.

References _legacy.

Referenced by libMesh::NameBasedIO::read(), read(), and write().

106 { return _legacy; }
bool _legacy
Definition: xdr_io.h:306
bool& libMesh::XdrIO::legacy ( )
inline

Definition at line 107 of file xdr_io.h.

References _legacy, set_auto_parallel(), set_write_parallel(), and write_parallel().

107 { return _legacy; }
bool _legacy
Definition: xdr_io.h:306
MeshBase & libMesh::MeshInput< MeshBase >::mesh ( )
protectedinherited
Returns
The object as a writable reference.

Referenced by libMesh::GMVIO::_read_one_cell(), libMesh::TetGenIO::element_in(), libMesh::UNVIO::elements_in(), libMesh::UNVIO::elements_out(), libMesh::UNVIO::groups_in(), libMesh::TetGenIO::node_in(), libMesh::UNVIO::nodes_in(), libMesh::UNVIO::nodes_out(), libMesh::GMVIO::read(), libMesh::ExodusII_IO::read(), libMesh::CheckpointIO::read(), read(), libMesh::CheckpointIO::read_bcs(), libMesh::CheckpointIO::read_connectivity(), libMesh::UCDIO::read_implementation(), libMesh::UNVIO::read_implementation(), libMesh::GmshIO::read_mesh(), libMesh::CheckpointIO::read_nodes(), libMesh::CheckpointIO::read_nodesets(), libMesh::CheckpointIO::read_remote_elem(), read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::OFFIO::read_stream(), libMesh::MatlabIO::read_stream(), libMesh::CheckpointIO::read_subdomain_names(), libMesh::UCDIO::UCDIO(), libMesh::VTKIO::VTKIO(), libMesh::TetGenIO::write(), libMesh::ExodusII_IO::write(), libMesh::CheckpointIO::write(), write(), libMesh::GMVIO::write_ascii_new_impl(), libMesh::GMVIO::write_ascii_old_impl(), libMesh::CheckpointIO::write_bcs(), libMesh::GMVIO::write_binary(), libMesh::GMVIO::write_discontinuous_gmv(), libMesh::ExodusII_IO::write_element_data(), libMesh::UCDIO::write_implementation(), libMesh::GmshIO::write_mesh(), libMesh::UCDIO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data_common(), libMesh::ExodusII_IO::write_nodal_data_discontinuous(), libMesh::CheckpointIO::write_nodesets(), write_parallel(), libMesh::GmshIO::write_post(), write_serialized_bcs_helper(), write_serialized_connectivity(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_subdomain_names(), and libMesh::CheckpointIO::write_subdomain_names().

processor_id_type libMesh::ParallelObject::n_processors ( ) const
inlineinherited
Returns
The number of processors in the group.

Definition at line 93 of file parallel_object.h.

References libMesh::ParallelObject::_communicator, and libMesh::Parallel::Communicator::size().

Referenced by libMesh::ParmetisPartitioner::_do_repartition(), libMesh::BoundaryInfo::_find_id_maps(), libMesh::DistributedMesh::add_elem(), libMesh::DistributedMesh::add_node(), libMesh::LaplaceMeshSmoother::allgather_graph(), libMesh::FEMSystem::assembly(), libMesh::ParmetisPartitioner::assign_partitioning(), libMesh::AztecLinearSolver< T >::AztecLinearSolver(), libMesh::MeshCommunication::broadcast(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::DistributedMesh::clear(), libMesh::Nemesis_IO_Helper::compute_border_node_ids(), libMesh::Nemesis_IO_Helper::construct_nemesis_filename(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::UnstructuredMesh::create_pid_mesh(), libMesh::MeshTools::create_processor_bounding_box(), libMesh::DofMap::distribute_dofs(), libMesh::DofMap::distribute_local_dofs_node_major(), libMesh::DofMap::distribute_local_dofs_var_major(), libMesh::DistributedMesh::DistributedMesh(), libMesh::EnsightIO::EnsightIO(), libMesh::MeshCommunication::gather(), libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::MeshBase::get_info(), libMesh::EquationSystems::get_solution(), libMesh::DistributedVector< T >::init(), libMesh::SystemSubsetBySubdomain::init(), libMesh::ParmetisPartitioner::initialize(), libMesh::Nemesis_IO_Helper::initialize(), libMesh::DistributedMesh::insert_elem(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_boundary_ids(), libMesh::MeshTools::libmesh_assert_valid_dof_ids(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::MeshTools::libmesh_assert_valid_refinement_flags(), libMesh::DofMap::local_variable_indices(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshBase::n_active_elem_on_proc(), libMesh::MeshBase::n_elem_on_proc(), 
libMesh::MeshBase::n_nodes_on_proc(), libMesh::SparsityPattern::Build::parallel_sync(), libMesh::Partitioner::partition(), libMesh::MeshBase::partition(), libMesh::Partitioner::partition_unpartitioned_elements(), libMesh::PetscLinearSolver< T >::PetscLinearSolver(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::SparseMatrix< T >::print(), libMesh::NameBasedIO::read(), libMesh::Nemesis_IO::read(), libMesh::CheckpointIO::read(), read(), libMesh::CheckpointIO::read_connectivity(), libMesh::CheckpointIO::read_nodes(), libMesh::MeshCommunication::redistribute(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::Partitioner::repartition(), libMesh::MeshCommunication::send_coarse_ghosts(), libMesh::Partitioner::set_node_processor_ids(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::Parallel::Sort< KeyType, IdxType >::sort(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::DistributedMesh::update_parallel_id_counts(), libMesh::GMVIO::write_binary(), libMesh::GMVIO::write_discontinuous_gmv(), write_serialized_bcs_helper(), write_serialized_connectivity(), write_serialized_nodes(), and write_serialized_nodesets().

94  { return cast_int<processor_id_type>(_communicator.size()); }
unsigned int size() const
Definition: parallel.h:722
const Parallel::Communicator & _communicator
void libMesh::XdrIO::pack_element ( std::vector< xdr_id_type > &  conn,
const Elem * elem,
const dof_id_type  parent_id = DofObject::invalid_id,
const dof_id_type  parent_pid = DofObject::invalid_id 
) const
private

Pack an element into a transfer buffer for parallel communication.

Definition at line 2085 of file xdr_io.C.

References libMesh::DofObject::invalid_id, libMesh::libmesh_assert(), libMesh::Elem::n_nodes(), libMesh::Elem::node_id(), libMesh::Elem::p_level(), libMesh::DofObject::processor_id(), libMesh::Elem::subdomain_id(), libMesh::Elem::type(), libMesh::Elem::type_to_n_nodes_map, and libMesh::DofObject::unique_id().

Referenced by polynomial_level_file_name(), and write_serialized_connectivity().

2087 {
2088  libmesh_assert(elem);
2089  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2090 
2091  conn.push_back(elem->n_nodes());
2092 
2093  conn.push_back (elem->type());
2094 
2095  // In version 0.7.0+ "id" is stored but is not used. In version 0.9.2+
2096  // we will store unique_id instead, therefore there is no need to
2097  // check for the older version when writing the unique_id.
2098  conn.push_back (elem->unique_id());
2099 
2100  if (parent_id != DofObject::invalid_id)
2101  {
2102  conn.push_back (parent_id);
2103  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2104  conn.push_back (parent_pid);
2105  }
2106 
2107  conn.push_back (elem->processor_id());
2108  conn.push_back (elem->subdomain_id());
2109 
2110 #ifdef LIBMESH_ENABLE_AMR
2111  conn.push_back (elem->p_level());
2112 #endif
2113 
2114  for (unsigned int n=0; n<elem->n_nodes(); n++)
2115  conn.push_back (elem->node_id(n));
2116 }
static const unsigned int type_to_n_nodes_map[INVALID_ELEM]
Definition: elem.h:521
libmesh_assert(j)
static const dof_id_type invalid_id
Definition: dof_object.h:334
const std::string& libMesh::XdrIO::partition_map_file_name ( ) const
inline

Get/Set the partitioning file name.

Definition at line 149 of file xdr_io.h.

References _partition_map_file.

Referenced by read(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

149 { return _partition_map_file; }
std::string _partition_map_file
Definition: xdr_io.h:313
std::string& libMesh::XdrIO::partition_map_file_name ( )
inline

Definition at line 150 of file xdr_io.h.

References _partition_map_file.

150 { return _partition_map_file; }
std::string _partition_map_file
Definition: xdr_io.h:313
const std::string& libMesh::XdrIO::polynomial_level_file_name ( ) const
inline

Get/Set the polynomial degree file name.

Definition at line 161 of file xdr_io.h.

References _p_level_file.

Referenced by read(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

161 { return _p_level_file; }
std::string _p_level_file
Definition: xdr_io.h:315
processor_id_type libMesh::ParallelObject::processor_id ( ) const
inlineinherited
Returns
The rank of this processor in the group.

Definition at line 99 of file parallel_object.h.

References libMesh::ParallelObject::_communicator, and libMesh::Parallel::Communicator::rank().

Referenced by libMesh::BoundaryInfo::_find_id_maps(), libMesh::EquationSystems::_read_impl(), libMesh::DistributedMesh::add_elem(), libMesh::BoundaryInfo::add_elements(), libMesh::DofMap::add_neighbors_to_send_list(), libMesh::DistributedMesh::add_node(), libMesh::MeshRefinement::add_node(), libMesh::MeshTools::Modification::all_tri(), libMesh::FEMSystem::assembly(), libMesh::ParmetisPartitioner::assign_partitioning(), libMesh::MeshCommunication::broadcast(), libMesh::EquationSystems::build_discontinuous_solution_vector(), libMesh::Nemesis_IO_Helper::build_element_and_node_maps(), libMesh::ParmetisPartitioner::build_graph(), libMesh::InfElemBuilder::build_inf_elem(), libMesh::BoundaryInfo::build_node_list_from_side_list(), libMesh::DofMap::build_sparsity(), libMesh::DistributedMesh::clear(), libMesh::ExodusII_IO_Helper::close(), libMesh::Nemesis_IO_Helper::compute_border_node_ids(), libMesh::Nemesis_IO_Helper::compute_communication_map_parameters(), libMesh::Nemesis_IO_Helper::compute_internal_and_border_elems_and_internal_nodes(), libMesh::Nemesis_IO_Helper::compute_node_communication_maps(), libMesh::Nemesis_IO_Helper::compute_num_global_elem_blocks(), libMesh::Nemesis_IO_Helper::compute_num_global_nodesets(), libMesh::Nemesis_IO_Helper::compute_num_global_sidesets(), libMesh::Nemesis_IO_Helper::construct_nemesis_filename(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::ExodusII_IO_Helper::create(), libMesh::DistributedMesh::delete_elem(), libMesh::DistributedMesh::delete_node(), libMesh::MeshCommunication::delete_remote_elements(), libMesh::DofMap::distribute_dofs(), libMesh::DofMap::distribute_local_dofs_node_major(), libMesh::DofMap::distribute_local_dofs_var_major(), libMesh::DistributedMesh::DistributedMesh(), libMesh::EnsightIO::EnsightIO(), libMesh::MeshFunction::find_element(), libMesh::MeshFunction::find_elements(), libMesh::UnstructuredMesh::find_neighbors(), libMesh::MeshCommunication::gather(), 
libMesh::MeshCommunication::gather_neighboring_elements(), libMesh::Nemesis_IO_Helper::get_cmap_params(), libMesh::Nemesis_IO_Helper::get_eb_info_global(), libMesh::Nemesis_IO_Helper::get_elem_cmap(), libMesh::Nemesis_IO_Helper::get_elem_map(), libMesh::MeshBase::get_info(), libMesh::DofMap::get_info(), libMesh::Nemesis_IO_Helper::get_init_global(), libMesh::Nemesis_IO_Helper::get_init_info(), libMesh::Nemesis_IO_Helper::get_loadbal_param(), libMesh::Nemesis_IO_Helper::get_node_cmap(), libMesh::Nemesis_IO_Helper::get_node_map(), libMesh::Nemesis_IO_Helper::get_ns_param_global(), libMesh::EquationSystems::get_solution(), libMesh::Nemesis_IO_Helper::get_ss_param_global(), libMesh::SparsityPattern::Build::handle_vi_vj(), libMesh::DistributedVector< T >::init(), libMesh::SystemSubsetBySubdomain::init(), libMesh::ParmetisPartitioner::initialize(), libMesh::ExodusII_IO_Helper::initialize(), libMesh::ExodusII_IO_Helper::initialize_element_variables(), libMesh::ExodusII_IO_Helper::initialize_global_variables(), libMesh::ExodusII_IO_Helper::initialize_nodal_variables(), libMesh::DistributedMesh::insert_elem(), libMesh::DofMap::is_evaluable(), libMesh::SparsityPattern::Build::join(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Elem >(), libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >(), libMesh::MeshTools::libmesh_assert_valid_neighbors(), libMesh::DistributedMesh::libmesh_assert_valid_parallel_object_ids(), libMesh::DofMap::local_variable_indices(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshBase::n_active_local_elem(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::System::n_local_dofs(), libMesh::MeshBase::n_local_elem(), libMesh::MeshBase::n_local_nodes(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::WeightedPatchRecoveryErrorEstimator::EstimateError::operator()(), libMesh::SparsityPattern::Build::operator()(), 
libMesh::PatchRecoveryErrorEstimator::EstimateError::operator()(), libMesh::SparsityPattern::Build::parallel_sync(), libMesh::MetisPartitioner::partition_range(), libMesh::System::point_gradient(), libMesh::System::point_hessian(), libMesh::System::point_value(), libMesh::SparseMatrix< T >::print(), libMesh::NumericVector< T >::print_global(), libMesh::Nemesis_IO_Helper::put_cmap_params(), libMesh::Nemesis_IO_Helper::put_elem_cmap(), libMesh::Nemesis_IO_Helper::put_elem_map(), libMesh::Nemesis_IO_Helper::put_loadbal_param(), libMesh::Nemesis_IO_Helper::put_node_cmap(), libMesh::Nemesis_IO_Helper::put_node_map(), libMesh::NameBasedIO::read(), libMesh::Nemesis_IO::read(), libMesh::CheckpointIO::read(), read(), libMesh::ExodusII_IO_Helper::read_elem_num_map(), libMesh::ExodusII_IO_Helper::read_node_num_map(), read_serialized_bc_names(), read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_subdomain_names(), libMesh::MeshCommunication::redistribute(), libMesh::DistributedMesh::renumber_dof_objects(), libMesh::MeshCommunication::send_coarse_ghosts(), libMesh::Partitioner::set_node_processor_ids(), libMesh::DofMap::set_nonlocal_dof_objects(), libMesh::LaplaceMeshSmoother::smooth(), libMesh::MeshTools::total_weight(), libMesh::MeshRefinement::uniformly_coarsen(), libMesh::Parallel::Packing< Node * >::unpack(), libMesh::Parallel::Packing< Elem * >::unpack(), libMesh::DistributedMesh::update_parallel_id_counts(), libMesh::MeshTools::weight(), libMesh::NameBasedIO::write(), libMesh::CheckpointIO::write(), write(), libMesh::EquationSystems::write(), libMesh::GMVIO::write_discontinuous_gmv(), libMesh::ExodusII_IO::write_element_data(), libMesh::ExodusII_IO_Helper::write_element_values(), libMesh::ExodusII_IO_Helper::write_elements(), libMesh::ExodusII_IO::write_global_data(), libMesh::ExodusII_IO_Helper::write_global_values(), libMesh::ExodusII_IO::write_information_records(), 
libMesh::ExodusII_IO_Helper::write_information_records(), libMesh::ExodusII_IO_Helper::write_nodal_coordinates(), libMesh::UCDIO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data(), libMesh::ExodusII_IO::write_nodal_data_discontinuous(), libMesh::ExodusII_IO_Helper::write_nodal_values(), libMesh::Nemesis_IO_Helper::write_nodesets(), libMesh::ExodusII_IO_Helper::write_nodesets(), write_serialized_bc_names(), write_serialized_bcs_helper(), write_serialized_connectivity(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_subdomain_names(), libMesh::Nemesis_IO_Helper::write_sidesets(), libMesh::ExodusII_IO_Helper::write_sidesets(), libMesh::ExodusII_IO::write_timestep(), and libMesh::ExodusII_IO_Helper::write_timestep().

100  { return cast_int<processor_id_type>(_communicator.rank()); }
const Parallel::Communicator & _communicator
unsigned int rank() const
Definition: parallel.h:720
void libMesh::XdrIO::read ( const std::string &  name)
virtual

This method implements reading a mesh from a specified file.

We are future-proofing the layout of this file by adding in size information for all stored types. TODO: All types are stored as the same size. Use the size information to pack things efficiently. For now we will assume that "type size" is how the entire file will be encoded.

Implements libMesh::MeshInput< MeshBase >.

Definition at line 1311 of file xdr_io.C.

References _field_width, binary(), boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::DECODE, legacy(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::ParallelObject::n_processors(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::READ, read_serialized_connectivity(), read_serialized_edge_bcs(), read_serialized_nodes(), read_serialized_nodesets(), read_serialized_shellface_bcs(), read_serialized_side_bcs(), read_serialized_subdomain_names(), libMesh::MeshBase::reserve_elem(), libMesh::MeshBase::reserve_nodes(), libMesh::MeshInput< MeshBase >::set_n_partitions(), libMesh::Partitioner::set_node_processor_ids(), subdomain_map_file_name(), version(), version_at_least_0_9_2(), and version_at_least_1_1_0().

Referenced by libMesh::NameBasedIO::read().

1312 {
1313  // Only open the file on processor 0 -- this is especially important because
1314  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
1315  // calls will produce a race condition.
1316  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);
1317 
1318  // convenient reference to our mesh
1319  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1320 
1321  // get the version string.
1322  if (this->processor_id() == 0)
1323  io.data (this->version());
1324  this->comm().broadcast (this->version());
1325 
1326  // note that for "legacy" files the first entry is an
1327  // integer -- not a string at all.
1328  this->legacy() = !(this->version().find("libMesh") < this->version().size());
1329 
1330  // Check for a legacy version format.
1331  if (this->legacy())
1332  libmesh_error_msg("We no longer support reading files in the legacy format.");
1333 
1334  START_LOG("read()","XdrIO");
1335 
1336  std::vector<header_id_type> meta_data(10, sizeof(xdr_id_type));
1337  // type_size, uid_size, pid_size, sid_size, p_level_size, eid_size, side_size, bid_size;
1338  header_id_type pos=0;
1339 
1340  if (this->processor_id() == 0)
1341  {
1342  io.data (meta_data[pos++]);
1343  io.data (meta_data[pos++]);
1344  io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
1345  io.data (this->subdomain_map_file_name()); // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
1346  io.data (this->partition_map_file_name()); // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
1347  io.data (this->polynomial_level_file_name()); // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;
1348 
1349  if (version_at_least_0_9_2())
1350  {
1351  io.data (meta_data[pos++], "# type size");
1352  io.data (meta_data[pos++], "# uid size");
1353  io.data (meta_data[pos++], "# pid size");
1354  io.data (meta_data[pos++], "# sid size");
1355  io.data (meta_data[pos++], "# p-level size");
1356  // Boundary Condition sizes
1357  io.data (meta_data[pos++], "# eid size"); // elem id
1358  io.data (meta_data[pos++], "# side size"); // side number
1359  io.data (meta_data[pos++], "# bid size"); // boundary id
1360  }
1361  }
1362 
1363  // broadcast the n_elems, n_nodes, and size information
1364  this->comm().broadcast (meta_data);
1365 
1366  this->comm().broadcast (this->boundary_condition_file_name());
1367  this->comm().broadcast (this->subdomain_map_file_name());
1368  this->comm().broadcast (this->partition_map_file_name());
1369  this->comm().broadcast (this->polynomial_level_file_name());
1370 
1371  // Tell the mesh how many nodes/elements to expect. Depending on the mesh type,
1372  // this may allow for efficient adding of nodes/elements.
1373  header_id_type n_elem = meta_data[0];
1374  header_id_type n_nodes = meta_data[1];
1375 
1376  mesh.reserve_elem(n_elem);
1377  mesh.reserve_nodes(n_nodes);
1378 
1379  // Our mesh is pre-partitioned as it's created
1380  this->set_n_partitions(this->n_processors());
1381 
1387  if (version_at_least_0_9_2())
1388  _field_width = meta_data[2];
1389 
1390  if (_field_width == 4)
1391  {
1392  uint32_t type_size = 0;
1393 
1394  // read subdomain names
1395  this->read_serialized_subdomain_names (io);
1396 
1397  // read connectivity
1398  this->read_serialized_connectivity (io, n_elem, meta_data, type_size);
1399 
1400  // read the nodal locations
1401  this->read_serialized_nodes (io, n_nodes);
1402 
1403  // read the side boundary conditions
1404  this->read_serialized_side_bcs (io, type_size);
1405 
1406  if (version_at_least_0_9_2())
1407  // read the nodesets
1408  this->read_serialized_nodesets (io, type_size);
1409 
1410  if (version_at_least_1_1_0())
1411  {
1412  // read the edge boundary conditions
1413  this->read_serialized_edge_bcs (io, type_size);
1414 
1415  // read the "shell face" boundary conditions
1416  this->read_serialized_shellface_bcs (io, type_size);
1417  }
1418  }
1419  else if (_field_width == 8)
1420  {
1421  uint64_t type_size = 0;
1422 
1423  // read subdomain names
1424  this->read_serialized_subdomain_names (io);
1425 
1426  // read connectivity
1427  this->read_serialized_connectivity (io, n_elem, meta_data, type_size);
1428 
1429  // read the nodal locations
1430  this->read_serialized_nodes (io, n_nodes);
1431 
1432  // read the boundary conditions
1433  this->read_serialized_side_bcs (io, type_size);
1434 
1435  if (version_at_least_0_9_2())
1436  // read the nodesets
1437  this->read_serialized_nodesets (io, type_size);
1438 
1439  if (version_at_least_1_1_0())
1440  {
1441  // read the edge boundary conditions
1442  this->read_serialized_edge_bcs (io, type_size);
1443 
1444  // read the "shell face" boundary conditions
1445  this->read_serialized_shellface_bcs (io, type_size);
1446  }
1447  }
1448 
1449 
1450  STOP_LOG("read()","XdrIO");
1451 
1452  // set the node processor ids
1453  Partitioner::set_node_processor_ids (mesh);
1454 }
std::string name(const ElemQuality q)
Definition: elem_quality.C:39
void read_serialized_shellface_bcs(Xdr &io, T)
Definition: xdr_io.C:1955
bool binary() const
Definition: xdr_io.h:100
const std::string & polynomial_level_file_name() const
Definition: xdr_io.h:161
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Definition: mesh_tools.C:676
void read_serialized_subdomain_names(Xdr &io)
Definition: xdr_io.C:1458
static void set_node_processor_ids(MeshBase &mesh)
Definition: partitioner.C:431
processor_id_type n_processors() const
bool version_at_least_1_1_0() const
Definition: xdr_io.C:2133
bool version_at_least_0_9_2() const
Definition: xdr_io.C:2118
largest_id_type xdr_id_type
Definition: xdr_io.h:57
void read_serialized_connectivity(Xdr &io, const dof_id_type n_elem, std::vector< header_id_type > &sizes, T)
Definition: xdr_io.C:1504
const std::string & boundary_condition_file_name() const
Definition: xdr_io.h:143
const dof_id_type n_nodes
Definition: tecplot_io.C:67
void read_serialized_nodesets(Xdr &io, T)
Definition: xdr_io.C:1963
uint32_t header_id_type
Definition: xdr_io.h:60
void broadcast(T &data, const unsigned int root_id=0) const
void read_serialized_nodes(Xdr &io, const dof_id_type n_nodes)
Definition: xdr_io.C:1689
void read_serialized_edge_bcs(Xdr &io, T)
Definition: xdr_io.C:1947
const Parallel::Communicator & comm() const
void set_n_partitions(unsigned int n_parts)
Definition: mesh_input.h:91
const std::string & subdomain_map_file_name() const
Definition: xdr_io.h:155
const std::string & partition_map_file_name() const
Definition: xdr_io.h:149
bool legacy() const
Definition: xdr_io.h:106
const std::string & version() const
Definition: xdr_io.h:137
header_id_type _field_width
Definition: xdr_io.h:310
void read_serialized_side_bcs(Xdr &io, T)
Definition: xdr_io.C:1939
processor_id_type processor_id() const
void libMesh::XdrIO::read_serialized_bc_names ( Xdr io,
BoundaryInfo info,
bool  is_sideset 
)
private

Read boundary names information (sideset and nodeset) - NEW in 0.9.2 format

Definition at line 2040 of file xdr_io.C.

References libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::ParallelObject::processor_id(), libMesh::BoundaryInfo::set_nodeset_name_map(), libMesh::BoundaryInfo::set_sideset_name_map(), and version_at_least_0_9_2().

Referenced by polynomial_level_file_name(), read_serialized_bcs_helper(), and read_serialized_nodesets().

2041 {
2042  const bool read_entity_info = version_at_least_0_9_2();
2043  if (read_entity_info)
2044  {
2045  header_id_type n_boundary_names = 0;
2046  std::vector<header_id_type> boundary_ids;
2047  std::vector<std::string> boundary_names;
2048 
2049  // Read the sideset names
2050  if (this->processor_id() == 0)
2051  {
2052  io.data(n_boundary_names);
2053 
2054  boundary_ids.resize(n_boundary_names);
2055  boundary_names.resize(n_boundary_names);
2056 
2057  if (n_boundary_names)
2058  {
2059  io.data(boundary_ids);
2060  io.data(boundary_names);
2061  }
2062  }
2063 
2064  // Broadcast the boundary names to all processors
2065  this->comm().broadcast(n_boundary_names);
2066  if (n_boundary_names == 0)
2067  return;
2068 
2069  boundary_ids.resize(n_boundary_names);
2070  boundary_names.resize(n_boundary_names);
2071  this->comm().broadcast(boundary_ids);
2072  this->comm().broadcast(boundary_names);
2073 
2074  // Reassemble the named boundary information
2075  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2076  info.set_sideset_name_map() : info.set_nodeset_name_map();
2077 
2078  for (unsigned int i=0; i<n_boundary_names; ++i)
2079  boundary_map.insert(std::make_pair(boundary_ids[i], boundary_names[i]));
2080  }
2081 }
bool version_at_least_0_9_2() const
Definition: xdr_io.C:2118
uint32_t header_id_type
Definition: xdr_io.h:60
void broadcast(T &data, const unsigned int root_id=0) const
const Parallel::Communicator & comm() const
processor_id_type processor_id() const
template<typename T >
void libMesh::XdrIO::read_serialized_bcs_helper ( Xdr io,
T ,
const std::string  bc_type 
)
private

Helper function used in read_serialized_side_bcs, read_serialized_edge_bcs, and read_serialized_shellface_bcs.

Definition at line 1841 of file xdr_io.C.

References libMesh::BoundaryInfo::add_edge(), libMesh::BoundaryInfo::add_shellface(), libMesh::BoundaryInfo::add_side(), boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), std::min(), libMesh::ParallelObject::processor_id(), read_serialized_bc_names(), and libMesh::Xdr::reading().

Referenced by polynomial_level_file_name(), read_serialized_edge_bcs(), read_serialized_shellface_bcs(), and read_serialized_side_bcs().

1842 {
1843  if (this->boundary_condition_file_name() == "n/a") return;
1844 
1845  libmesh_assert (io.reading());
1846 
1847  // convenient reference to our mesh
1848  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1849 
1850  // and our boundary info object
1851  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1852 
1853  // Version 0.9.2+ introduces unique ids
1854  read_serialized_bc_names(io, boundary_info, true); // sideset names
1855 
1856  std::vector<DofBCData> dof_bc_data;
1857  std::vector<T> input_buffer;
1858 
1859  header_id_type n_bcs=0;
1860  if (this->processor_id() == 0)
1861  io.data (n_bcs);
1862  this->comm().broadcast (n_bcs);
1863 
1864  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
1865  {
1866  first_bc = blk*io_blksize;
1867  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
1868 
1869  input_buffer.resize (3*(last_bc - first_bc));
1870 
1871  if (this->processor_id() == 0)
1872  io.data_stream (input_buffer.empty() ? libmesh_nullptr : &input_buffer[0],
1873  cast_int<unsigned int>(input_buffer.size()));
1874 
1875  this->comm().broadcast (input_buffer);
1876  dof_bc_data.clear();
1877  dof_bc_data.reserve (input_buffer.size()/3);
1878 
1879  // convert the input_buffer to DofBCData to facilitate searching
1880  for (std::size_t idx=0; idx<input_buffer.size(); idx+=3)
1881  dof_bc_data.push_back
1882  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
1883  cast_int<unsigned short>(input_buffer[idx+1]),
1884  cast_int<boundary_id_type>(input_buffer[idx+2])));
1885  input_buffer.clear();
1886  // note that while the files *we* write should already be sorted by
1887  // element id this is not necessarily guaranteed.
1888  std::sort (dof_bc_data.begin(), dof_bc_data.end());
1889 
1890  MeshBase::const_element_iterator
1891  it = mesh.level_elements_begin(0),
1892  end = mesh.level_elements_end(0);
1893 
1894  // Look for BCs in this block for all the level-0 elements we have
1895  // (not just local ones). Do this by finding all the entries
1896  // in dof_bc_data whose elem_id match the ID of the current element.
1897  // We cannot rely on libmesh_nullptr neighbors at this point since the neighbor
1898  // data structure has not been initialized.
1899  for (std::pair<std::vector<DofBCData>::iterator,
1900  std::vector<DofBCData>::iterator> pos; it!=end; ++it)
1901 #if defined(__SUNPRO_CC) || defined(__PGI)
1902  for (pos = std::equal_range (dof_bc_data.begin(), dof_bc_data.end(), (*it)->id(), CompareIntDofBCData());
1903  pos.first != pos.second; ++pos.first)
1904 #else
1905  for (pos = std::equal_range (dof_bc_data.begin(), dof_bc_data.end(), (*it)->id());
1906  pos.first != pos.second; ++pos.first)
1907 #endif
1908  {
1909  libmesh_assert_equal_to (pos.first->dof_id, (*it)->id());
1910 
1911  if (bc_type == "side")
1912  {
1913  libmesh_assert_less (pos.first->side, (*it)->n_sides());
1914  boundary_info.add_side (*it, pos.first->side, pos.first->bc_id);
1915  }
1916  else if (bc_type == "edge")
1917  {
1918  libmesh_assert_less (pos.first->side, (*it)->n_edges());
1919  boundary_info.add_edge (*it, pos.first->side, pos.first->bc_id);
1920  }
1921  else if (bc_type == "shellface")
1922  {
1923  // Shell face IDs can only be 0 or 1.
1924  libmesh_assert_less(pos.first->side, 2);
1925 
1926  boundary_info.add_shellface (*it, pos.first->side, pos.first->bc_id);
1927  }
1928  else
1929  {
1930  libmesh_error_msg("bc_type not recognized: " + bc_type);
1931  }
1932  }
1933  }
1934 }
const class libmesh_nullptr_t libmesh_nullptr
IterBase * end
void read_serialized_bc_names(Xdr &io, BoundaryInfo &info, bool is_sideset)
Definition: xdr_io.C:2040
libmesh_assert(j)
const std::string & boundary_condition_file_name() const
Definition: xdr_io.h:143
uint32_t header_id_type
Definition: xdr_io.h:60
void broadcast(T &data, const unsigned int root_id=0) const
static const std::size_t io_blksize
Definition: xdr_io.h:320
const Parallel::Communicator & comm() const
long double min(long double a, double b)
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
processor_id_type processor_id() const
template<typename T >
void libMesh::XdrIO::read_serialized_connectivity ( Xdr io,
const dof_id_type  n_elem,
std::vector< header_id_type > &  sizes,
T 
)
private

Read the connectivity for a parallel, distributed mesh

Definition at line 1504 of file xdr_io.C.

References libMesh::Elem::add_child(), libMesh::MeshBase::add_elem(), libMesh::MeshBase::add_point(), libMesh::Parallel::Communicator::broadcast(), libMesh::Elem::build(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::Elem::dim(), libMesh::MeshBase::elem_ptr(), libMesh::MeshInput< MeshBase >::elems_of_dimension, libMesh::Elem::hack_p_level(), libMesh::Elem::INACTIVE, libMesh::DofObject::invalid_id, io_blksize, libMesh::Elem::JUST_REFINED, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), libMesh::MeshBase::mesh_dimension(), std::min(), libMesh::MeshTools::n_elem(), libMesh::Elem::n_nodes(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::DofObject::processor_id(), libMesh::Xdr::reading(), libMesh::DofObject::set_id(), libMesh::MeshBase::set_mesh_dimension(), libMesh::Elem::set_node(), libMesh::Elem::set_refinement_flag(), libMesh::DofObject::set_unique_id(), libMesh::Elem::subdomain_id(), subdomain_map_file_name(), libMesh::Elem::type_to_n_nodes_map, and version_at_least_0_9_2().

Referenced by polynomial_level_file_name(), and read().

1505 {
1506  libmesh_assert (io.reading());
1507 
1508  if (!n_elem) return;
1509 
1510  const bool
1511  read_p_level = ("." == this->polynomial_level_file_name()),
1512  read_partitioning = ("." == this->partition_map_file_name()),
1513  read_subdomain_id = ("." == this->subdomain_map_file_name());
1514 
1515  // convenient reference to our mesh
1516  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1517 
1518  // Keep track of what kinds of elements this file contains
1519  elems_of_dimension.clear();
1520  elems_of_dimension.resize(4, false);
1521 
1522  std::vector<T> conn, input_buffer(100 /* oversized ! */);
1523 
1524  int level=-1;
1525 
1526  // Version 0.9.2+ introduces unique ids
1527  const size_t unique_id_size_index = 3;
1528 
1529  const bool read_unique_id =
1530  (version_at_least_0_9_2()) &&
1531  sizes[unique_id_size_index];
1532 
1533  T n_elem_at_level=0, n_processed_at_level=0;
1534  for (dof_id_type blk=0, first_elem=0, last_elem=0;
1535  last_elem<n_elem; blk++)
1536  {
1537  first_elem = cast_int<dof_id_type>(blk*io_blksize);
1538  last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
1539  cast_int<std::size_t>(n_elem)));
1540 
1541  conn.clear();
1542 
1543  if (this->processor_id() == 0)
1544  for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
1545  {
1546  if (n_processed_at_level == n_elem_at_level)
1547  {
1548  // get the number of elements to read at this level
1549  io.data (n_elem_at_level);
1550  n_processed_at_level = 0;
1551  level++;
1552  }
1553 
1554  unsigned int pos = 0;
1555  // get the element type,
1556  io.data_stream (&input_buffer[pos++], 1);
1557 
1558  if (read_unique_id)
1559  io.data_stream (&input_buffer[pos++], 1);
1560  // Older versions won't have this field at all (no increment on pos)
1561 
1562  // maybe the parent
1563  if (level)
1564  io.data_stream (&input_buffer[pos++], 1);
1565  else
1566  // We can't always fit DofObject::invalid_id in an
1567  // xdr_id_type
1568  input_buffer[pos++] = static_cast<T>(-1);
1569 
1570  // maybe the processor id
1571  if (read_partitioning)
1572  io.data_stream (&input_buffer[pos++], 1);
1573  else
1574  input_buffer[pos++] = 0;
1575 
1576  // maybe the subdomain id
1577  if (read_subdomain_id)
1578  io.data_stream (&input_buffer[pos++], 1);
1579  else
1580  input_buffer[pos++] = 0;
1581 
1582  // maybe the p level
1583  if (read_p_level)
1584  io.data_stream (&input_buffer[pos++], 1);
1585  else
1586  input_buffer[pos++] = 0;
1587 
1588  // and all the nodes
1589  libmesh_assert_less (pos+Elem::type_to_n_nodes_map[input_buffer[0]], input_buffer.size());
1590  io.data_stream (&input_buffer[pos], Elem::type_to_n_nodes_map[input_buffer[0]]);
1591  conn.insert (conn.end(),
1592  input_buffer.begin(),
1593  input_buffer.begin() + pos + Elem::type_to_n_nodes_map[input_buffer[0]]);
1594  }
1595 
1596  std::size_t conn_size = conn.size();
1597  this->comm().broadcast(conn_size);
1598  conn.resize (conn_size);
1599  this->comm().broadcast (conn);
1600 
1601  // All processors now have the connectivity for this block.
1602  typename std::vector<T>::const_iterator it = conn.begin();
1603  for (dof_id_type e=first_elem; e<last_elem; e++)
1604  {
1605  const ElemType elem_type = static_cast<ElemType>(*it); ++it;
1606 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1607  // We are on all processors here, so we can easily assign
1608  // consistent unique ids if the file doesn't specify them
1609  // later.
1610  unique_id_type unique_id = e;
1611 #endif
1612  if (read_unique_id)
1613  {
1614 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1615  unique_id = cast_int<unique_id_type>(*it);
1616 #endif
1617  ++it;
1618  }
1619  const dof_id_type parent_id =
1620  (*it == static_cast<T>(-1)) ?
1621  DofObject::invalid_id :
1622  cast_int<dof_id_type>(*it);
1623  ++it;
1624  const processor_id_type proc_id =
1625  cast_int<processor_id_type>(*it);
1626  ++it;
1627  const subdomain_id_type subdomain_id =
1628  cast_int<subdomain_id_type>(*it);
1629  ++it;
1630 #ifdef LIBMESH_ENABLE_AMR
1631  const unsigned int p_level =
1632  cast_int<unsigned int>(*it);
1633 #endif
1634  ++it;
1635 
1636  Elem * parent = (parent_id == DofObject::invalid_id) ?
1637  libmesh_nullptr : mesh.elem_ptr(parent_id);
1638 
1639  Elem * elem = Elem::build (elem_type, parent).release();
1640 
1641  elem->set_id() = e;
1642 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1643  elem->set_unique_id() = unique_id;
1644 #endif
1645  elem->processor_id() = proc_id;
1646  elem->subdomain_id() = subdomain_id;
1647 #ifdef LIBMESH_ENABLE_AMR
1648  elem->hack_p_level(p_level);
1649 
1650  if (parent)
1651  {
1652  parent->add_child(elem);
1653  parent->set_refinement_flag (Elem::INACTIVE);
1654  elem->set_refinement_flag (Elem::JUST_REFINED);
1655  }
1656 #endif
1657 
1658  for (unsigned int n=0; n<elem->n_nodes(); n++, ++it)
1659  {
1660  const dof_id_type global_node_number =
1661  cast_int<dof_id_type>(*it);
1662 
1663  elem->set_node(n) =
1664  mesh.add_point (Point(), global_node_number);
1665  }
1666 
1667  elems_of_dimension[elem->dim()] = true;
1668  mesh.add_elem(elem);
1669  }
1670  }
1671 
1672  // Set the mesh dimension to the largest encountered for an element
1673  for (unsigned char i=0; i!=4; ++i)
1674  if (elems_of_dimension[i])
1675  mesh.set_mesh_dimension(i);
1676 
1677 #if LIBMESH_DIM < 3
1678  if (mesh.mesh_dimension() > LIBMESH_DIM)
1679  libmesh_error_msg("Cannot open dimension " \
1680  << mesh.mesh_dimension() \
1681  << " mesh file when configured without " \
1682  << mesh.mesh_dimension() \
1683  << "D support.");
1684 #endif
1685 }
static UniquePtr< Elem > build(const ElemType type, Elem *p=libmesh_nullptr)
Definition: elem.C:234
const std::string & polynomial_level_file_name() const
Definition: xdr_io.h:161
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Definition: mesh_tools.C:676
std::vector< bool > elems_of_dimension
Definition: mesh_input.h:97
TestClass subdomain_id_type
Definition: id_types.h:43
uint8_t processor_id_type
Definition: id_types.h:99
const class libmesh_nullptr_t libmesh_nullptr
static const unsigned int type_to_n_nodes_map[INVALID_ELEM]
Definition: elem.h:521
libmesh_assert(j)
bool version_at_least_0_9_2() const
Definition: xdr_io.C:2118
static const dof_id_type invalid_id
Definition: dof_object.h:334
void broadcast(T &data, const unsigned int root_id=0) const
static const std::size_t io_blksize
Definition: xdr_io.h:320
const Parallel::Communicator & comm() const
const std::string & subdomain_map_file_name() const
Definition: xdr_io.h:155
const std::string & partition_map_file_name() const
Definition: xdr_io.h:149
long double min(long double a, double b)
uint8_t unique_id_type
Definition: id_types.h:79
processor_id_type processor_id() const
uint8_t dof_id_type
Definition: id_types.h:64
template<typename T >
void libMesh::XdrIO::read_serialized_edge_bcs ( Xdr io,
T  value 
)
private

Read the edge boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Returns
The number of bcs read

Definition at line 1947 of file xdr_io.C.

References read_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and read().

1948 {
1949  read_serialized_bcs_helper(io, value, "edge");
1950 }
void read_serialized_bcs_helper(Xdr &io, T, const std::string bc_type)
Definition: xdr_io.C:1841
void libMesh::XdrIO::read_serialized_nodes ( Xdr io,
const dof_id_type  n_nodes 
)
private

Read the nodal locations for a parallel, distributed mesh

Definition at line 1689 of file xdr_io.C.

References _field_width, libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), std::min(), n_nodes, libMesh::ParallelObject::processor_id(), libMesh::Xdr::reading(), and version_at_least_0_9_6().

Referenced by polynomial_level_file_name(), and read().

1690 {
1691  libmesh_assert (io.reading());
1692 
1693  // convenient reference to our mesh
1694  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1695 
1696  if (!mesh.n_nodes()) return;
1697 
1698  // At this point the elements have been read from file and placeholder nodes
1699  // have been assigned. These nodes, however, do not have the proper (x,y,z)
1700  // locations or unique_id values. This method will read all the
1701  // nodes from disk, and each processor can then grab the individual
1702  // values it needs.
1703 
1704  // If the file includes unique ids for nodes (as indicated by a
1705  // flag in 0.9.6+ files), those will be read next.
1706 
1707  // build up a list of the nodes contained in our local mesh. These are the nodes
1708  // stored on the local processor whose (x,y,z) and unique_id values
1709  // need to be corrected.
1710  std::vector<dof_id_type> needed_nodes; needed_nodes.reserve (mesh.n_nodes());
1711  {
1712  MeshBase::node_iterator
1713  it = mesh.nodes_begin(),
1714  end = mesh.nodes_end();
1715 
1716  for (; it!=end; ++it)
1717  needed_nodes.push_back((*it)->id());
1718 
1719  std::sort (needed_nodes.begin(), needed_nodes.end());
1720 
1721  // We should not have any duplicate node->id()s
1722  libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
1723  }
1724 
1725  // Get the nodes in blocks.
1726  std::vector<Real> coords;
1727  std::pair<std::vector<dof_id_type>::iterator,
1728  std::vector<dof_id_type>::iterator> pos;
1729  pos.first = needed_nodes.begin();
1730 
1731  // Broadcast node coordinates
1732  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1733  {
1734  first_node = blk*io_blksize;
1735  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1736 
1737  coords.resize(3*(last_node - first_node));
1738 
1739  if (this->processor_id() == 0)
1740  io.data_stream (coords.empty() ? libmesh_nullptr : &coords[0],
1741  cast_int<unsigned int>(coords.size()));
1742 
1743  // For large numbers of processors the majority of processors at any given
1744  // block may not actually need these data. It may be worth profiling this,
1745  // although it is expected that disk IO will be the bottleneck
1746  this->comm().broadcast (coords);
1747 
1748  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
1749  {
1750  // first see if we need this node. use pos.first as a smart lower
1751  // bound, this will ensure that the size of the searched range
1752  // decreases as we match nodes.
1753  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1754 
1755  if (pos.first != pos.second) // we need this node.
1756  {
1757  libmesh_assert_equal_to (*pos.first, n);
1758  mesh.node_ref(cast_int<dof_id_type>(n)) =
1759  Point (coords[idx+0],
1760  coords[idx+1],
1761  coords[idx+2]);
1762 
1763  }
1764  }
1765  }
1766 
1767  if (version_at_least_0_9_6())
1768  {
1769  // Check for node unique ids
1770  unsigned short read_unique_ids;
1771 
1772  if (this->processor_id() == 0)
1773  io.data (read_unique_ids);
1774 
1775  this->comm().broadcast (read_unique_ids);
1776 
1777  // If no unique ids are in the file, we're done.
1778  if (!read_unique_ids)
1779  return;
1780 
1781  std::vector<uint32_t> unique_32;
1782  std::vector<uint64_t> unique_64;
1783 
1784  // We're starting over from node 0 again
1785  pos.first = needed_nodes.begin();
1786 
1787  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1788  {
1789  first_node = blk*io_blksize;
1790  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1791 
1792  libmesh_assert((_field_width == 8) || (_field_width == 4));
1793 
1794  if (_field_width == 8)
1795  unique_64.resize(last_node - first_node);
1796  else
1797  unique_32.resize(last_node - first_node);
1798 
1799  if (this->processor_id() == 0)
1800  {
1801  if (_field_width == 8)
1802  io.data_stream (unique_64.empty() ? libmesh_nullptr : &unique_64[0],
1803  cast_int<unsigned int>(unique_64.size()));
1804  else
1805  io.data_stream (unique_32.empty() ? libmesh_nullptr : &unique_32[0],
1806  cast_int<unsigned int>(unique_32.size()));
1807  }
1808 
1809 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1810  if (_field_width == 8)
1811  this->comm().broadcast (unique_64);
1812  else
1813  this->comm().broadcast (unique_32);
1814 
1815  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
1816  {
1817  // first see if we need this node. use pos.first as a smart lower
1818  // bound, this will ensure that the size of the searched range
1819  // decreases as we match nodes.
1820  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1821 
1822  if (pos.first != pos.second) // we need this node.
1823  {
1824  libmesh_assert_equal_to (*pos.first, n);
1825  if (_field_width == 8)
1826  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1827  = unique_64[idx];
1828  else
1829  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1830  = unique_32[idx];
1831  }
1832  }
1833 #endif // LIBMESH_ENABLE_UNIQUE_ID
1834  }
1835  }
1836 }
const class libmesh_nullptr_t libmesh_nullptr
bool version_at_least_0_9_6() const
Definition: xdr_io.C:2126
IterBase * end
libmesh_assert(j)
const dof_id_type n_nodes
Definition: tecplot_io.C:67
void broadcast(T &data, const unsigned int root_id=0) const
static const std::size_t io_blksize
Definition: xdr_io.h:320
const Parallel::Communicator & comm() const
header_id_type _field_width
Definition: xdr_io.h:310
long double min(long double a, double b)
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
processor_id_type processor_id() const
template<typename T >
void libMesh::XdrIO::read_serialized_nodesets ( Xdr & io,
 T value 
)
private

Read the nodeset conditions for a parallel, distributed mesh

Returns
The number of nodesets read

Definition at line 1963 of file xdr_io.C.

References libMesh::BoundaryInfo::add_node(), boundary_condition_file_name(), libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::MeshTools::Generation::Private::idx(), io_blksize, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MT >::mesh(), libMesh::MeshInput< MeshBase >::mesh(), std::min(), libMesh::ParallelObject::processor_id(), read_serialized_bc_names(), and libMesh::Xdr::reading().

Referenced by polynomial_level_file_name(), and read().

1964 {
1965  if (this->boundary_condition_file_name() == "n/a") return;
1966 
1967  libmesh_assert (io.reading());
1968 
1969  // convenient reference to our mesh
1970  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1971 
1972  // and our boundary info object
1973  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1974 
1975  // Version 0.9.2+ introduces entity names
1976  read_serialized_bc_names(io, boundary_info, false); // nodeset names
1977 
1978  // TODO: Make a data object that works with both the element and nodal bcs
1979  std::vector<DofBCData> node_bc_data;
1980  std::vector<T> input_buffer;
1981 
1982  header_id_type n_nodesets=0;
1983  if (this->processor_id() == 0)
1984  io.data (n_nodesets);
1985  this->comm().broadcast (n_nodesets);
1986 
1987  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
1988  {
1989  first_bc = blk*io_blksize;
1990  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
1991 
1992  input_buffer.resize (2*(last_bc - first_bc));
1993 
1994  if (this->processor_id() == 0)
1995  io.data_stream (input_buffer.empty() ? libmesh_nullptr : &input_buffer[0],
1996  cast_int<unsigned int>(input_buffer.size()));
1997 
1998  this->comm().broadcast (input_buffer);
1999  node_bc_data.clear();
2000  node_bc_data.reserve (input_buffer.size()/2);
2001 
2002  // convert the input_buffer to DofBCData to facilitate searching
2003  for (std::size_t idx=0; idx<input_buffer.size(); idx+=2)
2004  node_bc_data.push_back
2005  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
2006  0,
2007  cast_int<boundary_id_type>(input_buffer[idx+1])));
2008  input_buffer.clear();
2009  // note that while the files *we* write should already be sorted by
2010  // node id this is not necessarily guaranteed.
2011  std::sort (node_bc_data.begin(), node_bc_data.end());
2012 
2013  MeshBase::const_node_iterator
2014  it = mesh.nodes_begin(),
2015  end = mesh.nodes_end();
2016 
2017  // Look for BCs in this block for all nodes we have
2018  // (not just local ones). Do this by finding all the entries
2019  // in node_bc_data whose dof_id(node_id) match the ID of the current node.
2020  for (std::pair<std::vector<DofBCData>::iterator,
2021  std::vector<DofBCData>::iterator> pos; it!=end; ++it)
2022 #if defined(__SUNPRO_CC) || defined(__PGI)
2023  for (pos = std::equal_range (node_bc_data.begin(), node_bc_data.end(), (*it)->id(), CompareIntDofBCData());
2024  pos.first != pos.second; ++pos.first)
2025 #else
2026  for (pos = std::equal_range (node_bc_data.begin(), node_bc_data.end(), (*it)->id());
2027  pos.first != pos.second; ++pos.first)
2028 #endif
2029  {
2030  // Note: dof_id from ElemBCData is being used to hold node_id here
2031  libmesh_assert_equal_to (pos.first->dof_id, (*it)->id());
2032 
2033  boundary_info.add_node (*it, pos.first->bc_id);
2034  }
2035  }
2036 }
const class libmesh_nullptr_t libmesh_nullptr
IterBase * end
void read_serialized_bc_names(Xdr &io, BoundaryInfo &info, bool is_sideset)
Definition: xdr_io.C:2040
libmesh_assert(j)
const std::string & boundary_condition_file_name() const
Definition: xdr_io.h:143
uint32_t header_id_type
Definition: xdr_io.h:60
void broadcast(T &data, const unsigned int root_id=0) const
static const std::size_t io_blksize
Definition: xdr_io.h:320
const Parallel::Communicator & comm() const
long double min(long double a, double b)
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
processor_id_type processor_id() const
template<typename T >
void libMesh::XdrIO::read_serialized_shellface_bcs ( Xdr & io,
 T value 
)
private

Read the "shell face" boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Returns
The number of bcs read

Definition at line 1955 of file xdr_io.C.

References read_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and read().

1956 {
1957  read_serialized_bcs_helper(io, value, "shellface");
1958 }
void read_serialized_bcs_helper(Xdr &io, T, const std::string bc_type)
Definition: xdr_io.C:1841
template<typename T >
void libMesh::XdrIO::read_serialized_side_bcs ( Xdr & io,
 T value 
)
private

Read the side boundary conditions for a parallel, distributed mesh

Returns
The number of bcs read

Definition at line 1939 of file xdr_io.C.

References read_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and read().

1940 {
1941  read_serialized_bcs_helper(io, value, "side");
1942 }
void read_serialized_bcs_helper(Xdr &io, T, const std::string bc_type)
Definition: xdr_io.C:1841
void libMesh::XdrIO::read_serialized_subdomain_names ( Xdr io)
private

Read subdomain name information - NEW in 0.9.2 format

Definition at line 1458 of file xdr_io.C.

References libMesh::Parallel::Communicator::broadcast(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshInput< MT >::mesh(), libMesh::ParallelObject::processor_id(), libMesh::MeshBase::set_subdomain_name_map(), and version_at_least_0_9_2().

Referenced by polynomial_level_file_name(), and read().

1459 {
1460  const bool read_entity_info = version_at_least_0_9_2();
1461  if (read_entity_info)
1462  {
1463  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1464 
1465  unsigned int n_subdomain_names = 0;
1466  std::vector<header_id_type> subdomain_ids;
1467  std::vector<std::string> subdomain_names;
1468 
1469  // Read the sideset names
1470  if (this->processor_id() == 0)
1471  {
1472  io.data(n_subdomain_names);
1473 
1474  subdomain_ids.resize(n_subdomain_names);
1475  subdomain_names.resize(n_subdomain_names);
1476 
1477  if (n_subdomain_names)
1478  {
1479  io.data(subdomain_ids);
1480  io.data(subdomain_names);
1481  }
1482  }
1483 
1484  // Broadcast the subdomain names to all processors
1485  this->comm().broadcast(n_subdomain_names);
1486  if (n_subdomain_names == 0)
1487  return;
1488 
1489  subdomain_ids.resize(n_subdomain_names);
1490  subdomain_names.resize(n_subdomain_names);
1491  this->comm().broadcast(subdomain_ids);
1492  this->comm().broadcast(subdomain_names);
1493 
1494  // Reassemble the named subdomain information
1495  std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();
1496 
1497  for (unsigned int i=0; i<n_subdomain_names; ++i)
1498  subdomain_map.insert(std::make_pair(subdomain_ids[i], subdomain_names[i]));
1499  }
1500 }
bool version_at_least_0_9_2() const
Definition: xdr_io.C:2118
void broadcast(T &data, const unsigned int root_id=0) const
const Parallel::Communicator & comm() const
processor_id_type processor_id() const
void libMesh::XdrIO::set_auto_parallel ( )
inline

Insist that we should write parallel files if and only if the mesh is an already distributed DistributedMesh.

Definition at line 359 of file xdr_io.h.

References _write_parallel, and _write_serial.

Referenced by legacy().

360 {
361  this->_write_serial = false;
362  this->_write_parallel = false;
363 }
bool _write_parallel
Definition: xdr_io.h:308
bool _write_serial
Definition: xdr_io.h:307
void libMesh::MeshInput< MeshBase >::set_n_partitions ( unsigned int  n_parts)
inlineprotectedinherited

Sets the number of partitions in the mesh. Typically this gets done by the partitioner, but some parallel file formats begin "pre-partitioned".

Definition at line 91 of file mesh_input.h.

References libMesh::MeshInput< MT >::mesh().

Referenced by libMesh::Nemesis_IO::read(), and read().

91 { this->mesh().set_n_partitions() = n_parts; }
unsigned int & set_n_partitions()
Definition: mesh_base.h:1295
void libMesh::XdrIO::set_write_parallel ( bool  do_parallel = true)
inline

Insist that we should/shouldn't write parallel files.

Definition at line 349 of file xdr_io.h.

References _write_parallel, and _write_serial.

Referenced by legacy().

350 {
351  this->_write_parallel = do_parallel;
352 
353  this->_write_serial = !do_parallel;
354 }
bool _write_parallel
Definition: xdr_io.h:308
bool _write_serial
Definition: xdr_io.h:307
void libMesh::MeshInput< MeshBase >::skip_comment_lines ( std::istream &  in,
const char  comment_start 
)
protectedinherited

Reads input from in, skipping all the lines that start with the character comment_start.

Referenced by libMesh::TetGenIO::read(), and libMesh::UCDIO::read_implementation().

const std::string& libMesh::XdrIO::subdomain_map_file_name ( ) const
inline

Get/Set the subdomain file name.

Definition at line 155 of file xdr_io.h.

References _subdomain_map_file.

Referenced by read(), read_serialized_connectivity(), write(), and write_serialized_connectivity().

155 { return _subdomain_map_file; }
std::string _subdomain_map_file
Definition: xdr_io.h:314
std::string& libMesh::XdrIO::subdomain_map_file_name ( )
inline

Definition at line 156 of file xdr_io.h.

References _subdomain_map_file.

156 { return _subdomain_map_file; }
std::string _subdomain_map_file
Definition: xdr_io.h:314
const std::string& libMesh::XdrIO::version ( ) const
inline

Get/Set the version string. Valid version strings:

* "libMesh-0.7.0+"
* "libMesh-0.7.0+ parallel"
* 

If "libMesh" is not detected in the version string the LegacyXdrIO class will be used to read older (pre version 0.7.0) mesh files.

Definition at line 137 of file xdr_io.h.

References _version.

Referenced by read(), version_at_least_0_9_2(), version_at_least_0_9_6(), version_at_least_1_1_0(), and write().

137 { return _version; }
std::string _version
Definition: xdr_io.h:311
std::string& libMesh::XdrIO::version ( )
inline

Definition at line 138 of file xdr_io.h.

References _version.

138 { return _version; }
std::string _version
Definition: xdr_io.h:311
bool libMesh::XdrIO::version_at_least_0_9_2 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 0.9.2.

Definition at line 2118 of file xdr_io.C.

References version().

Referenced by polynomial_level_file_name(), read(), read_serialized_bc_names(), read_serialized_connectivity(), and read_serialized_subdomain_names().

2119 {
2120  return
2121  (this->version().find("0.9.2") != std::string::npos) ||
2122  (this->version().find("0.9.6") != std::string::npos) ||
2123  (this->version().find("1.1.0") != std::string::npos);
2124 }
const std::string & version() const
Definition: xdr_io.h:137
bool libMesh::XdrIO::version_at_least_0_9_6 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 0.9.6.

Definition at line 2126 of file xdr_io.C.

References version().

Referenced by polynomial_level_file_name(), and read_serialized_nodes().

2127 {
2128  return
2129  (this->version().find("0.9.6") != std::string::npos) ||
2130  (this->version().find("1.1.0") != std::string::npos);
2131 }
const std::string & version() const
Definition: xdr_io.h:137
bool libMesh::XdrIO::version_at_least_1_1_0 ( ) const
Returns
true if the current file has an XDR/XDA version that matches or exceeds 1.1.0.

Definition at line 2133 of file xdr_io.C.

References version().

Referenced by polynomial_level_file_name(), and read().

2134 {
2135  return
2136  (this->version().find("1.1.0") != std::string::npos);
2137 }
const std::string & version() const
Definition: xdr_io.h:137
void libMesh::XdrIO::write ( const std::string &  name)
virtual

This method implements writing a mesh to a specified file.

Implements libMesh::MeshOutput< MeshBase >.

Definition at line 156 of file xdr_io.C.

References _write_unique_id, libMesh::Parallel::Communicator::barrier(), binary(), boundary_condition_file_name(), libMesh::Xdr::close(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::ENCODE, libMesh::MeshBase::get_boundary_info(), legacy(), libMesh::libmesh_assert(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::BoundaryInfo::n_boundary_conds(), libMesh::BoundaryInfo::n_edge_conds(), libMesh::MeshBase::n_elem(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::MeshBase::n_nodes(), libMesh::BoundaryInfo::n_nodeset_conds(), libMesh::MeshTools::n_p_levels(), libMesh::BoundaryInfo::n_shellface_conds(), libMesh::MeshBase::n_subdomains(), libMesh::out, partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), subdomain_map_file_name(), version(), libMesh::WRITE, write_parallel(), write_serialized_connectivity(), write_serialized_edge_bcs(), write_serialized_nodes(), write_serialized_nodesets(), write_serialized_shellface_bcs(), write_serialized_side_bcs(), and write_serialized_subdomain_names().

Referenced by libMesh::ErrorVector::plot_error(), and libMesh::NameBasedIO::write().

157 {
158  if (this->legacy())
159  libmesh_error_msg("We don't support writing parallel files in the legacy format.");
160 
161  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);
162 
163  START_LOG("write()","XdrIO");
164 
165  // convenient reference to our mesh
166  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
167 
168  header_id_type n_elem = mesh.n_elem();
169  header_id_type n_nodes = mesh.n_nodes();
170 
171  libmesh_assert(n_elem == mesh.n_elem());
172  libmesh_assert(n_nodes == mesh.n_nodes());
173 
174  header_id_type n_side_bcs = cast_int<header_id_type>(mesh.get_boundary_info().n_boundary_conds());
175  header_id_type n_edge_bcs = cast_int<header_id_type>(mesh.get_boundary_info().n_edge_conds());
176  header_id_type n_shellface_bcs = cast_int<header_id_type>(mesh.get_boundary_info().n_shellface_conds());
177  header_id_type n_nodesets = cast_int<header_id_type>(mesh.get_boundary_info().n_nodeset_conds());
178  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);
179 
180  bool write_parallel_files = this->write_parallel();
181 
182  //-------------------------------------------------------------
183  // For all the optional files -- the default file name is "n/a".
184  // However, the user may specify an optional external file.
185 
186  // If there are BCs and the user has not already provided a
187  // file name then write to "."
188  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
189  this->boundary_condition_file_name() == "n/a")
190  this->boundary_condition_file_name() = ".";
191 
192  // If there are more than one subdomains and the user has not specified an
193  // external file then write the subdomain mapping to the default file "."
194  if ((mesh.n_subdomains() > 0) &&
195  (this->subdomain_map_file_name() == "n/a"))
196  this->subdomain_map_file_name() = ".";
197 
198  // In general we don't write the partition information.
199 
200  // If we have p levels and the user has not already provided
201  // a file name then write to "."
202  if ((n_p_levels > 1) &&
203  (this->polynomial_level_file_name() == "n/a"))
204  this->polynomial_level_file_name() = ".";
205 
206  // write the header
207  if (this->processor_id() == 0)
208  {
209  std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
210  io.data (full_ver);
211 
212  io.data (n_elem, "# number of elements");
213  io.data (n_nodes, "# number of nodes");
214 
215  io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
216  io.data (this->subdomain_map_file_name(), "# subdomain id specification file");
217  io.data (this->partition_map_file_name(), "# processor id specification file");
218  io.data (this->polynomial_level_file_name(), "# p-level specification file");
219 
220  // Version 0.9.2+ introduces sizes for each type
221  header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;
222 
223  const bool
224  write_p_level = ("." == this->polynomial_level_file_name()),
225  write_partitioning = ("." == this->partition_map_file_name()),
226  write_subdomain_id = ("." == this->subdomain_map_file_name()),
227  write_bcs = ("." == this->boundary_condition_file_name());
228 
229  io.data (write_size, "# type size");
230  io.data (_write_unique_id ? write_size : zero_size, "# uid size");
231  io.data (write_partitioning ? write_size : zero_size, "# pid size");
232  io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
233  io.data (write_p_level ? write_size : zero_size, "# p-level size");
234  // Boundary Condition sizes
235  io.data (write_bcs ? write_size : zero_size, "# eid size"); // elem id
236  io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
237  io.data (write_bcs ? write_size : zero_size, "# bid size"); // boundary id
238  }
239 
240  if (write_parallel_files)
241  {
242  // Parallel xdr mesh files aren't implemented yet; until they
243  // are we'll just warn the user and write a serial file.
244  libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
245  libMesh::out << "Writing a serialized file instead." << std::endl;
246 
247  // write subdomain names
248  this->write_serialized_subdomain_names (io);
249 
250  // write connectivity
251  this->write_serialized_connectivity (io, n_elem);
252 
253  // write the nodal locations
254  this->write_serialized_nodes (io, n_nodes);
255 
256  // write the side boundary condition information
257  this->write_serialized_side_bcs (io, n_side_bcs);
258 
259  // write the nodeset information
260  this->write_serialized_nodesets (io, n_nodesets);
261 
262  // write the edge boundary condition information
263  this->write_serialized_edge_bcs (io, n_edge_bcs);
264 
265  // write the "shell face" boundary condition information
266  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
267  }
268  else
269  {
270  // write subdomain names
271  this->write_serialized_subdomain_names (io);
272 
273  // write connectivity
274  this->write_serialized_connectivity (io, n_elem);
275 
276  // write the nodal locations
277  this->write_serialized_nodes (io, n_nodes);
278 
279  // write the side boundary condition information
280  this->write_serialized_side_bcs (io, n_side_bcs);
281 
282  // write the nodeset information
283  this->write_serialized_nodesets (io, n_nodesets);
284 
285  // write the edge boundary condition information
286  this->write_serialized_edge_bcs (io, n_edge_bcs);
287 
288  // write the "shell face" boundary condition information
289  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
290  }
291 
292  STOP_LOG("write()","XdrIO");
293 
294  // pause all processes until the writing ends -- this will
295  // protect for the pathological case where a write is
296  // followed immediately by a read. The write must be
297  // guaranteed to complete first.
298  io.close();
299  this->comm().barrier();
300 }
std::string name(const ElemQuality q)
Definition: elem_quality.C:39
bool binary() const
Definition: xdr_io.h:100
bool write_parallel() const
Definition: xdr_io.h:328
const std::string & polynomial_level_file_name() const
Definition: xdr_io.h:161
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Definition: mesh_tools.C:676
const MT & mesh() const
Definition: mesh_output.h:216
void write_serialized_shellface_bcs(Xdr &io, const header_id_type n_shellface_bcs) const
Definition: xdr_io.C:1182
libmesh_assert(j)
void write_serialized_edge_bcs(Xdr &io, const header_id_type n_edge_bcs) const
Definition: xdr_io.C:1175
largest_id_type xdr_id_type
Definition: xdr_io.h:57
void write_serialized_side_bcs(Xdr &io, const header_id_type n_side_bcs) const
Definition: xdr_io.C:1168
const std::string & boundary_condition_file_name() const
Definition: xdr_io.h:143
const dof_id_type n_nodes
Definition: tecplot_io.C:67
void write_serialized_connectivity(Xdr &io, const dof_id_type n_elem) const
Definition: xdr_io.C:342
void write_serialized_nodes(Xdr &io, const dof_id_type n_nodes) const
Definition: xdr_io.C:705
uint32_t header_id_type
Definition: xdr_io.h:60
bool _write_unique_id
Definition: xdr_io.h:309
void write_serialized_subdomain_names(Xdr &io) const
Definition: xdr_io.C:304
const Parallel::Communicator & comm() const
const std::string & subdomain_map_file_name() const
Definition: xdr_io.h:155
const std::string & partition_map_file_name() const
Definition: xdr_io.h:149
bool legacy() const
Definition: xdr_io.h:106
const std::string & version() const
Definition: xdr_io.h:137
unsigned int n_p_levels(const MeshBase &mesh)
Definition: mesh_tools.C:692
OStreamProxy out(std::cout)
void write_serialized_nodesets(Xdr &io, const header_id_type n_nodesets) const
Definition: xdr_io.C:1189
processor_id_type processor_id() const
virtual void libMesh::MeshOutput< MeshBase >::write_equation_systems ( const std::string &  ,
const EquationSystems ,
const std::set< std::string > *  system_names = libmesh_nullptr 
)
virtualinherited

This method implements writing a mesh with data to a specified file where the data is taken from the EquationSystems object.

Reimplemented in libMesh::NameBasedIO.

Referenced by libMesh::Nemesis_IO::write_timestep(), and libMesh::ExodusII_IO::write_timestep().

virtual void libMesh::MeshOutput< MeshBase >::write_nodal_data ( const std::string &  ,
const std::vector< Number > &  ,
const std::vector< std::string > &   
)
inlinevirtualinherited

This method implements writing a mesh with nodal data to a specified file where the nodal data and variable names are provided.

Reimplemented in libMesh::ExodusII_IO, libMesh::NameBasedIO, libMesh::GmshIO, libMesh::Nemesis_IO, libMesh::VTKIO, libMesh::UCDIO, libMesh::GMVIO, libMesh::MEDITIO, libMesh::GnuPlotIO, and libMesh::TecplotIO.

Definition at line 96 of file mesh_output.h.

References libMesh::MeshOutput< MT >::ascii_precision(), libMesh::MeshOutput< MT >::mesh(), and libMesh::MeshOutput< MT >::write_nodal_data().

99  { libmesh_not_implemented(); }
virtual void libMesh::MeshOutput< MeshBase >::write_nodal_data ( const std::string &  ,
const NumericVector< Number > &  ,
const std::vector< std::string > &   
)
virtualinherited

This method should be overridden by "parallel" output formats for writing nodal data. Instead of getting a localized copy of the nodal solution vector, it is passed a NumericVector of type=PARALLEL which is in node-major order i.e. (u0,v0,w0, u1,v1,w1, u2,v2,w2, u3,v3,w3, ...) and contains n_nodes*n_vars total entries. Then, it is up to the individual I/O class to extract the required solution values from this vector and write them in parallel.

If not implemented, localizes the parallel vector into a std::vector and calls the other version of this function.

Reimplemented in libMesh::Nemesis_IO.

bool libMesh::XdrIO::write_parallel ( ) const
inline

Report whether we should write parallel files.

Definition at line 328 of file xdr_io.h.

References _write_parallel, _write_serial, libMesh::MeshBase::is_serial(), libMesh::libmesh_assert(), libMesh::MeshInput< MeshBase >::mesh(), and libMesh::MeshOutput< MT >::mesh().

Referenced by legacy(), and write().

329 {
330  // We can't insist on both serial and parallel
331  libmesh_assert (!this->_write_serial || !this->_write_parallel);
332 
333  // If we insisted on serial, do that
334  if (this->_write_serial)
335  return false;
336 
337  // If we insisted on parallel, do that
338  if (this->_write_parallel)
339  return true;
340 
341  // If we're doing things automatically, check the mesh
342  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
343  return !mesh.is_serial();
344 }
bool _write_parallel
Definition: xdr_io.h:308
const MT & mesh() const
Definition: mesh_output.h:216
libmesh_assert(j)
bool _write_serial
Definition: xdr_io.h:307
void libMesh::XdrIO::write_serialized_bc_names ( Xdr io,
const BoundaryInfo info,
bool  is_sideset 
) const
private

Write boundary names information (sideset and nodeset) - NEW in 0.9.2 format

Definition at line 1271 of file xdr_io.C.

References libMesh::Xdr::data(), libMesh::BoundaryInfo::get_nodeset_name_map(), libMesh::BoundaryInfo::get_sideset_name_map(), and libMesh::ParallelObject::processor_id().

Referenced by polynomial_level_file_name(), write_serialized_bcs_helper(), and write_serialized_nodesets().

1272 {
1273  if (this->processor_id() == 0)
1274  {
1275  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1276  info.get_sideset_name_map() : info.get_nodeset_name_map();
1277 
1278  std::vector<header_id_type> boundary_ids; boundary_ids.reserve(boundary_map.size());
1279  std::vector<std::string> boundary_names; boundary_names.reserve(boundary_map.size());
1280 
1281  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
1282  // return writable references in boundary_info, it's possible for the user to leave some entity names
1283  // blank. We can't write those to the XDA file.
1284  header_id_type n_boundary_names = 0;
1285  std::map<boundary_id_type, std::string>::const_iterator it_end = boundary_map.end();
1286  for (std::map<boundary_id_type, std::string>::const_iterator it = boundary_map.begin(); it != it_end; ++it)
1287  {
1288  if (!it->second.empty())
1289  {
1290  n_boundary_names++;
1291  boundary_ids.push_back(it->first);
1292  boundary_names.push_back(it->second);
1293  }
1294  }
1295 
1296  if (is_sideset)
1297  io.data(n_boundary_names, "# sideset id to name map");
1298  else
1299  io.data(n_boundary_names, "# nodeset id to name map");
1300  // Write out the ids and names in two vectors
1301  if (n_boundary_names)
1302  {
1303  io.data(boundary_ids);
1304  io.data(boundary_names);
1305  }
1306  }
1307 }
uint32_t header_id_type
Definition: xdr_io.h:60
processor_id_type processor_id() const
void libMesh::XdrIO::write_serialized_bcs_helper ( Xdr io,
const header_id_type  n_side_bcs,
const std::string  bc_type 
) const
private

Helper function used in write_serialized_side_bcs, write_serialized_edge_bcs, and write_serialized_shellface_bcs.

Definition at line 1035 of file xdr_io.C.

References bc_id, libMesh::BoundaryInfo::boundary_ids(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), libMesh::BoundaryInfo::edge_boundary_ids(), end, libMesh::Parallel::Communicator::gather(), libMesh::MeshTools::Generation::Private::idx(), libMesh::BoundaryInfo::invalid_id, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::Elem::n_edges(), libMesh::ParallelObject::n_processors(), libMesh::Elem::n_sides(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::BoundaryInfo::shellface_boundary_ids(), write_serialized_bc_names(), and libMesh::Xdr::writing().

Referenced by polynomial_level_file_name(), write_serialized_edge_bcs(), write_serialized_shellface_bcs(), and write_serialized_side_bcs().

1036 {
1037  libmesh_assert (io.writing());
1038 
1039  // convenient reference to our mesh
1040  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1041 
1042  // and our boundary info object
1043  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1044 
1045  // Version 0.9.2+ introduces entity names
1046  write_serialized_bc_names(io, boundary_info, true); // sideset names
1047 
1048  header_id_type n_bcs_out = n_bcs;
1049  if (this->processor_id() == 0)
1050  {
1051  std::stringstream comment_string;
1052  comment_string << "# number of " << bc_type << " boundary conditions";
1053  io.data (n_bcs_out, comment_string.str().c_str());
1054  }
1055  n_bcs_out = 0;
1056 
1057  if (!n_bcs) return;
1058 
1059  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1060  std::vector<std::size_t> bc_sizes(this->n_processors());
1061 
1062  // Boundary conditions are only specified for level-0 elements
1063  MeshBase::const_element_iterator
1064  it = mesh.local_level_elements_begin(0),
1065  end = mesh.local_level_elements_end(0);
1066 
1067  // Container to catch boundary IDs handed back by BoundaryInfo
1068  std::vector<boundary_id_type> bc_ids;
1069 
1070  dof_id_type n_local_level_0_elem=0;
1071  for (; it!=end; ++it, n_local_level_0_elem++)
1072  {
1073  const Elem * elem = *it;
1074 
1075  if (bc_type == "side")
1076  {
1077  for (unsigned short s=0; s<elem->n_sides(); s++)
1078  {
1079  boundary_info.boundary_ids (elem, s, bc_ids);
1080  for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
1081  {
1082  const boundary_id_type bc_id = *id_it;
1083  if (bc_id != BoundaryInfo::invalid_id)
1084  {
1085  xfer_bcs.push_back (n_local_level_0_elem);
1086  xfer_bcs.push_back (s) ;
1087  xfer_bcs.push_back (bc_id);
1088  }
1089  }
1090  }
1091  }
1092  else if (bc_type == "edge")
1093  {
1094  for (unsigned short e=0; e<elem->n_edges(); e++)
1095  {
1096  boundary_info.edge_boundary_ids (elem, e, bc_ids);
1097  for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
1098  {
1099  const boundary_id_type bc_id = *id_it;
1100  if (bc_id != BoundaryInfo::invalid_id)
1101  {
1102  xfer_bcs.push_back (n_local_level_0_elem);
1103  xfer_bcs.push_back (e) ;
1104  xfer_bcs.push_back (bc_id);
1105  }
1106  }
1107  }
1108  }
1109  else if (bc_type == "shellface")
1110  {
1111  for (unsigned short sf=0; sf<2; sf++)
1112  {
1113  boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
1114  for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
1115  {
1116  const boundary_id_type bc_id = *id_it;
1117  if (bc_id != BoundaryInfo::invalid_id)
1118  {
1119  xfer_bcs.push_back (n_local_level_0_elem);
1120  xfer_bcs.push_back (sf) ;
1121  xfer_bcs.push_back (bc_id);
1122  }
1123  }
1124  }
1125  }
1126  else
1127  {
1128  libmesh_error_msg("bc_type not recognized: " + bc_type);
1129  }
1130  }
1131 
1132  xfer_bcs.push_back(n_local_level_0_elem);
1133  std::size_t my_size = xfer_bcs.size();
1134  this->comm().gather (0, my_size, bc_sizes);
1135 
1136  // All processors send their xfer buffers to processor 0
1137  // Processor 0 will receive all buffers and write out the bcs
1138  if (this->processor_id() == 0)
1139  {
1140  dof_id_type elem_offset = 0;
1141  for (unsigned int pid=0; pid<this->n_processors(); pid++)
1142  {
1143  recv_bcs.resize(bc_sizes[pid]);
1144  if (pid == 0)
1145  recv_bcs = xfer_bcs;
1146  else
1147  this->comm().receive (pid, recv_bcs);
1148 
1149  const dof_id_type my_n_local_level_0_elem
1150  = cast_int<dof_id_type>(recv_bcs.back());
1151  recv_bcs.pop_back();
1152 
1153  for (std::size_t idx=0; idx<recv_bcs.size(); idx += 3, n_bcs_out++)
1154  recv_bcs[idx+0] += elem_offset;
1155 
1156  io.data_stream (recv_bcs.empty() ? libmesh_nullptr : &recv_bcs[0],
1157  cast_int<unsigned int>(recv_bcs.size()), 3);
1158  elem_offset += my_n_local_level_0_elem;
1159  }
1160  libmesh_assert_equal_to (n_bcs, n_bcs_out);
1161  }
1162  else
1163  this->comm().send (0, xfer_bcs);
1164 }
processor_id_type n_processors() const
const class libmesh_nullptr_t libmesh_nullptr
IterBase * end
const MT & mesh() const
Definition: mesh_output.h:216
boundary_id_type bc_id
Definition: xdr_io.C:50
libmesh_assert(j)
int8_t boundary_id_type
Definition: id_types.h:51
static const boundary_id_type invalid_id
void write_serialized_bc_names(Xdr &io, const BoundaryInfo &info, bool is_sideset) const
Definition: xdr_io.C:1271
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
uint32_t header_id_type
Definition: xdr_io.h:60
void gather(const unsigned int root_id, const T &send, std::vector< T > &recv) const
const Parallel::Communicator & comm() const
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
uint8_t dof_id_type
Definition: id_types.h:64
processor_id_type processor_id() const
void libMesh::XdrIO::write_serialized_connectivity ( Xdr io,
const dof_id_type  n_elem 
) const
private

Write the connectivity for a parallel, distributed mesh

Definition at line 342 of file xdr_io.C.

References _write_unique_id, libMesh::Elem::child_ptr(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::Parallel::Communicator::gather(), libMesh::DofObject::id(), libMesh::libmesh_assert(), libMesh::MeshBase::local_elements_end(), libMesh::MeshBase::local_level_elements_begin(), libMesh::MeshBase::local_level_elements_end(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::MeshTools::n_active_levels(), libMesh::Elem::n_children(), libMesh::MeshBase::n_elem(), libMesh::MeshTools::n_elem(), n_nodes, libMesh::ParallelObject::n_processors(), pack_element(), partition_map_file_name(), polynomial_level_file_name(), libMesh::ParallelObject::processor_id(), libMesh::DofObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::Parallel::Communicator::send_receive(), subdomain_map_file_name(), libMesh::Parallel::Communicator::sum(), and libMesh::Xdr::writing().

Referenced by polynomial_level_file_name(), and write().

343 {
344  libmesh_assert (io.writing());
345 
346  const bool
347  write_p_level = ("." == this->polynomial_level_file_name()),
348  write_partitioning = ("." == this->partition_map_file_name()),
349  write_subdomain_id = ("." == this->subdomain_map_file_name());
350 
351  // convenient reference to our mesh
352  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
353  libmesh_assert_equal_to (n_elem, mesh.n_elem());
354 
355  // We will only write active elements and their parents.
356  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
357  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);
358 
359  MeshBase::const_element_iterator it = mesh.local_elements_end(), end=it;
360 
361  // Find the number of local and global elements at each level
362 #ifndef NDEBUG
363  xdr_id_type tot_n_elem = 0;
364 #endif
365  for (unsigned int level=0; level<n_active_levels; level++)
366  {
367  it = mesh.local_level_elements_begin(level);
368  end = mesh.local_level_elements_end(level);
369 
370  n_global_elem_at_level[level] = MeshTools::n_elem(it, end);
371 
372  this->comm().sum(n_global_elem_at_level[level]);
373 #ifndef NDEBUG
374  tot_n_elem += n_global_elem_at_level[level];
375 #endif
376  libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
377  libmesh_assert_less_equal (tot_n_elem, n_elem);
378  }
379 
380  std::vector<xdr_id_type>
381  xfer_conn, recv_conn;
382  std::vector<dof_id_type>
383  n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
384  std::vector<xdr_id_type> output_buffer;
385  std::vector<std::size_t>
386  xfer_buf_sizes(this->n_processors());
387 
388 #ifdef LIBMESH_ENABLE_AMR
389  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type> > id_map_type;
390  id_map_type parent_id_map, child_id_map;
391 #endif
392 
393  dof_id_type my_next_elem=0, next_global_elem=0;
394 
395  //-------------------------------------------
396  // First write the level-0 elements directly.
397  it = mesh.local_level_elements_begin(0);
398  end = mesh.local_level_elements_end(0);
399  for (; it != end; ++it, ++my_next_elem)
400  {
401  pack_element (xfer_conn, *it);
402 #ifdef LIBMESH_ENABLE_AMR
403  parent_id_map[(*it)->id()] = std::make_pair(this->processor_id(),
404  my_next_elem);
405 #endif
406  }
407  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.
408 
409  std::size_t my_size = xfer_conn.size();
410  this->comm().gather (0, my_next_elem, n_elem_on_proc);
411  this->comm().gather (0, my_size, xfer_buf_sizes);
412 
413  processor_offsets[0] = 0;
414  for (unsigned int pid=1; pid<this->n_processors(); pid++)
415  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
416 
417  // All processors send their xfer buffers to processor 0.
418  // Processor 0 will receive the data and write out the elements.
419  if (this->processor_id() == 0)
420  {
421  // Write the number of elements at this level.
422  {
423  std::string comment = "# n_elem at level 0", legend = ", [ type ";
424  if (_write_unique_id)
425  legend += "uid ";
426  if (write_partitioning)
427  legend += "pid ";
428  if (write_subdomain_id)
429  legend += "sid ";
430  if (write_p_level)
431  legend += "p_level ";
432  legend += "(n0 ... nN-1) ]";
433  comment += legend;
434  io.data (n_global_elem_at_level[0], comment.c_str());
435  }
436 
437  for (unsigned int pid=0; pid<this->n_processors(); pid++)
438  {
439  recv_conn.resize(xfer_buf_sizes[pid]);
440  if (pid == 0)
441  recv_conn = xfer_conn;
442  else
443  this->comm().receive (pid, recv_conn);
444 
445  // at a minimum, the buffer should contain the number of elements,
446  // which could be 0.
447  libmesh_assert (!recv_conn.empty());
448 
449  {
450  const xdr_id_type n_elem_received = recv_conn.back();
451  std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();
452 
453  for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
454  {
455  output_buffer.clear();
456 
457  // n. nodes
458  const xdr_id_type n_nodes = *recv_conn_iter;
459  ++recv_conn_iter;
460 
461  // type
462  output_buffer.push_back(*recv_conn_iter);
463  ++recv_conn_iter;
464 
465  // unique_id
466  if (_write_unique_id)
467  output_buffer.push_back(*recv_conn_iter);
468  ++recv_conn_iter;
469 
470  // processor id
471  if (write_partitioning)
472  output_buffer.push_back(*recv_conn_iter);
473  ++recv_conn_iter;
474 
475  // subdomain id
476  if (write_subdomain_id)
477  output_buffer.push_back(*recv_conn_iter);
478  ++recv_conn_iter;
479 
480 #ifdef LIBMESH_ENABLE_AMR
481  // p level
482  if (write_p_level)
483  output_buffer.push_back(*recv_conn_iter);
484  ++recv_conn_iter;
485 #endif
486  for (dof_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
487  output_buffer.push_back(*recv_conn_iter);
488 
489  io.data_stream
490  (&output_buffer[0],
491  cast_int<unsigned int>(output_buffer.size()),
492  cast_int<unsigned int>(output_buffer.size()));
493  }
494  }
495  }
496  }
497  else
498  this->comm().send (0, xfer_conn);
499 
500 #ifdef LIBMESH_ENABLE_AMR
501  //--------------------------------------------------------------------
502  // Next write the remaining elements indirectly through their parents.
 503  // This will ensure that the children are written in the proper order
504  // so they can be reconstructed properly.
505  for (unsigned int level=1; level<n_active_levels; level++)
506  {
507  xfer_conn.clear();
508 
509  it = mesh.local_level_elements_begin(level-1);
510  end = mesh.local_level_elements_end (level-1);
511 
512  dof_id_type my_n_elem_written_at_level = 0;
513  for (; it != end; ++it)
514  if (!(*it)->active()) // we only want the parents elements at this level, and
515  { // there is no direct iterator for this obscure use
516  const Elem * parent = *it;
517  id_map_type::iterator pos = parent_id_map.find(parent->id());
518  libmesh_assert (pos != parent_id_map.end());
519  const processor_id_type parent_pid = pos->second.first;
520  const dof_id_type parent_id = pos->second.second;
521  parent_id_map.erase(pos);
522 
523  for (unsigned int c=0; c<parent->n_children(); c++, my_next_elem++)
524  {
525  const Elem * child = parent->child_ptr(c);
526  pack_element (xfer_conn, child, parent_id, parent_pid);
527 
 528  // this approach introduces the possibility that we write
529  // non-local elements. These elements may well be parents
530  // at the next step
531  child_id_map[child->id()] = std::make_pair (child->processor_id(),
532  my_n_elem_written_at_level++);
533  }
534  }
535  xfer_conn.push_back(my_n_elem_written_at_level);
536  my_size = xfer_conn.size();
537  this->comm().gather (0, my_size, xfer_buf_sizes);
538 
539  // Processor 0 will receive the data and write the elements.
540  if (this->processor_id() == 0)
541  {
542  // Write the number of elements at this level.
543  {
544  char buf[80];
545  std::sprintf(buf, "# n_elem at level %u", level);
546  std::string comment(buf), legend = ", [ type ";
547 
548  if (_write_unique_id)
549  legend += "uid ";
550  legend += "parent ";
551  if (write_partitioning)
552  legend += "pid ";
553  if (write_subdomain_id)
554  legend += "sid ";
555  if (write_p_level)
556  legend += "p_level ";
557  legend += "(n0 ... nN-1) ]";
558  comment += legend;
559  io.data (n_global_elem_at_level[level], comment.c_str());
560  }
561 
562  for (unsigned int pid=0; pid<this->n_processors(); pid++)
563  {
564  recv_conn.resize(xfer_buf_sizes[pid]);
565  if (pid == 0)
566  recv_conn = xfer_conn;
567  else
568  this->comm().receive (pid, recv_conn);
569 
570  // at a minimum, the buffer should contain the number of elements,
571  // which could be 0.
572  libmesh_assert (!recv_conn.empty());
573 
574  {
575  const xdr_id_type n_elem_received = recv_conn.back();
576  std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();
577 
578  for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
579  {
580  output_buffer.clear();
581 
582  // n. nodes
583  const xdr_id_type n_nodes = *recv_conn_iter;
584  ++recv_conn_iter;
585 
586  // type
587  output_buffer.push_back(*recv_conn_iter);
588  ++recv_conn_iter;
589 
590  // unique_id
591  if (_write_unique_id)
592  output_buffer.push_back(*recv_conn_iter);
593  ++recv_conn_iter;
594 
595  // parent local id
596  const xdr_id_type parent_local_id = *recv_conn_iter;
597  ++recv_conn_iter;
598 
599  // parent processor id
600  const xdr_id_type parent_pid = *recv_conn_iter;
601  ++recv_conn_iter;
602 
603  output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);
604 
605  // processor id
606  if (write_partitioning)
607  output_buffer.push_back(*recv_conn_iter);
608  ++recv_conn_iter;
609 
610  // subdomain id
611  if (write_subdomain_id)
612  output_buffer.push_back(*recv_conn_iter);
613  ++recv_conn_iter;
614 
615  // p level
616  if (write_p_level)
617  output_buffer.push_back(*recv_conn_iter);
618  ++recv_conn_iter;
619 
620  for (xdr_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
621  output_buffer.push_back(*recv_conn_iter);
622 
623  io.data_stream
624  (&output_buffer[0],
625  cast_int<unsigned int>(output_buffer.size()),
626  cast_int<unsigned int>(output_buffer.size()));
627  }
628  }
629  }
630  }
631  else
632  this->comm().send (0, xfer_conn);
633 
634  // update the processor_offsets
635  processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
636  this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
637  for (unsigned int pid=1; pid<this->n_processors(); pid++)
638  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
639 
640  // Now, at the next level we will again iterate over local parents. However,
641  // those parents may have been written by other processors (at this step),
642  // so we need to gather them into our *_id_maps.
643  {
644  std::vector<std::vector<dof_id_type> > requested_ids(this->n_processors());
645  std::vector<dof_id_type> request_to_fill;
646 
647  it = mesh.local_level_elements_begin(level);
648  end = mesh.local_level_elements_end(level);
649 
650  for (; it!=end; ++it)
651  if (!child_id_map.count((*it)->id()))
652  {
653  libmesh_assert_not_equal_to ((*it)->parent()->processor_id(), this->processor_id());
654  requested_ids[(*it)->parent()->processor_id()].push_back((*it)->id());
655  }
656 
657  // Next set the child_ids
658  for (unsigned int p=1; p != this->n_processors(); ++p)
659  {
660  // Trade my requests with processor procup and procdown
661  unsigned int procup = (this->processor_id() + p) %
662  this->n_processors();
663  unsigned int procdown = (this->n_processors() +
664  this->processor_id() - p) %
665  this->n_processors();
666 
667  this->comm().send_receive(procup, requested_ids[procup],
668  procdown, request_to_fill);
669 
670  // Fill those requests by overwriting the requested ids
671  for (std::size_t i=0; i<request_to_fill.size(); i++)
672  {
673  libmesh_assert (child_id_map.count(request_to_fill[i]));
674  libmesh_assert_equal_to (child_id_map[request_to_fill[i]].first, procdown);
675 
676  request_to_fill[i] = child_id_map[request_to_fill[i]].second;
677  }
678 
679  // Trade back the results
680  std::vector<dof_id_type> filled_request;
681  this->comm().send_receive(procdown, request_to_fill,
682  procup, filled_request);
683 
684  libmesh_assert_equal_to (filled_request.size(), requested_ids[procup].size());
685 
686  for (std::size_t i=0; i<filled_request.size(); i++)
687  child_id_map[requested_ids[procup][i]] =
688  std::make_pair (procup,
689  filled_request[i]);
690  }
691  // overwrite the parent_id_map with the child_id_map, but
692  // use std::map::swap() for efficiency.
693  parent_id_map.swap(child_id_map);
694  child_id_map.clear();
695  }
696  }
697 #endif // LIBMESH_ENABLE_AMR
698  if (this->processor_id() == 0)
699  libmesh_assert_equal_to (next_global_elem, n_elem);
700 
701 }
const std::string & polynomial_level_file_name() const
Definition: xdr_io.h:161
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Definition: mesh_tools.C:676
processor_id_type n_processors() const
uint8_t processor_id_type
Definition: id_types.h:99
IterBase * end
const MT & mesh() const
Definition: mesh_output.h:216
libmesh_assert(j)
largest_id_type xdr_id_type
Definition: xdr_io.h:57
void send_receive(const unsigned int dest_processor_id, const T1 &send, const unsigned int source_processor_id, T2 &recv, const MessageTag &send_tag=no_tag, const MessageTag &recv_tag=any_tag) const
const dof_id_type n_nodes
Definition: tecplot_io.C:67
void pack_element(std::vector< xdr_id_type > &conn, const Elem *elem, const dof_id_type parent_id=DofObject::invalid_id, const dof_id_type parent_pid=DofObject::invalid_id) const
Definition: xdr_io.C:2085
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
bool _write_unique_id
Definition: xdr_io.h:309
void gather(const unsigned int root_id, const T &send, std::vector< T > &recv) const
const Parallel::Communicator & comm() const
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
const std::string & subdomain_map_file_name() const
Definition: xdr_io.h:155
const std::string & partition_map_file_name() const
Definition: xdr_io.h:149
processor_id_type processor_id() const
uint8_t dof_id_type
Definition: id_types.h:64
unsigned int n_active_levels(const MeshBase &mesh)
Definition: mesh_tools.C:577
void libMesh::XdrIO::write_serialized_edge_bcs ( Xdr io,
const header_id_type  n_edge_bcs 
) const
private

Write the edge boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Definition at line 1175 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and write().

1176 {
1177  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
1178 }
void write_serialized_bcs_helper(Xdr &io, const header_id_type n_side_bcs, const std::string bc_type) const
Definition: xdr_io.C:1035
void libMesh::XdrIO::write_serialized_nodes ( Xdr io,
const dof_id_type  n_nodes 
) const
private

Write the nodal locations for a parallel, distributed mesh

Definition at line 705 of file xdr_io.C.

References libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::Parallel::Communicator::gather(), libMesh::Parallel::Communicator::get_unique_tag(), libMesh::MeshTools::Generation::Private::idx(), io_blksize, libmesh_nullptr, libMesh::MeshBase::local_nodes_begin(), libMesh::MeshBase::local_nodes_end(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), std::min(), n_nodes, libMesh::MeshBase::n_nodes(), libMesh::ParallelObject::n_processors(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::wait().

Referenced by polynomial_level_file_name(), and write().

706 {
707  // convenient reference to our mesh
708  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
709  libmesh_assert_equal_to (n_nodes, mesh.n_nodes());
710 
711  std::vector<dof_id_type> xfer_ids;
712  std::vector<Real> xfer_coords;
713  std::vector<Real> & coords=xfer_coords;
714 
715  std::vector<std::vector<dof_id_type> > recv_ids (this->n_processors());
716  std::vector<std::vector<Real> > recv_coords(this->n_processors());
717 
718 #ifdef LIBMESH_ENABLE_UNIQUE_ID
719  std::vector<xdr_id_type> xfer_unique_ids;
720  std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
721  std::vector<std::vector<xdr_id_type> > recv_unique_ids (this->n_processors());
722 #endif // LIBMESH_ENABLE_UNIQUE_ID
723 
724  std::size_t n_written=0;
725 
726  for (std::size_t blk=0, last_node=0; last_node<n_nodes; blk++)
727  {
728  const std::size_t first_node = blk*io_blksize;
729  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
730 
731  // Build up the xfer buffers on each processor
732  MeshBase::const_node_iterator
733  it = mesh.local_nodes_begin(),
734  end = mesh.local_nodes_end();
735 
736  xfer_ids.clear();
737  xfer_coords.clear();
738 #ifdef LIBMESH_ENABLE_UNIQUE_ID
739  xfer_unique_ids.clear();
740 #endif // LIBMESH_ENABLE_UNIQUE_ID
741 
742  for (; it!=end; ++it)
743  if (((*it)->id() >= first_node) && // node in [first_node, last_node)
744  ((*it)->id() < last_node))
745  {
746  xfer_ids.push_back((*it)->id());
747 #ifdef LIBMESH_ENABLE_UNIQUE_ID
748  xfer_unique_ids.push_back((*it)->unique_id());
749 #endif // LIBMESH_ENABLE_UNIQUE_ID
750  const Point & p = **it;
751  xfer_coords.push_back(p(0));
752 #if LIBMESH_DIM > 1
753  xfer_coords.push_back(p(1));
754 #endif
755 #if LIBMESH_DIM > 2
756  xfer_coords.push_back(p(2));
757 #endif
758  }
759 
760  //-------------------------------------
761  // Send the xfer buffers to processor 0
762  std::vector<std::size_t> ids_size;
763 
764  const std::size_t my_ids_size = xfer_ids.size();
765 
766  // explicitly gather ids_size
767  this->comm().gather (0, my_ids_size, ids_size);
768 
769  // We will have lots of simultaneous receives if we are
770  // processor 0, so let's use nonblocking receives.
771  std::vector<Parallel::Request>
772 #ifdef LIBMESH_ENABLE_UNIQUE_ID
773  unique_id_request_handles(this->n_processors()-1),
774 #endif // LIBMESH_ENABLE_UNIQUE_ID
775  id_request_handles(this->n_processors()-1),
776  coord_request_handles(this->n_processors()-1);
777 
778  Parallel::MessageTag
779 #ifdef LIBMESH_ENABLE_UNIQUE_ID
780  unique_id_tag = mesh.comm().get_unique_tag(1233),
781 #endif // LIBMESH_ENABLE_UNIQUE_ID
782  id_tag = mesh.comm().get_unique_tag(1234),
783  coord_tag = mesh.comm().get_unique_tag(1235);
784 
785  // Post the receives -- do this on processor 0 only.
786  if (this->processor_id() == 0)
787  {
788  for (unsigned int pid=0; pid<this->n_processors(); pid++)
789  {
790  recv_ids[pid].resize(ids_size[pid]);
791  recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);
792 #ifdef LIBMESH_ENABLE_UNIQUE_ID
793  recv_unique_ids[pid].resize(ids_size[pid]);
794 #endif // LIBMESH_ENABLE_UNIQUE_ID
795 
796  if (pid == 0)
797  {
798  recv_ids[0] = xfer_ids;
799  recv_coords[0] = xfer_coords;
800 #ifdef LIBMESH_ENABLE_UNIQUE_ID
801  recv_unique_ids[0] = xfer_unique_ids;
802 #endif // LIBMESH_ENABLE_UNIQUE_ID
803  }
804  else
805  {
806  this->comm().receive (pid, recv_ids[pid],
807  id_request_handles[pid-1],
808  id_tag);
809  this->comm().receive (pid, recv_coords[pid],
810  coord_request_handles[pid-1],
811  coord_tag);
812 #ifdef LIBMESH_ENABLE_UNIQUE_ID
813  this->comm().receive (pid, recv_unique_ids[pid],
814  unique_id_request_handles[pid-1],
815  unique_id_tag);
816 #endif // LIBMESH_ENABLE_UNIQUE_ID
817  }
818  }
819  }
820  else
821  {
822  // Send -- do this on all other processors.
823  this->comm().send(0, xfer_ids, id_tag);
824  this->comm().send(0, xfer_coords, coord_tag);
825 #ifdef LIBMESH_ENABLE_UNIQUE_ID
826  this->comm().send(0, xfer_unique_ids, unique_id_tag);
827 #endif // LIBMESH_ENABLE_UNIQUE_ID
828  }
829 
830  // -------------------------------------------------------
831  // Receive the messages and write the output on processor 0.
832  if (this->processor_id() == 0)
833  {
834  // Wait for all the receives to complete. We have no
835  // need for the statuses since we already know the
836  // buffer sizes.
837  Parallel::wait (id_request_handles);
838  Parallel::wait (coord_request_handles);
839 #ifdef LIBMESH_ENABLE_UNIQUE_ID
840  Parallel::wait (unique_id_request_handles);
841 #endif // LIBMESH_ENABLE_UNIQUE_ID
842 
843  // Write the coordinates in this block.
844  std::size_t tot_id_size=0;
845 
846  for (unsigned int pid=0; pid<this->n_processors(); pid++)
847  {
848  tot_id_size += recv_ids[pid].size();
849  libmesh_assert_equal_to(recv_coords[pid].size(),
850  recv_ids[pid].size()*LIBMESH_DIM);
851 #ifdef LIBMESH_ENABLE_UNIQUE_ID
852  libmesh_assert_equal_to
853  (recv_ids[pid].size(), recv_unique_ids[pid].size());
854 #endif // LIBMESH_ENABLE_UNIQUE_ID
855  }
856 
857  libmesh_assert_less_equal
858  (tot_id_size, std::min(io_blksize, std::size_t(n_nodes)));
859 
860  coords.resize (3*tot_id_size);
861 #ifdef LIBMESH_ENABLE_UNIQUE_ID
862  unique_ids.resize(tot_id_size);
863 #endif
864 
865  for (unsigned int pid=0; pid<this->n_processors(); pid++)
866  for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
867  {
868  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
869 
870 #ifdef LIBMESH_ENABLE_UNIQUE_ID
871  libmesh_assert_less (local_idx, unique_ids.size());
872 
873  unique_ids[local_idx] = recv_unique_ids[pid][idx];
874 #endif
875 
876  libmesh_assert_less ((3*local_idx+2), coords.size());
877  libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());
878 
879  coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
880 #if LIBMESH_DIM > 1
881  coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
882 #else
883  coords[3*local_idx+1] = 0.;
884 #endif
885 #if LIBMESH_DIM > 2
886  coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
887 #else
888  coords[3*local_idx+2] = 0.;
889 #endif
890 
891  n_written++;
892  }
893 
894  io.data_stream (coords.empty() ? libmesh_nullptr : &coords[0],
895  cast_int<unsigned int>(coords.size()), 3);
896  }
897  }
898 
899  if (this->processor_id() == 0)
900  libmesh_assert_equal_to (n_written, n_nodes);
901 
902 #ifdef LIBMESH_ENABLE_UNIQUE_ID
903  // XDR unsigned char doesn't work as anticipated
904  unsigned short write_unique_ids = 1;
905 #else
906  unsigned short write_unique_ids = 0;
907 #endif
908  if (this->processor_id() == 0)
909  io.data (write_unique_ids, "# presence of unique ids");
910 
911 #ifdef LIBMESH_ENABLE_UNIQUE_ID
912  n_written = 0;
913 
914  for (std::size_t blk=0, last_node=0; last_node<n_nodes; blk++)
915  {
916  const std::size_t first_node = blk*io_blksize;
917  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
918 
919  // Build up the xfer buffers on each processor
920  MeshBase::const_node_iterator
921  it = mesh.local_nodes_begin(),
922  end = mesh.local_nodes_end();
923 
924  xfer_ids.clear();
925  xfer_unique_ids.clear();
926 
927  for (; it!=end; ++it)
928  if (((*it)->id() >= first_node) && // node in [first_node, last_node)
929  ((*it)->id() < last_node))
930  {
931  xfer_ids.push_back((*it)->id());
932  xfer_unique_ids.push_back((*it)->unique_id());
933  }
934 
935  //-------------------------------------
936  // Send the xfer buffers to processor 0
937  std::vector<std::size_t> ids_size;
938 
939  const std::size_t my_ids_size = xfer_ids.size();
940 
941  // explicitly gather ids_size
942  this->comm().gather (0, my_ids_size, ids_size);
943 
944  // We will have lots of simultaneous receives if we are
945  // processor 0, so let's use nonblocking receives.
946  std::vector<Parallel::Request>
947  unique_id_request_handles(this->n_processors()-1),
948  id_request_handles(this->n_processors()-1);
949 
950  Parallel::MessageTag
951  unique_id_tag = mesh.comm().get_unique_tag(1236),
952  id_tag = mesh.comm().get_unique_tag(1237);
953 
954  // Post the receives -- do this on processor 0 only.
955  if (this->processor_id() == 0)
956  {
957  for (unsigned int pid=0; pid<this->n_processors(); pid++)
958  {
959  recv_ids[pid].resize(ids_size[pid]);
960  recv_unique_ids[pid].resize(ids_size[pid]);
961 
962  if (pid == 0)
963  {
964  recv_ids[0] = xfer_ids;
965  recv_unique_ids[0] = xfer_unique_ids;
966  }
967  else
968  {
969  this->comm().receive (pid, recv_ids[pid],
970  id_request_handles[pid-1],
971  id_tag);
972  this->comm().receive (pid, recv_unique_ids[pid],
973  unique_id_request_handles[pid-1],
974  unique_id_tag);
975  }
976  }
977  }
978  else
979  {
980  // Send -- do this on all other processors.
981  this->comm().send(0, xfer_ids, id_tag);
982  this->comm().send(0, xfer_unique_ids, unique_id_tag);
983  }
984 
985  // -------------------------------------------------------
986  // Receive the messages and write the output on processor 0.
987  if (this->processor_id() == 0)
988  {
989  // Wait for all the receives to complete. We have no
990  // need for the statuses since we already know the
991  // buffer sizes.
992  Parallel::wait (id_request_handles);
993  Parallel::wait (unique_id_request_handles);
994 
995  // Write the unique ids in this block.
996  std::size_t tot_id_size=0;
997 
998  for (unsigned int pid=0; pid<this->n_processors(); pid++)
999  {
1000  tot_id_size += recv_ids[pid].size();
1001  libmesh_assert_equal_to
1002  (recv_ids[pid].size(), recv_unique_ids[pid].size());
1003  }
1004 
1005  libmesh_assert_less_equal
1006  (tot_id_size, std::min(io_blksize, std::size_t(n_nodes)));
1007 
1008  unique_ids.resize(tot_id_size);
1009 
1010  for (unsigned int pid=0; pid<this->n_processors(); pid++)
1011  for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
1012  {
1013  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
1014 
1015  libmesh_assert_less (local_idx, unique_ids.size());
1016 
1017  unique_ids[local_idx] = recv_unique_ids[pid][idx];
1018 
1019  n_written++;
1020  }
1021 
1022  io.data_stream (unique_ids.empty() ? libmesh_nullptr : &unique_ids[0],
1023  cast_int<unsigned int>(unique_ids.size()), 1);
1024  }
1025  }
1026 
1027  if (this->processor_id() == 0)
1028  libmesh_assert_equal_to (n_written, n_nodes);
1029 
1030 #endif // LIBMESH_ENABLE_UNIQUE_ID
1031 }
Status wait(Request &r)
Definition: parallel.h:561
processor_id_type n_processors() const
const class libmesh_nullptr_t libmesh_nullptr
IterBase * end
const MT & mesh() const
Definition: mesh_output.h:216
const dof_id_type n_nodes
Definition: tecplot_io.C:67
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
static const std::size_t io_blksize
Definition: xdr_io.h:320
void gather(const unsigned int root_id, const T &send, std::vector< T > &recv) const
const Parallel::Communicator & comm() const
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
long double min(long double a, double b)
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
processor_id_type processor_id() const
void libMesh::XdrIO::write_serialized_nodesets ( Xdr io,
const header_id_type  n_nodesets 
) const
private

Write the nodeset (node boundary condition) data for a parallel, distributed mesh

Definition at line 1189 of file xdr_io.C.

References bc_id, libMesh::BoundaryInfo::boundary_ids(), libMesh::ParallelObject::comm(), libMesh::Xdr::data(), libMesh::Xdr::data_stream(), end, libMesh::Parallel::Communicator::gather(), libMesh::MeshTools::Generation::Private::idx(), libMesh::BoundaryInfo::invalid_id, libMesh::libmesh_assert(), libmesh_nullptr, libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), libMesh::ParallelObject::n_processors(), libMesh::ParallelObject::processor_id(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), write_serialized_bc_names(), and libMesh::Xdr::writing().

Referenced by polynomial_level_file_name(), and write().

1190 {
1191  libmesh_assert (io.writing());
1192 
1193  // convenient reference to our mesh
1194  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1195 
1196  // and our boundary info object
1197  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1198 
1199  // Version 0.9.2+ introduces entity names
1200  write_serialized_bc_names(io, boundary_info, false); // nodeset names
1201 
1202  header_id_type n_nodesets_out = n_nodesets;
1203  if (this->processor_id() == 0)
1204  io.data (n_nodesets_out, "# number of nodesets");
1205  n_nodesets_out = 0;
1206 
1207  if (!n_nodesets) return;
1208 
1209  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1210  std::vector<std::size_t> bc_sizes(this->n_processors());
1211 
1212  // Container to catch boundary IDs handed back by BoundaryInfo
1213  std::vector<boundary_id_type> nodeset_ids;
1214 
1215  MeshBase::const_node_iterator
1216  it = mesh.local_nodes_begin(),
1217  end = mesh.local_nodes_end();
1218 
1219  dof_id_type n_node=0;
1220  for (; it!=end; ++it)
1221  {
1222  const Node * node = *it;
1223  boundary_info.boundary_ids (node, nodeset_ids);
1224  for (std::vector<boundary_id_type>::const_iterator id_it=nodeset_ids.begin(); id_it!=nodeset_ids.end(); ++id_it)
1225  {
1226  const boundary_id_type bc_id = *id_it;
1227  if (bc_id != BoundaryInfo::invalid_id)
1228  {
1229  xfer_bcs.push_back ((*it)->id());
1230  xfer_bcs.push_back (bc_id);
1231  }
1232  }
1233  }
1234 
1235  xfer_bcs.push_back(n_node);
1236  std::size_t my_size = xfer_bcs.size();
1237  this->comm().gather (0, my_size, bc_sizes);
1238 
1239  // All processors send their xfer buffers to processor 0
1240  // Processor 0 will receive all buffers and write out the bcs
1241  if (this->processor_id() == 0)
1242  {
1243  dof_id_type node_offset = 0;
1244  for (unsigned int pid=0; pid<this->n_processors(); pid++)
1245  {
1246  recv_bcs.resize(bc_sizes[pid]);
1247  if (pid == 0)
1248  recv_bcs = xfer_bcs;
1249  else
1250  this->comm().receive (pid, recv_bcs);
1251 
1252  const dof_id_type my_n_node =
1253  cast_int<dof_id_type>(recv_bcs.back());
1254  recv_bcs.pop_back();
1255 
1256  for (std::size_t idx=0; idx<recv_bcs.size(); idx += 2, n_nodesets_out++)
1257  recv_bcs[idx+0] += node_offset;
1258 
1259  io.data_stream (recv_bcs.empty() ? libmesh_nullptr : &recv_bcs[0],
1260  cast_int<unsigned int>(recv_bcs.size()), 2);
1261  node_offset += my_n_node;
1262  }
1263  libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
1264  }
1265  else
1266  this->comm().send (0, xfer_bcs);
1267 }
processor_id_type n_processors() const
const class libmesh_nullptr_t libmesh_nullptr
IterBase * end
const MT & mesh() const
Definition: mesh_output.h:216
boundary_id_type bc_id
Definition: xdr_io.C:50
libmesh_assert(j)
int8_t boundary_id_type
Definition: id_types.h:51
static const boundary_id_type invalid_id
void write_serialized_bc_names(Xdr &io, const BoundaryInfo &info, bool is_sideset) const
Definition: xdr_io.C:1271
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
uint32_t header_id_type
Definition: xdr_io.h:60
void gather(const unsigned int root_id, const T &send, std::vector< T > &recv) const
const Parallel::Communicator & comm() const
Status receive(const unsigned int src_processor_id, T &buf, const MessageTag &tag=any_tag) const
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
uint32_t dof_id_type
Definition: id_types.h:64
processor_id_type processor_id() const
void libMesh::XdrIO::write_serialized_shellface_bcs ( Xdr & io,
const header_id_type  n_shellface_bcs 
) const
private

Write the "shell face" boundary conditions for a parallel, distributed mesh. NEW in 1.1.0 format.

Definition at line 1182 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and write().

1183 {
1184  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
1185 }
void write_serialized_bcs_helper(Xdr &io, const header_id_type n_side_bcs, const std::string bc_type) const
Definition: xdr_io.C:1035
void libMesh::XdrIO::write_serialized_side_bcs ( Xdr & io,
const header_id_type  n_side_bcs 
) const
private

Write the side boundary conditions for a parallel, distributed mesh.

Definition at line 1168 of file xdr_io.C.

References write_serialized_bcs_helper().

Referenced by polynomial_level_file_name(), and write().

1169 {
1170  write_serialized_bcs_helper(io, n_side_bcs, "side");
1171 }
void write_serialized_bcs_helper(Xdr &io, const header_id_type n_side_bcs, const std::string bc_type) const
Definition: xdr_io.C:1035
void libMesh::XdrIO::write_serialized_subdomain_names ( Xdr & io) const
private

Write subdomain name information - NEW in 0.9.2 format

Definition at line 304 of file xdr_io.C.

References libMesh::Xdr::data(), libMesh::MeshBase::get_subdomain_name_map(), libMesh::MeshInput< MeshBase >::mesh(), libMesh::MeshOutput< MT >::mesh(), and libMesh::ParallelObject::processor_id().

Referenced by polynomial_level_file_name(), and write().

305 {
306  if (this->processor_id() == 0)
307  {
308  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
309 
310  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
311 
312  std::vector<header_id_type> subdomain_ids; subdomain_ids.reserve(subdomain_map.size());
313  std::vector<std::string> subdomain_names; subdomain_names.reserve(subdomain_map.size());
314 
315  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
316  // return writable references in mesh_base, it's possible for the user to leave some entity names
317  // blank. We can't write those to the XDA file.
318  header_id_type n_subdomain_names = 0;
319  std::map<subdomain_id_type, std::string>::const_iterator it_end = subdomain_map.end();
320  for (std::map<subdomain_id_type, std::string>::const_iterator it = subdomain_map.begin(); it != it_end; ++it)
321  {
322  if (!it->second.empty())
323  {
324  n_subdomain_names++;
325  subdomain_ids.push_back(it->first);
326  subdomain_names.push_back(it->second);
327  }
328  }
329 
330  io.data(n_subdomain_names, "# subdomain id to name map");
331  // Write out the ids and names in two vectors
332  if (n_subdomain_names)
333  {
334  io.data(subdomain_ids);
335  io.data(subdomain_names);
336  }
337  }
338 }
const MT & mesh() const
Definition: mesh_output.h:216
uint32_t header_id_type
Definition: xdr_io.h:60
processor_id_type processor_id() const

Member Data Documentation

std::string libMesh::XdrIO::_bc_file_name
private

Definition at line 312 of file xdr_io.h.

Referenced by boundary_condition_file_name().

bool libMesh::XdrIO::_binary
private

Definition at line 305 of file xdr_io.h.

Referenced by binary().

header_id_type libMesh::XdrIO::_field_width
private

Definition at line 310 of file xdr_io.h.

Referenced by read(), and read_serialized_nodes().

const bool libMesh::MeshOutput< MeshBase >::_is_parallel_format
protectedinherited

Flag specifying whether this format is parallel-capable. If this is false (default) I/O is only permitted when the mesh has been serialized.

Definition at line 141 of file mesh_output.h.

Referenced by libMesh::FroIO::write(), libMesh::PostscriptIO::write(), and libMesh::EnsightIO::write().

bool libMesh::XdrIO::_legacy
private

Definition at line 306 of file xdr_io.h.

Referenced by legacy().

std::string libMesh::XdrIO::_p_level_file
private

Definition at line 315 of file xdr_io.h.

Referenced by polynomial_level_file_name().

std::string libMesh::XdrIO::_partition_map_file
private

Definition at line 313 of file xdr_io.h.

Referenced by partition_map_file_name().

const bool libMesh::MeshOutput< MeshBase >::_serial_only_needed_on_proc_0
protectedinherited

Flag specifying whether this format can be written by only serializing the mesh to processor zero.

If this is false (default), the mesh will be serialized to all processors.

Definition at line 150 of file mesh_output.h.

std::string libMesh::XdrIO::_subdomain_map_file
private

Definition at line 314 of file xdr_io.h.

Referenced by subdomain_map_file_name().

std::string libMesh::XdrIO::_version
private

Definition at line 311 of file xdr_io.h.

Referenced by version().

bool libMesh::XdrIO::_write_parallel
private

Definition at line 308 of file xdr_io.h.

Referenced by set_auto_parallel(), set_write_parallel(), and write_parallel().

bool libMesh::XdrIO::_write_serial
private

Definition at line 307 of file xdr_io.h.

Referenced by set_auto_parallel(), set_write_parallel(), and write_parallel().

bool libMesh::XdrIO::_write_unique_id
private

Definition at line 309 of file xdr_io.h.

Referenced by write(), and write_serialized_connectivity().

const std::size_t libMesh::XdrIO::io_blksize = 128000
staticprivate

Define the block size to use for chunked IO.

Definition at line 320 of file xdr_io.h.

Referenced by read_serialized_bcs_helper(), read_serialized_connectivity(), read_serialized_nodes(), read_serialized_nodesets(), and write_serialized_nodes().


The documentation for this class was generated from the following files: