nemesis_io.C
// The libMesh Finite Element Library.
// Copyright (C) 2002-2018 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

// C++ includes
#include <numeric> // std::accumulate

// LibMesh includes
#include "libmesh/distributed_mesh.h"
#include "libmesh/elem.h"
#include "libmesh/exodusII_io.h"
#include "libmesh/exodusII_io_helper.h"
#include "libmesh/nemesis_io.h"
#include "libmesh/nemesis_io_helper.h"
#include "libmesh/node.h"
#include "libmesh/parallel.h"
#include "libmesh/utility.h" // is_sorted, deallocate
#include "libmesh/boundary_info.h"
#include "libmesh/mesh_communication.h"
#include "libmesh/fe_type.h"
#include "libmesh/equation_systems.h"
#include "libmesh/numeric_vector.h"

namespace libMesh
{


//-----------------------------------------------
// anonymous namespace for implementation details
namespace {
struct CompareGlobalIdxMappings
{
  // strict weak ordering for a.first -> a.second mapping.  Since we can only
  // map to one value, we order on the first entry alone.
  bool operator()(const std::pair<unsigned int, unsigned int> & a,
                  const std::pair<unsigned int, unsigned int> & b) const
  { return a.first < b.first; }

  // strict weak ordering for a.first -> a.second mapping.  Lookups will
  // be in terms of a single integer, which is why we need this overload.
  bool operator()(const std::pair<unsigned int, unsigned int> & a,
                  const unsigned int b) const
  { return a.first < b; }
};
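
// A minimal usage sketch (hypothetical values): with a vector of
// (old_global_idx, new_global_idx) pairs kept sorted by this comparator,
// the heterogeneous overload lets read() below search by a bare integer:
//
//   std::vector<std::pair<unsigned int, unsigned int>> m =
//     {{2, 10}, {5, 11}, {9, 12}};  // sorted on .first
//   auto it = std::lower_bound (m.begin(), m.end(), 5u,
//                               CompareGlobalIdxMappings());
//   // it->second == 11, i.e. the new global index for old index 5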

// Nemesis & ExodusII use int for all integer values, even the ones which
// should never be negative.  We prefer unsigned as a force of habit; this
// trivial little method saves some typing and also makes sure nothing
// is horribly wrong.
template <typename T>
inline unsigned int to_uint ( const T & t )
{
  libmesh_assert_equal_to (t, static_cast<T>(static_cast<unsigned int>(t)));

  return static_cast<unsigned int>(t);
}

// test equality for a.first -> a.second mapping.  Since we can only map to
// one value, we only test the first entry.
#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API) && !defined(NDEBUG)
inline bool global_idx_mapping_equality (const std::pair<unsigned int, unsigned int> & a,
                                         const std::pair<unsigned int, unsigned int> & b)
{
  return a.first == b.first;
}
#endif

}



// ------------------------------------------------------------
// Nemesis_IO class members
Nemesis_IO::Nemesis_IO (MeshBase & mesh,
#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
                        bool single_precision
#else
                        bool
#endif
                        ) :
  MeshInput<MeshBase> (mesh, /*is_parallel_format=*/true),
  MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
  ParallelObject (mesh),
#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
  nemhelper(new Nemesis_IO_Helper(*this, false, single_precision)),
  _timestep(1),
#endif
  _verbose (false),
  _append(false),
  _allow_empty_variables(false)
{
}



// Destructor.  Defined in the C file so we can be sure to get away
// with a forward declaration of Nemesis_IO_Helper in the header file.
Nemesis_IO::~Nemesis_IO ()
{
}



void Nemesis_IO::verbose (bool set_verbosity)
{
  _verbose = set_verbosity;

#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
  // Set the verbose flag in the helper object as well.
  nemhelper->verbose = _verbose;
#endif
}



void Nemesis_IO::append(bool val)
{
  _append = val;
}



void Nemesis_IO::set_output_variables(const std::vector<std::string> & output_variables,
                                      bool allow_empty)
{
  _output_variables = output_variables;
  _allow_empty_variables = allow_empty;
}



#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
void Nemesis_IO::read (const std::string & base_filename)
{
  // On one processor, Nemesis and ExodusII should be equivalent, so
  // let's cowardly defer to that implementation...
  if (this->n_processors() == 1)
    {
      // We could do this in one line, but if the verbose flag was set in this
      // object, it would no longer be set... thus no extra print-outs for serial runs.
      // ExodusII_IO(this->mesh()).read (base_filename); // ambiguous when Nemesis_IO is multiply-inherited

      MeshBase & mesh = MeshInput<MeshBase>::mesh();
      ExodusII_IO(mesh).read (base_filename);
      return;
    }

  LOG_SCOPE ("read()","Nemesis_IO");

  // This function must be run on all processors at once
  parallel_object_only();

  if (_verbose)
    {
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "Reading Nemesis file on processor: " << this->processor_id() << std::endl;
    }

  // Construct the Nemesis filename based on the number of processors and the
  // current processor ID.
  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
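  // The Nemesis convention appends the processor count and rank to the base
  // name; e.g. (hypothetical name), with 4 processors, processor 2 would
  // typically open something like "out.e.4.2".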

  if (_verbose)
    libMesh::out << "Opening file: " << nemesis_filename << std::endl;

  // Open the Exodus file in EX_READ mode
  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/true);

  // Get a reference to the mesh.  We need to be specific
  // since Nemesis_IO is multiply-inherited
  // MeshBase & mesh = this->mesh();
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // We're reading a file on each processor, so our mesh is
  // partitioned into that many parts as it is created
  this->set_n_partitions(this->n_processors());
192 
193  // Local information: Read the following information from the standard Exodus header
194  // title[0]
195  // num_dim
196  // num_nodes
197  // num_elem
198  // num_elem_blk
199  // num_node_sets
200  // num_side_sets
201  nemhelper->read_header();
202  nemhelper->print_header();
203 
204  // Get global information: number of nodes, elems, blocks, nodesets and sidesets
205  nemhelper->get_init_global();
206 
207  // Get "load balance" information. This includes the number of internal & border
208  // nodes and elements as well as the number of communication maps.
209  nemhelper->get_loadbal_param();
210 
211  // Do some error checking
212  if (nemhelper->num_external_nodes)
213  libmesh_error_msg("ERROR: there should be no external nodes in an element-based partitioning!");
214 
215  libmesh_assert_equal_to (nemhelper->num_nodes,
216  (nemhelper->num_internal_nodes +
217  nemhelper->num_border_nodes));
218 
219  libmesh_assert_equal_to (nemhelper->num_elem,
220  (nemhelper->num_internal_elems +
221  nemhelper->num_border_elems));
222 
223  libmesh_assert_less_equal (nemhelper->num_nodes, nemhelper->num_nodes_global);
224  libmesh_assert_less_equal (nemhelper->num_elem, nemhelper->num_elems_global);
225 
  // Read nodes from the exodus file: this fills the nemhelper->x,y,z arrays.
  nemhelper->read_nodes();

  // Reads the nemhelper->node_num_map array, node_num_map[i] is the global node number for
  // local node number i.
  nemhelper->read_node_num_map();

  // The get_cmap_params() function reads in the:
  //   node_cmap_ids[],
  //   node_cmap_node_cnts[],
  //   elem_cmap_ids[],
  //   elem_cmap_elem_cnts[]
  nemhelper->get_cmap_params();

  // Read the IDs of the interior, boundary, and external nodes.  This function
  // fills the vectors:
  //   node_mapi[],
  //   node_mapb[],
  //   node_mape[]
  nemhelper->get_node_map();

  // Read each node communication map for this processor.  This function
  // fills the vectors of vectors named:
  //   node_cmap_node_ids[][]
  //   node_cmap_proc_ids[][]
  nemhelper->get_node_cmap();

  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_cnts.size());
  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_ids.size());
  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_proc_ids.size());

#ifndef NDEBUG
  // We expect the communication maps to be symmetric - e.g. if processor i thinks it
  // communicates with processor j, then processor j should also be expecting to
  // communicate with i.  We can assert that here easily enough with an alltoall,
  // but let's only do it when not in optimized mode to limit unnecessary communication.
  {
    std::vector<unsigned char> pid_send_partner (this->n_processors(), 0);

    // strictly speaking, we should expect to communicate with ourself...
    pid_send_partner[this->processor_id()] = 1;

    // mark each processor id we reference with a node cmap
    for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
      {
        libmesh_assert_less (to_uint(nemhelper->node_cmap_ids[cmap]), this->n_processors());

        pid_send_partner[nemhelper->node_cmap_ids[cmap]] = 1;
      }

    // Copy the send pairing so we can catch the receive pairing and
    // test for equality
    const std::vector<unsigned char> pid_recv_partner (pid_send_partner);

    this->comm().alltoall (pid_send_partner);

    libmesh_assert (pid_send_partner == pid_recv_partner);
  }
#endif

  // We now have enough information to infer node ownership.  We start by assuming
  // we own all the nodes on this processor.  We will then interrogate the
  // node cmaps and see if a lower-rank processor is associated with any of
  // our nodes.  If so, then that processor owns the node, not us...
  std::vector<processor_id_type> node_ownership (nemhelper->num_internal_nodes +
                                                 nemhelper->num_border_nodes,
                                                 this->processor_id());

  // a map from processor id to cmap number, to be used later
  std::map<unsigned int, unsigned int> pid_to_cmap_map;

  // For each node_cmap...
  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
    {
      // Good time for error checking...
      libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
                               nemhelper->node_cmap_node_ids[cmap].size());

      libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
                               nemhelper->node_cmap_proc_ids[cmap].size());

      // In all the samples I have seen, node_cmap_ids[cmap] is the processor
      // rank of the remote processor...
      const processor_id_type adjcnt_pid_idx =
        cast_int<processor_id_type>(nemhelper->node_cmap_ids[cmap]);

      libmesh_assert_less (adjcnt_pid_idx, this->n_processors());
      libmesh_assert_not_equal_to (adjcnt_pid_idx, this->processor_id());

      // We only expect one cmap per adjacent processor
      libmesh_assert (!pid_to_cmap_map.count(adjcnt_pid_idx));

      pid_to_cmap_map[adjcnt_pid_idx] = cmap;

      // ...and each node in that cmap...
      for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
        {
          // Are the node_cmap_ids and node_cmap_proc_ids really redundant?
          libmesh_assert_equal_to
            (adjcnt_pid_idx,
             cast_int<processor_id_type>(nemhelper->node_cmap_proc_ids[cmap][idx]));

          // we are expecting the exodus node numbering to be 1-based...
          const unsigned int local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1;

          libmesh_assert_less (local_node_idx, node_ownership.size());

          // if the adjacent processor is lower rank than the current
          // owner for this node, then it will get the node...
          node_ownership[local_node_idx] =
            std::min(node_ownership[local_node_idx], adjcnt_pid_idx);
        }
    } // We now should have established proper node ownership.
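  // For example (hypothetical ranks): if we are processor 2 and local node 7
  // appears both in the cmap shared with processor 1 and in the cmap shared
  // with processor 3, the std::min above leaves node 7 owned by processor 1,
  // the lowest rank that touches it.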
339 
340  // now that ownership is established, we can figure out how many nodes we
341  // will be responsible for numbering.
342  unsigned int num_nodes_i_must_number = 0;
343 
344  for (std::size_t idx=0; idx<node_ownership.size(); idx++)
345  if (node_ownership[idx] == this->processor_id())
346  num_nodes_i_must_number++;
347 
348  // more error checking...
349  libmesh_assert_greater_equal (num_nodes_i_must_number, to_uint(nemhelper->num_internal_nodes));
350  libmesh_assert (num_nodes_i_must_number <= to_uint(nemhelper->num_internal_nodes +
351  nemhelper->num_border_nodes));
352  if (_verbose)
353  libMesh::out << "[" << this->processor_id() << "] "
354  << "num_nodes_i_must_number="
355  << num_nodes_i_must_number
356  << std::endl;

  // The call to get_loadbal_param() gets 7 pieces of information.  We allgather
  // these now across all processors to determine some global numberings.  We should
  // also gather the number of nodes each processor thinks it will number so that
  // we can (i) determine our offset, and (ii) do some error checking.
  std::vector<int> all_loadbal_data ( 8 );
  all_loadbal_data[0] = nemhelper->num_internal_nodes;
  all_loadbal_data[1] = nemhelper->num_border_nodes;
  all_loadbal_data[2] = nemhelper->num_external_nodes;
  all_loadbal_data[3] = nemhelper->num_internal_elems;
  all_loadbal_data[4] = nemhelper->num_border_elems;
  all_loadbal_data[5] = nemhelper->num_node_cmaps;
  all_loadbal_data[6] = nemhelper->num_elem_cmaps;
  all_loadbal_data[7] = num_nodes_i_must_number;

  this->comm().allgather (all_loadbal_data, /* identical_buffer_sizes = */ true);
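  // After the allgather, the 8 entries from processor pid occupy slots
  // all_loadbal_data[8*pid + 0] .. all_loadbal_data[8*pid + 7].  For example
  // (hypothetical sizes), with 2 processors the vector holds 16 ints, and
  // processor 1's num_nodes_i_must_number sits at all_loadbal_data[8*1 + 7],
  // which is exactly how the offset computations below index it.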

  // OK, we are now in a position to request new global indices for all the nodes
  // we do not own

  // Let's get a unique message tag to use for send()/receive()
  Parallel::MessageTag nodes_tag = mesh.comm().get_unique_tag(12345);

  std::vector<std::vector<int>>
    needed_node_idxs (nemhelper->num_node_cmaps); // the indices we will ask for

  std::vector<Parallel::Request>
    needed_nodes_requests (nemhelper->num_node_cmaps);

  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
    {
      // We know we will need no more indices than there are nodes
      // in this cmap, but that number is only an upper bound in general,
      // since the neighboring processor associated with the cmap
      // may not actually own every node in it
      needed_node_idxs[cmap].reserve (nemhelper->node_cmap_node_cnts[cmap]);

      const unsigned int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];

      // ...and each node in that cmap...
      for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
        {
          const unsigned int
            local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1,
            owning_pid_idx = node_ownership[local_node_idx];

          // add it to the request list for its owning processor.
          if (owning_pid_idx == adjcnt_pid_idx)
            {
              const unsigned int
                global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
              needed_node_idxs[cmap].push_back(global_node_idx);
            }
        }
      // now post the send for this cmap
      this->comm().send (adjcnt_pid_idx,              // destination
                         needed_node_idxs[cmap],      // send buffer
                         needed_nodes_requests[cmap], // request
                         nodes_tag);
    } // all communication requests for getting updated global indices for border
      // nodes have been initiated

  // Figure out how many nodes each processor thinks it will number and make sure
  // that it adds up to the global number of nodes.  Also, set up global node
  // index offsets for each processor.
  std::vector<unsigned int>
    all_num_nodes_i_must_number (this->n_processors());

  for (unsigned int pid=0; pid<this->n_processors(); pid++)
    all_num_nodes_i_must_number[pid] = all_loadbal_data[8*pid + 7];

  // The sum of all the entries in this vector should equal the number of global nodes
  libmesh_assert (std::accumulate(all_num_nodes_i_must_number.begin(),
                                  all_num_nodes_i_must_number.end(),
                                  0) == nemhelper->num_nodes_global);

  unsigned int my_next_node = 0;
  for (unsigned int pid=0; pid<this->processor_id(); pid++)
    my_next_node += all_num_nodes_i_must_number[pid];

  const unsigned int my_node_offset = my_next_node;
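  // This is an exclusive prefix sum.  For example (hypothetical counts): if
  // three processors will number {4, 2, 5} nodes respectively, their offsets
  // come out as {0, 4, 6}, so processor 2 hands out global ids 6..10.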

  if (_verbose)
    libMesh::out << "[" << this->processor_id() << "] "
                 << "my_node_offset="
                 << my_node_offset
                 << std::endl;

  // Add internal nodes to the DistributedMesh, using the node ID offset we
  // computed and the current processor's ID.
  for (unsigned int i=0; i<to_uint(nemhelper->num_internal_nodes); ++i)
    {
      const unsigned int local_node_idx = nemhelper->node_mapi[i]-1;
#ifndef NDEBUG
      const unsigned int owning_pid_idx = node_ownership[local_node_idx];
#endif

      // an internal node we do not own?  huh??
      libmesh_assert_equal_to (owning_pid_idx, this->processor_id());
      libmesh_assert_less (my_next_node, to_uint(nemhelper->num_nodes_global));

      // "Catch" the node pointer after addition, make sure the
      // ID matches the requested value.
      Node * added_node =
        mesh.add_point (Point(nemhelper->x[local_node_idx],
                              nemhelper->y[local_node_idx],
                              nemhelper->z[local_node_idx]),
                        my_next_node,
                        this->processor_id());

      // Make sure the node we added has the ID we thought it would
      if (added_node->id() != my_next_node)
        {
          libMesh::err << "Error, node added with ID " << added_node->id()
                       << ", but we wanted ID " << my_next_node << std::endl;
        }

      // update the local->global index map, when we are done
      // it will be 0-based.
      nemhelper->node_num_map[local_node_idx] = my_next_node++;
    }

  // Now, for the boundary nodes...  We may very well own some of them,
  // but there may be others for which we have requested the new global
  // id.  We expect to be asked for the ids of the ones we own, so
  // we need to create a map from the old global id to the new one
  // we are about to create.
  typedef std::vector<std::pair<unsigned int, unsigned int>> global_idx_mapping_type;
  global_idx_mapping_type old_global_to_new_global_map;
  old_global_to_new_global_map.reserve (num_nodes_i_must_number // total # i will have
                                        - (my_next_node         // amount i have thus far
                                           - my_node_offset));  // this should be exact!
  CompareGlobalIdxMappings global_idx_mapping_comp;

  for (unsigned int i=0; i<to_uint(nemhelper->num_border_nodes); ++i)
    {
      const unsigned int
        local_node_idx = nemhelper->node_mapb[i]-1,
        owning_pid_idx = node_ownership[local_node_idx];

      // if we own it...
      if (owning_pid_idx == this->processor_id())
        {
          const unsigned int
            global_node_idx = nemhelper->node_num_map[local_node_idx]-1;

          // we will number it, and create a mapping from its old global index to
          // the new global index, for lookup purposes when neighbors come calling
          old_global_to_new_global_map.push_back(std::make_pair(global_node_idx,
                                                                my_next_node));

          // "Catch" the node pointer after addition, make sure the
          // ID matches the requested value.
          Node * added_node =
            mesh.add_point (Point(nemhelper->x[local_node_idx],
                                  nemhelper->y[local_node_idx],
                                  nemhelper->z[local_node_idx]),
                            my_next_node,
                            this->processor_id());

          // Make sure the node we added has the ID we thought it would
          if (added_node->id() != my_next_node)
            {
              libMesh::err << "Error, node added with ID " << added_node->id()
                           << ", but we wanted ID " << my_next_node << std::endl;
            }

          // update the local->global index map, when we are done
          // it will be 0-based.
          nemhelper->node_num_map[local_node_idx] = my_next_node++;
        }
    }
  // That should cover numbering all the nodes which belong to us...
  libmesh_assert_equal_to (num_nodes_i_must_number, (my_next_node - my_node_offset));

  // Let's sort the mapping so we can efficiently answer requests
  std::sort (old_global_to_new_global_map.begin(),
             old_global_to_new_global_map.end(),
             global_idx_mapping_comp);

  // and it had better be unique...
  libmesh_assert (std::unique (old_global_to_new_global_map.begin(),
                               old_global_to_new_global_map.end(),
                               global_idx_mapping_equality) ==
                  old_global_to_new_global_map.end());

  // We can now catch incoming requests and process them.  For efficiency,
  // let's process whatever message is available next.
  std::map<unsigned int, std::vector<int>> requested_node_idxs; // the indices asked of us

  std::vector<Parallel::Request> requested_nodes_requests(nemhelper->num_node_cmaps);

  // We know we will receive the request from a given processor before
  // we receive its reply to our request.  However, we may receive
  // a request and a response from one processor before getting
  // a request from another processor.  So what we are doing here
  // is processing whatever message comes next, while recognizing
  // we will receive a request from a processor before receiving
  // its reply.
  std::vector<bool> processed_cmap (nemhelper->num_node_cmaps, false);
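  // For example (hypothetical ordering on 3 processors): processor 0 might
  // see request(1), request(2), reply(2), reply(1).  The first message from
  // each partner is always its request; the second is its reply.  The
  // processed_cmap flags record which partners have already sent a request.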

  for (unsigned int comm_step=0; comm_step<2*to_uint(nemhelper->num_node_cmaps); comm_step++)
    {
      // query the first message which is available
      const Parallel::Status
        status (this->comm().probe (Parallel::any_source,
                                    nodes_tag));
      const unsigned int
        requesting_pid_idx = status.source(),
        source_pid_idx     = status.source();

      // this had better be from a processor we are expecting...
      libmesh_assert (pid_to_cmap_map.count(requesting_pid_idx));

      // the local cmap which corresponds to the source processor
      const unsigned int cmap = pid_to_cmap_map[source_pid_idx];

      if (!processed_cmap[cmap])
        {
          processed_cmap[cmap] = true;

          // we should only get one request per paired processor
          libmesh_assert (!requested_node_idxs.count(requesting_pid_idx));

          // get a reference to the request buffer for this processor to
          // avoid repeated map lookups
          std::vector<int> & xfer_buf (requested_node_idxs[requesting_pid_idx]);

          // actually receive the message.
          this->comm().receive (requesting_pid_idx, xfer_buf, nodes_tag);

          // Fill the request
          for (std::size_t i=0; i<xfer_buf.size(); i++)
            {
              // the requested old global node index, *now 0-based*
              const unsigned int old_global_node_idx = xfer_buf[i];

              // find the new global node index for the requested node -
              // note that requesting_pid_idx thinks we own this node,
              // so we had better!
              const global_idx_mapping_type::const_iterator it =
                std::lower_bound (old_global_to_new_global_map.begin(),
                                  old_global_to_new_global_map.end(),
                                  old_global_node_idx,
                                  global_idx_mapping_comp);

              libmesh_assert (it != old_global_to_new_global_map.end());
              libmesh_assert_equal_to (it->first, old_global_node_idx);
              libmesh_assert_greater_equal (it->second, my_node_offset);
              libmesh_assert_less (it->second, my_next_node);

              // overwrite the requested old global node index with the new global index
              xfer_buf[i] = it->second;
            }

          // and send the new global indices back to the processor which asked for them
          this->comm().send (requesting_pid_idx,
                             xfer_buf,
                             requested_nodes_requests[cmap],
                             nodes_tag);
        } // done processing the request

      // this is the second time we have heard from this processor,
      // so it must be its reply to our request
      else
        {
          // a long time ago, we sent off our own requests.  Now it is time to catch the
          // replies and get the new global node numbering.  Note that for any reply
          // we receive, the corresponding nonblocking send from above *must* have been
          // completed, since the reply is in response to that request!!

          // if we have received a reply, our send *must* have completed
          // (note we never actually need to wait on the request)
          libmesh_assert (needed_nodes_requests[cmap].test());
          libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_ids[cmap]), source_pid_idx);

          // now post the receive for this cmap
          this->comm().receive (source_pid_idx,
                                needed_node_idxs[cmap],
                                nodes_tag);

          libmesh_assert_less_equal (needed_node_idxs[cmap].size(),
                                     nemhelper->node_cmap_node_ids[cmap].size());

          for (std::size_t i=0, j=0; i<nemhelper->node_cmap_node_ids[cmap].size(); i++)
            {
              const unsigned int
                local_node_idx = nemhelper->node_cmap_node_ids[cmap][i]-1,
                owning_pid_idx = node_ownership[local_node_idx];

              // if this node is owned by source_pid_idx, its new global id
              // is in the buffer we just received
              if (owning_pid_idx == source_pid_idx)
                {
                  libmesh_assert_less (j, needed_node_idxs[cmap].size());

                  const unsigned int // now 0-based!
                    global_node_idx = needed_node_idxs[cmap][j++];

                  // "Catch" the node pointer after addition, make sure the
                  // ID matches the requested value.
                  Node * added_node =
                    mesh.add_point (Point(nemhelper->x[local_node_idx],
                                          nemhelper->y[local_node_idx],
                                          nemhelper->z[local_node_idx]),
                                    cast_int<dof_id_type>(global_node_idx),
                                    cast_int<processor_id_type>(source_pid_idx));

                  // Make sure the node we added has the ID we thought it would
                  if (added_node->id() != global_node_idx)
                    {
                      libMesh::err << "Error, node added with ID " << added_node->id()
                                   << ", but we wanted ID " << global_node_idx << std::endl;
                    }

                  // update the local->global index map, when we are done
                  // it will be 0-based.
                  nemhelper->node_num_map[local_node_idx] = global_node_idx;

                  // we are not really going to use my_next_node again, but we can
                  // keep incrementing it to track how many nodes we have added
                  // to the mesh
                  my_next_node++;
                }
            }
        }
    } // end of node index communication loop

  // we had better have added all the nodes we need to!
  libmesh_assert_equal_to ((my_next_node - my_node_offset), to_uint(nemhelper->num_nodes));

  // After all that, we should be done with all node-related arrays *except* the
  // node_num_map, which we have transformed to use our new numbering...
  // So let's clean up the arrays we are done with.
  {
    Utility::deallocate (nemhelper->node_mapi);
    Utility::deallocate (nemhelper->node_mapb);
    Utility::deallocate (nemhelper->node_mape);
    Utility::deallocate (nemhelper->node_cmap_ids);
    Utility::deallocate (nemhelper->node_cmap_node_cnts);
    Utility::deallocate (nemhelper->node_cmap_node_ids);
    Utility::deallocate (nemhelper->node_cmap_proc_ids);
    Utility::deallocate (nemhelper->x);
    Utility::deallocate (nemhelper->y);
    Utility::deallocate (nemhelper->z);
    Utility::deallocate (needed_node_idxs);
    Utility::deallocate (node_ownership);
  }

  Parallel::wait (needed_nodes_requests);
  Parallel::wait (requested_nodes_requests);
  requested_node_idxs.clear();

  // See what the node count is up to now.
  if (_verbose)
    {
      // Report the number of nodes which have been added locally
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "mesh.n_nodes()=" << mesh.n_nodes() << std::endl;

      // Report the number of nodes that have been added in total.
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "mesh.parallel_n_nodes()=" << mesh.parallel_n_nodes() << std::endl;
    }



  // --------------------------------------------------------------------------------
  // --------------------------------------------------------------------------------
  // --------------------------------------------------------------------------------


  // We can now read in the elements... Exodus stores them in blocks in which all
  // elements have the same geometric type.  This code is adapted directly from exodusII_io.C

  // Assertion: The sum of the border and internal elements on all processors
  // should equal nemhelper->num_elems_global
#ifndef NDEBUG
  {
    int sum_internal_elems=0, sum_border_elems=0;
    for (unsigned int j=3,c=0; c<this->n_processors(); j+=8,++c)
      sum_internal_elems += all_loadbal_data[j];

    for (unsigned int j=4,c=0; c<this->n_processors(); j+=8,++c)
      sum_border_elems += all_loadbal_data[j];

    if (_verbose)
      {
        libMesh::out << "[" << this->processor_id() << "] ";
        libMesh::out << "sum_internal_elems=" << sum_internal_elems << std::endl;

        libMesh::out << "[" << this->processor_id() << "] ";
        libMesh::out << "sum_border_elems=" << sum_border_elems << std::endl;
      }

    libmesh_assert_equal_to (sum_internal_elems+sum_border_elems, nemhelper->num_elems_global);
  }
#endif

  // We need to set the mesh dimension, but the following...
  // mesh.set_mesh_dimension(static_cast<unsigned int>(nemhelper->num_dim));
  //
  // ... is not sufficient since some codes report num_dim==3 for two-dimensional
  // meshes living in 3D, even though all the elements are of 2D type.  Therefore,
  // we instead use the dimension of the highest-dimensional element found as the
  // Mesh dimension, similar to what the Exodus reader does, except here it
  // requires a parallel communication.
  elems_of_dimension.resize(4, false); // will use 1-based

  // Compute my_elem_offset, the amount by which to offset the local elem numbering
  // on my processor.
  unsigned int my_next_elem = 0;
  for (unsigned int pid=0; pid<this->processor_id(); ++pid)
    my_next_elem += (all_loadbal_data[8*pid + 3]+  // num_internal_elems, proc pid
                     all_loadbal_data[8*pid + 4]); // num_border_elems, proc pid
  const unsigned int my_elem_offset = my_next_elem;

  if (_verbose)
    libMesh::out << "[" << this->processor_id() << "] "
                 << "my_elem_offset=" << my_elem_offset << std::endl;


  // Fills in the:
  //   global_elem_blk_ids[] and
  //   global_elem_blk_cnts[] arrays.
  nemhelper->get_eb_info_global();

  //   // Fills in the vectors
  //   // elem_mapi[num_internal_elems]
  //   // elem_mapb[num_border_elems  ]
  //   // These tell which of the (locally-numbered) elements are internal and which are border elements.
  //   // In our test example these arrays are sorted (but non-contiguous), which makes it possible to
  //   // binary search for each element ID... however I don't think we need to distinguish between the
  //   // two types, since either can have nodes on the boundary!
  //   nemhelper->get_elem_map();

  // Fills in the vectors of vectors:
  //   elem_cmap_elem_ids[][]
  //   elem_cmap_side_ids[][]
  //   elem_cmap_proc_ids[][]
  // These arrays are of size num_elem_cmaps * elem_cmap_elem_cnts[i], i = 0..num_elem_cmaps
  nemhelper->get_elem_cmap();

  // Get information about the element blocks:
  // (read in the array nemhelper->block_ids[])
  nemhelper->read_block_info();

  // Reads the nemhelper->elem_num_map array, elem_num_map[i] is the global element number for
  // local element number i.
  nemhelper->read_elem_num_map();

  // Instantiate the ElementMaps interface.  This is what translates LibMesh's
  // element numbering scheme to Exodus's.
  ExodusII_IO_Helper::ElementMaps em;

  // Read in the element connectivity for each block by
  // looping over all the blocks.
  for (unsigned int i=0; i<to_uint(nemhelper->num_elem_blk); i++)
    {
      // Read the information for block i:  For nemhelper->block_ids[i], reads
      //   elem_type
      //   num_elem_this_blk
      //   num_nodes_per_elem
      //   num_attr
      //   connect <-- the nodal connectivity array for each element in the block.
      nemhelper->read_elem_in_block(i);

      // Note that with parallel files it is possible we have no elements in
      // this block!
      if (!nemhelper->num_elem_this_blk) continue;

      // Set subdomain ID based on the block ID.
      subdomain_id_type subdomain_id =
        cast_int<subdomain_id_type>(nemhelper->block_ids[i]);

      // Create a type string (this uses the null-terminated string ctor).
      const std::string type_str ( nemhelper->elem_type.data() );

      // Set any relevant node/edge maps for this element
      const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(type_str);

      if (_verbose)
        libMesh::out << "Reading a block of " << type_str << " elements." << std::endl;

      // Loop over all the elements in this block
      for (unsigned int j=0; j<to_uint(nemhelper->num_elem_this_blk); j++)
        {
          Elem * elem = Elem::build (conv.get_canonical_type()).release();
          libmesh_assert (elem);

          // Assign subdomain and processor ID to the newly-created Elem.
          // Assigning the processor ID beforehand ensures that the Elem is
          // not added as an "unpartitioned" element.  Note that the element
          // numbering in Exodus is also 1-based.
          elem->subdomain_id() = subdomain_id;
          elem->processor_id() = this->processor_id();
          elem->set_id() = my_next_elem++;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          elem->set_unique_id() = elem->id();
#endif

          // Mark that we have seen an element of the current element's
          // dimension.
          elems_of_dimension[elem->dim()] = true;

          // Add the created Elem to the Mesh, catch the Elem
          // pointer that the Mesh throws back.
          elem = mesh.add_elem (elem);

          // We are expecting the element "thrown back" by libmesh to have the ID we specified for it...
          // Check to see that really is the case.  Note that my_next_elem was post-incremented, so
          // subtract 1 when performing the check.
          if (elem->id() != my_next_elem-1)
            libmesh_error_msg("Unexpected ID " \
                              << elem->id() \
                              << " set by parallel mesh. (expecting " \
                              << my_next_elem-1 \
                              << ").");

          // Set all the nodes for this element
          if (_verbose)
            libMesh::out << "[" << this->processor_id() << "] "
                         << "Setting nodes for Elem " << elem->id() << std::endl;

          for (unsigned int k=0; k<to_uint(nemhelper->num_nodes_per_elem); k++)
            {
              const unsigned int
                gi = (j*nemhelper->num_nodes_per_elem +    // index into connectivity array
                      conv.get_node_map(k)),
                local_node_idx = nemhelper->connect[gi]-1, // local node index
                global_node_idx = nemhelper->node_num_map[local_node_idx]; // new global node index

              // Set node number
              elem->set_node(k) = mesh.node_ptr(global_node_idx);
            }
        } // for (unsigned int j=0; j<nemhelper->num_elem_this_blk; j++)
    } // end for (unsigned int i=0; i<nemhelper->num_elem_blk; i++)

  libmesh_assert_equal_to ((my_next_elem - my_elem_offset), to_uint(nemhelper->num_elem));

  if (_verbose)
    {
      // Print local elems_of_dimension information
      for (std::size_t i=1; i<elems_of_dimension.size(); ++i)
        libMesh::out << "[" << this->processor_id() << "] "
                     << "elems_of_dimension[" << i << "]=" << elems_of_dimension[i] << std::endl;
    }

  // Get the max dimension seen on the current processor
  unsigned char max_dim_seen = 0;
  for (std::size_t i=1; i<elems_of_dimension.size(); ++i)
    if (elems_of_dimension[i])
      max_dim_seen = static_cast<unsigned char>(i);

  // Do a global max to determine the max dimension seen by all processors.
  // It should match -- I don't think we even support calculations on meshes
  // with elements of different dimension...
  this->comm().max(max_dim_seen);

  if (_verbose)
    {
      // Print the max element dimension from all processors
      libMesh::out << "[" << this->processor_id() << "] "
                   << "max_dim_seen=" << +max_dim_seen << std::endl;
    }

  // Set the mesh dimension to the largest encountered for an element
  mesh.set_mesh_dimension(max_dim_seen);

#if LIBMESH_DIM < 3
  if (mesh.mesh_dimension() > LIBMESH_DIM)
    libmesh_error_msg("Cannot open dimension " \
                      << mesh.mesh_dimension() \
                      << " mesh file when configured without " \
                      << mesh.mesh_dimension() \
                      << "D support." );
#endif


  // Global sideset information; it is distributed as well, and it is not clear
  // whether it will require communication...?
  nemhelper->get_ss_param_global();

  if (_verbose)
    {
      libMesh::out << "[" << this->processor_id() << "] "
                   << "Read global sideset parameter information." << std::endl;

      // These global values should be the same on all processors...
      libMesh::out << "[" << this->processor_id() << "] "
                   << "Number of global sideset IDs: " << nemhelper->global_sideset_ids.size() << std::endl;
    }

  // Read *local* sideset info the same way it is done in
  // exodusII_io_helper.  May be called any time after
  // nemhelper->read_header().  This sets num_side_sets and resizes
  // elem_list, side_list, and id_list to num_elem_all_sidesets.  Note
  // that there appears to be the same number of sidesets in each file
  // but they all have different numbers of entries (some are empty).
  // Note that the sum of "nemhelper->num_elem_all_sidesets" over all
  // processors should equal the sum of the entries in the "num_global_side_counts" array
  // filled up by nemhelper->get_ss_param_global()
  nemhelper->read_sideset_info();

  if (_verbose)
    {
      libMesh::out << "[" << this->processor_id() << "] "
                   << "nemhelper->num_side_sets = " << nemhelper->num_side_sets << std::endl;

      libMesh::out << "[" << this->processor_id() << "] "
                   << "nemhelper->num_elem_all_sidesets = " << nemhelper->num_elem_all_sidesets << std::endl;

      if (nemhelper->num_side_sets > 0)
        {
          libMesh::out << "Sideset names are: ";
          for (const auto & pr : nemhelper->id_to_ss_names)
            libMesh::out << "(" << pr.first << "," << pr.second << ") ";
          libMesh::out << std::endl;
        }
    }

#ifdef DEBUG
  {
    // In DEBUG mode, check that the global number of sidesets reported
    // in each nemesis file matches the sum of all local sideset counts
    // from each processor.  This requires a small communication, so only
    // do it in DEBUG mode.
    int sum_num_global_side_counts = std::accumulate(nemhelper->num_global_side_counts.begin(),
                                                     nemhelper->num_global_side_counts.end(),
                                                     0);

    // MPI sum up the local files' contributions
    int sum_num_elem_all_sidesets = nemhelper->num_elem_all_sidesets;
    this->comm().sum(sum_num_elem_all_sidesets);

    if (sum_num_global_side_counts != sum_num_elem_all_sidesets)
      libmesh_error_msg("Error! global side count reported by Nemesis does not " \
                        << "match the side count reported by the individual files!");
  }
#endif

  // Note that exodus stores sidesets in separate vectors, but we want to pack
  // them all into a single vector.  So when we call read_sideset(), we pass an offset
  // into the single vector of all IDs
  for (int offset=0, i=0; i<nemhelper->num_side_sets; i++)
    {
      offset += (i > 0 ? nemhelper->num_sides_per_set[i-1] : 0); // Compute new offset
      nemhelper->read_sideset (i, offset);
    }
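  // For example (hypothetical counts): if num_sides_per_set = {3, 0, 2},
  // sideset 0 is read at offset 0, sideset 1 at offset 3, and sideset 2 at
  // offset 3 as well, so the packed elem_list/side_list/id_list vectors end
  // up with 5 entries in sideset order.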

  // Now that we have the lists of elements, sides, and IDs, we are ready to set them
  // in the BoundaryInfo object of our Mesh object.  This is slightly different in parallel...
  // For example, I think the IDs in each of the split Exodus files are numbered locally,
  // and we have to know the appropriate ID for this processor to be able to set the
  // entry in BoundaryInfo.  This offset should be given by my_elem_offset determined in
  // this function...

  // Debugging:
  // Print entries of elem_list
  // libMesh::out << "[" << this->processor_id() << "] "
  //              << "elem_list = ";
  // for (std::size_t e=0; e<nemhelper->elem_list.size(); e++)
  //   {
  //     libMesh::out << nemhelper->elem_list[e] << ", ";
  //   }
  // libMesh::out << std::endl;

  // Print entries of side_list
  // libMesh::out << "[" << this->processor_id() << "] "
  //              << "side_list = ";
  // for (std::size_t e=0; e<nemhelper->side_list.size(); e++)
  //   {
  //     libMesh::out << nemhelper->side_list[e] << ", ";
  //   }
  // libMesh::out << std::endl;


  // Loop over the entries of the elem_list, get their pointers from the
  // Mesh data structure, and assign the appropriate side to the BoundaryInfo object.
  for (std::size_t e=0; e<nemhelper->elem_list.size(); e++)
    {
      // Calling mesh.elem_ptr() is an error if no element with that
      // id exists on this processor...
      //
      // Perhaps we should iterate over elements and look them up in
      // the elem list instead?  Note that the IDs in elem_list are
      // not necessarily in order, so if we did instead loop over the
      // mesh, we would have to search the (unsorted) elem_list vector
      // for each entry!  We'll settle for doing some error checking instead.
      Elem * elem = mesh.elem_ptr
        (my_elem_offset +
         (nemhelper->elem_list[e]-1)/*Exodus numbering is 1-based!*/);

      // The side numberings in libmesh and exodus are not 1:1, so we need to map
      // whatever side number is stored in Exodus into a libmesh side number using
      // a conv object...
      const ExodusII_IO_Helper::Conversion conv =
        em.assign_conversion(elem->type());

      // Finally, we are ready to add the element and its side to the BoundaryInfo object.
      // Call the version of add_side which takes a pointer, since we have already gone to
      // the trouble of getting said pointer...
      mesh.get_boundary_info().add_side
        (elem,
         cast_int<unsigned short>(conv.get_side_map(nemhelper->side_list[e]-1)), // Exodus numbering is 1-based
         cast_int<boundary_id_type>(nemhelper->id_list[e]));
    }

  // Debugging: make sure there are as many boundary conditions in the
  // boundary ID object as expected.  Note that, at this point, the
  // mesh still thinks it's serial, so n_boundary_conds() returns the
  // local number of boundary conditions (and is therefore cheap)
  // which should match nemhelper->elem_list.size().
  {
    std::size_t nbcs = mesh.get_boundary_info().n_boundary_conds();
    if (nbcs != nemhelper->elem_list.size())
      libmesh_error_msg("[" << this->processor_id() << "] " \
                        << "BoundaryInfo contains " \
                        << nbcs \
                        << " boundary conditions, while the Exodus file had " \
                        << nemhelper->elem_list.size());
  }

  // Read global nodeset parameters?  We might be able to use this to verify
  // something about the local files, but I haven't figured out what yet...
  nemhelper->get_ns_param_global();

  // Read local nodeset info
  nemhelper->read_nodeset_info();

  if (_verbose)
    {
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "nemhelper->num_node_sets=" << nemhelper->num_node_sets << std::endl;
      if (nemhelper->num_node_sets > 0)
        {
          libMesh::out << "Nodeset names are: ";
          for (const auto & pr : nemhelper->id_to_ns_names)
            libMesh::out << "(" << pr.first << "," << pr.second << ") ";
          libMesh::out << std::endl;
        }
    }

  //   // Debugging, what is currently in nemhelper->node_num_map anyway?
  //   libMesh::out << "[" << this->processor_id() << "] "
  //                << "nemhelper->node_num_map = ";
  //
  //   for (std::size_t i=0; i<nemhelper->node_num_map.size(); ++i)
  //     libMesh::out << nemhelper->node_num_map[i] << ", ";
  //   libMesh::out << std::endl;

  // For each nodeset,
  for (int nodeset=0; nodeset<nemhelper->num_node_sets; nodeset++)
    {
      // Get the user-defined ID associated with the nodeset
      int nodeset_id = nemhelper->nodeset_ids[nodeset];

      if (_verbose)
        {
          libMesh::out << "[" << this->processor_id() << "] ";
          libMesh::out << "nemhelper->nodeset_ids[" << nodeset << "]=" << nodeset_id << std::endl;
        }

      // Read the nodeset from file, store it in a vector
      nemhelper->read_nodeset(nodeset);

      // Add nodes from the node_list to the BoundaryInfo object
      for (std::size_t node=0; node<nemhelper->node_list.size(); node++)
        {
          // Don't run past the end of our node map!
          if (to_uint(nemhelper->node_list[node]-1) >= nemhelper->node_num_map.size())
            libmesh_error_msg("Error, index is past the end of node_num_map array!");

          // We should be able to use the node_num_map data structure set up previously to determine
          // the proper global node index.
          unsigned global_node_id = nemhelper->node_num_map[ nemhelper->node_list[node]-1 /*Exodus is 1-based!*/ ];

          if (_verbose)
            {
              libMesh::out << "[" << this->processor_id() << "] "
                           << "nodeset " << nodeset
                           << ", local node number: " << nemhelper->node_list[node]-1
                           << ", global node id: " << global_node_id
                           << std::endl;
            }

          // Add the node to the BoundaryInfo object with the proper nodeset_id
          mesh.get_boundary_info().add_node
            (cast_int<dof_id_type>(global_node_id),
             cast_int<boundary_id_type>(nodeset_id));
        }
    }

  // See what the elem count is up to now.
  if (_verbose)
    {
      // Report the number of elements which have been added locally
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "mesh.n_elem()=" << mesh.n_elem() << std::endl;

      // Report the number of elements that have been added in total.
      libMesh::out << "[" << this->processor_id() << "] ";
      libMesh::out << "mesh.parallel_n_elem()=" << mesh.parallel_n_elem() << std::endl;
    }

  // For DistributedMesh, it seems that _is_serial is true by default.  A hack to
  // make the Mesh think it's parallel might be to call:
  mesh.set_distributed();

  // And if that didn't work, then we're actually reading into a
  // ReplicatedMesh, so forget about gathering neighboring elements
  if (mesh.is_serial())
    return;

  // Gather neighboring elements so that the mesh has the proper "ghost" neighbor information.
  MeshCommunication().gather_neighboring_elements(cast_ref<DistributedMesh &>(mesh));
}

#else

void Nemesis_IO::read (const std::string &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}

#endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)




#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

void Nemesis_IO::write (const std::string & base_filename)
{
  // Get a constant reference to the mesh for writing
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // Create the filename for this processor given the base_filename passed in.
  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);

  // If the user has set the append flag here, it doesn't really make
  // sense: the intent of this function is to write a Mesh with no
  // data, while "appending" is really intended to add data to an
  // existing file.  If we're verbose, print a message to this effect.
  if (_append && _verbose)
    libmesh_warning("Warning: Appending in Nemesis_IO::write() does not make sense.\n"
                    "Creating a new file instead!");

  nemhelper->create(nemesis_filename);

  // Initialize data structures and write some global Nemesis-specific data, such as
  // communication maps, to file.
  nemhelper->initialize(nemesis_filename, mesh);

  // Call the Nemesis-specialized version of write_nodal_coordinates() to write
  // the nodal coordinates.
  nemhelper->write_nodal_coordinates(mesh);

  // Call the Nemesis-specialized version of write_elements() to write
  // the elements.  Note: Must write a zero if a given global block ID has no
  // elements...
  nemhelper->write_elements(mesh);

  // Call our specialized function to write the nodesets
  nemhelper->write_nodesets(mesh);

  // Call our specialized write_sidesets() function to write the sidesets to file
  nemhelper->write_sidesets(mesh);

  // Not sure if this is really necessary, but go ahead and flush the file
  // once we have written all this stuff.
  nemhelper->ex_err = exII::ex_update(nemhelper->ex_id);

  if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
    libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
                    "are not supported by the Nemesis format.");
}

#else

void Nemesis_IO::write (const std::string &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}

#endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

void Nemesis_IO::write_timestep (const std::string & fname,
                                 const EquationSystems & es,
                                 const int timestep,
                                 const Real time)
{
  _timestep = timestep;
  write_equation_systems(fname, es);

  nemhelper->write_timestep(timestep, time);
}

#else

void Nemesis_IO::write_timestep (const std::string &,
                                 const EquationSystems &,
                                 const int,
                                 const Real)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}

#endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)



#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

void Nemesis_IO::prepare_to_write_nodal_data (const std::string & fname,
                                              const std::vector<std::string> & names)
{
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  std::string nemesis_filename = nemhelper->construct_nemesis_filename(fname);

  if (!nemhelper->opened_for_writing)
    {
      // If we're appending, open() the file with read_only=false,
      // otherwise create() it and write the contents of the mesh to
      // it.
      if (_append)
        {
          nemhelper->open(nemesis_filename.c_str(), /*read_only=*/false);
          // After opening the file, read the header so that certain
          // fields, such as the number of nodes and the number of
          // elements, are correctly initialized for the subsequent
          // call to write the nodal solution.
          nemhelper->read_header();
        }
      else
        {
          nemhelper->create(nemesis_filename);
          nemhelper->initialize(nemesis_filename, mesh);
          nemhelper->write_nodal_coordinates(mesh);
          nemhelper->write_elements(mesh);
          nemhelper->write_nodesets(mesh);
          nemhelper->write_sidesets(mesh);

          if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
            libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
                            "are not supported by the ExodusII format.");
        }
    }

  // Even if we were already open for writing, we might not have
  // initialized the nodal variable names yet.  Even if we did, it
  // should not hurt to call this twice because the routine sets a
  // flag the first time it is called.
#ifdef LIBMESH_USE_COMPLEX_NUMBERS
  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
  nemhelper->initialize_nodal_variables(complex_names);
#else
  nemhelper->initialize_nodal_variables(names);
#endif
}

#else

void Nemesis_IO::prepare_to_write_nodal_data (const std::string &,
                                              const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}

#endif



#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

void Nemesis_IO::write_nodal_data (const std::string & base_filename,
                                   const NumericVector<Number> & parallel_soln,
                                   const std::vector<std::string> & names)
{
  LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");

  // Only prepare and write nodal variables that are also in
  // _output_variables, unless _output_variables is empty.  This is the
  // same logic that is in ExodusII_IO::write_nodal_data().
  std::vector<std::string> output_names;

  if (_allow_empty_variables || !_output_variables.empty())
    output_names = _output_variables;
  else
    output_names = names;

  this->prepare_to_write_nodal_data(base_filename, output_names);

  // Call the new version of write_nodal_solution() that takes a
  // NumericVector directly without localizing.
  nemhelper->write_nodal_solution(parallel_soln, names, _timestep, output_names);
}

#else

void Nemesis_IO::write_nodal_data (const std::string &,
                                   const NumericVector<Number> &,
                                   const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}

#endif



#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)

void Nemesis_IO::write_element_data (const EquationSystems & es)
{
  if (!nemhelper->opened_for_writing)
    libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting elemental variables.");

  // To be (possibly) filled with a filtered list of variable names to output.
  std::vector<std::string> names;

  // If _output_variables is populated, only output the monomials which are
  // also in the _output_variables vector.
  if (_output_variables.size() > 0)
    {
      std::vector<std::string> monomials;
      const FEType type(CONSTANT, MONOMIAL);

      // Create a list of monomial variable names
      es.build_variable_names(monomials, &type);

      // Filter that list against the _output_variables list.  Note: if names is still empty after
      // all this filtering, all the monomial variables will be gathered
      for (const auto & var : monomials)
        if (std::find(_output_variables.begin(), _output_variables.end(), var) != _output_variables.end())
          names.push_back(var);
    }

  // Build the parallel elemental solution vector.  The 'names' vector
  // will also be updated with the names of the variables that were actually
  // written to the vector.
  std::unique_ptr<NumericVector<Number>> parallel_soln =
    es.build_parallel_elemental_solution_vector(names);

  // build_parallel_elemental_solution_vector() can return a nullptr,
  // in which case there are no constant monomial variables to write,
  // and we can just return.
  if (!parallel_soln)
    {
      if (_verbose)
        libMesh::out << "No CONSTANT, MONOMIAL data to be written." << std::endl;
      return;
    }

  // Store the list of subdomains on which each variable *that we are
  // going to plot* is active.  Note: if any of these sets is _empty_,
  // the variable in question is active on _all_ subdomains.
  std::vector<std::set<subdomain_id_type>> vars_active_subdomains;
  es.get_vars_active_subdomains(names, vars_active_subdomains);

  // Call the Nemesis version of initialize_element_variables().
  //
  // The Exodus helper version of this function writes an incorrect
  // truth table in parallel that somehow does not account for the
  // case where a subdomain does not appear on one or more of the
  // processors.  So, we override that function's behavior in the
  // Nemesis helper.
  nemhelper->initialize_element_variables(names, vars_active_subdomains);

  // Call the (non-virtual) function to write the elemental data in
  // parallel.  This function is named similarly to the corresponding
  // function in the Exodus helper, but it has a different calling
  // sequence and is not virtual or an override.
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  nemhelper->write_element_values(mesh,
                                  *parallel_soln,
                                  names,
                                  _timestep,
                                  vars_active_subdomains);
}

#else

void Nemesis_IO::write_element_data (const EquationSystems &)
{
  libmesh_not_implemented();
}

#endif
1451 
1452 
1453 
1454 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1455 
1456 void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1457  const std::vector<Number> & soln,
1458  const std::vector<std::string> & names)
1459 {
1460  LOG_SCOPE("write_nodal_data(serialized)", "Nemesis_IO");
1461 
1462  this->prepare_to_write_nodal_data(base_filename, names);
1463 
1464  nemhelper->write_nodal_solution(soln, names, _timestep);
1465 }
1466 
1467 #else
1468 
1469 void Nemesis_IO::write_nodal_data (const std::string &,
1470  const std::vector<Number> &,
1471  const std::vector<std::string> &)
1472 {
1473  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1474 }
1475 
1476 #endif
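// A sketch of the serialized path above (hypothetical caller code): the
// solution is first flattened into a std::vector<Number> that is
// replicated on every processor, e.g. via
// EquationSystems::build_solution_vector():
//
//   std::vector<Number> soln;
//   std::vector<std::string> names;
//   es.build_variable_names(names);
//   es.build_solution_vector(soln); // serialized copy on each rank
//   nem_io.write_nodal_data("out.e", soln, names);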
1477 
1478 
1479 
1480 
1481 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1482 
1483 void Nemesis_IO::write_global_data (const std::vector<Number> & soln,
1484  const std::vector<std::string> & names)
1485 {
1486  if (!nemhelper->opened_for_writing)
1487  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting global variables.");
1488 
1489 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1490 
1491  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
1492 
1493  nemhelper->initialize_global_variables(complex_names);
1494 
1495  unsigned int num_values = soln.size();
1496  unsigned int num_vars = names.size();
1497  unsigned int num_elems = num_values / num_vars;
1498 
1499  // soln holds num_elems values per variable; complex_soln will contain
1500  // the real part, imaginary part, and magnitude of each of those values
1501  std::vector<Real> complex_soln(3*num_values);
1502 
1503  for (unsigned int i=0; i<num_vars; ++i)
1504  {
1505  for (unsigned int j=0; j<num_elems; ++j)
1506  {
1507  Number value = soln[i*num_elems + j];
1508  complex_soln[3*i*num_elems + j] = value.real();
1509  }
1510  for (unsigned int j=0; j<num_elems; ++j)
1511  {
1512  Number value = soln[i*num_elems + j];
1513  complex_soln[3*i*num_elems + num_elems + j] = value.imag();
1514  }
1515  for (unsigned int j=0; j<num_elems; ++j)
1516  {
1517  Number value = soln[i*num_elems + j];
1518  complex_soln[3*i*num_elems + 2*num_elems + j] = std::abs(value);
1519  }
1520  }
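  // Layout check for the packing above, with hypothetical sizes
  // num_vars = 2 and num_elems = 3: variable i's real parts start at
  // index 3*i*num_elems, its imaginary parts num_elems slots later,
  // and its magnitudes another num_elems slots after that, i.e.
  //
  //   [ Re(v0): 0..2 | Im(v0): 3..5 | |v0|: 6..8 |
  //     Re(v1): 9..11 | Im(v1): 12..14 | |v1|: 15..17 ]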
1521 
1522  nemhelper->write_global_values(complex_soln, _timestep);
1523 
1524 #else
1525 
1526  // Call the Exodus writer implementation
1527  nemhelper->initialize_global_variables( names );
1528  nemhelper->write_global_values( soln, _timestep);
1529 
1530 #endif
1531 
1532 }
1533 
1534 #else
1535 
1536 void Nemesis_IO::write_global_data (const std::vector<Number> &,
1537  const std::vector<std::string> &)
1538 {
1539  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1540 }
1541 
1542 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1543 
1544 
1545 
1546 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1547 
1548 void Nemesis_IO::write_information_records (const std::vector<std::string> & records)
1549 {
1550  if (!nemhelper->opened_for_writing)
1551  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting information records.");
1552 
1553  // Call the Exodus writer implementation
1554  nemhelper->write_information_records( records );
1555 }
1556 
1557 
1558 #else
1559 
1560 void Nemesis_IO::write_information_records ( const std::vector<std::string> & )
1561 {
1562  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1563 }
1564 
1565 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
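// A usage sketch (hypothetical record strings): information records are
// free-form text lines stored in the file header, and the file must
// already be open for writing:
//
//   std::vector<std::string> records;
//   records.push_back("created by: my_app v1.2");
//   records.push_back("input file: params.in");
//   nem_io.write_information_records(records);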
1566 
1567 } // namespace libMesh