nemesis_io_helper.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2018 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ headers
20 #include <iomanip>
21 #include <set>
22 #include <sstream>
23 
24 // Libmesh headers
25 #include "libmesh/nemesis_io_helper.h"
26 #include "libmesh/node.h"
27 #include "libmesh/elem.h"
28 #include "libmesh/boundary_info.h"
29 #include "libmesh/parallel.h"
30 #include "libmesh/mesh_base.h"
31 #include "libmesh/numeric_vector.h"
32 #include "libmesh/int_range.h"
33 
34 #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)
35 
36 namespace libMesh
37 {
38 
39 
40 // Initialize the various integer members to zero. We can check
41 // these later to see if they've been properly initialized...
42 // The parent ExodusII_IO_Helper is created with the run_only_on_proc0
43 // flag set to false, so that we can make use of its functionality
44 // on multiple processors.
45 Nemesis_IO_Helper::Nemesis_IO_Helper(const ParallelObject & parent,
46  bool verbose_in, bool single_precision) :
47  ExodusII_IO_Helper(parent, verbose_in, /*run_only_on_proc0=*/false, /*single_precision=*/single_precision),
48  nemesis_err_flag(0),
49  num_nodes_global(0),
50  num_elems_global(0),
51  num_elem_blks_global(0),
52  num_node_sets_global(0),
53  num_side_sets_global(0),
54  num_proc(0),
55  num_proc_in_file(0),
56  ftype('\0'),
57  num_internal_nodes(0),
58  num_border_nodes(0),
59  num_external_nodes(0),
60  num_internal_elems(0),
61  num_border_elems(0),
62  num_node_cmaps(0),
63  num_elem_cmaps(0)
64 {
65  // Warn about using untested code!
66  libmesh_experimental();
67 }
68 
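// Illustrative sketch (added note, not part of the original source): a caller
// holding a ParallelObject -- in practice the Nemesis_IO reader/writer -- would
// construct the helper roughly like this:
//
//   Nemesis_IO_Helper nemhelper(some_parallel_object,
//                               /*verbose=*/false,
//                               /*single_precision=*/false);
//
// All of the counters initialized above remain zero until one of the get_*()
// or compute_*() members fills them in.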
69 
70 Nemesis_IO_Helper::~Nemesis_IO_Helper()
71 {
72  // Our destructor is called from Nemesis_IO. We close the Exodus file here since we have
73  // responsibility for managing the file's lifetime. Only call ex_update() if the file was
74  // opened for writing!
75  if (this->opened_for_writing)
76  {
77  this->ex_err = exII::ex_update(this->ex_id);
78  EX_EXCEPTIONLESS_CHECK_ERR(ex_err, "Error flushing buffers to file.");
79  }
80  this->close();
81 }
82 
83 
84 
85 void Nemesis_IO_Helper::get_init_global()
86 {
87  nemesis_err_flag =
88  Nemesis::ne_get_init_global(ex_id,
89  &num_nodes_global,
90  &num_elems_global,
91  &num_elem_blks_global,
92  &num_node_sets_global,
93  &num_side_sets_global);
94  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial global data!");
95 
96  if (verbose)
97  {
98  libMesh::out << "[" << this->processor_id() << "] " << "num_nodes_global=" << num_nodes_global << std::endl;
99  libMesh::out << "[" << this->processor_id() << "] " << "num_elems_global=" << num_elems_global << std::endl;
100  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_blks_global=" << num_elem_blks_global << std::endl;
101  libMesh::out << "[" << this->processor_id() << "] " << "num_node_sets_global=" << num_node_sets_global << std::endl;
102  libMesh::out << "[" << this->processor_id() << "] " << "num_side_sets_global=" << num_side_sets_global << std::endl;
103  }
104 }
105 
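// Hedged usage sketch (added note): when reading a Nemesis file, the global
// query above is typically combined with the other per-processor queries
// defined later in this file, in roughly the following order (the exact
// sequence used by Nemesis_IO may differ):
//
//   helper.get_init_info();      // num_proc, num_proc_in_file, ftype
//   helper.get_init_global();    // global node/elem/block/set counts
//   helper.get_eb_info_global(); // global element block ids and counts
//   helper.get_loadbal_param();  // local internal/border/external counts
//   helper.get_elem_map();       // internal and border element maps
//   helper.get_node_map();       // internal, border, and external node maps
//   helper.get_cmap_params();    // communication map sizes
//   helper.get_node_cmap();      // node communication maps
//   helper.get_elem_cmap();      // element communication maps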
106 
107 
108 void Nemesis_IO_Helper::get_ss_param_global()
109 {
110  if (num_side_sets_global > 0)
111  {
112  global_sideset_ids.resize(num_side_sets_global);
113  num_global_side_counts.resize(num_side_sets_global);
114 
115  // df = "distribution factor": optional per-node weights attached to a sideset.
116  // We don't yet have a test file that uses them, so we only read their counts here.
117  num_global_side_df_counts.resize(num_side_sets_global);
118 
119  nemesis_err_flag =
120  Nemesis::ne_get_ss_param_global(ex_id,
121  global_sideset_ids.data(),
122  num_global_side_counts.data(),
123  num_global_side_df_counts.data());
124  EX_CHECK_ERR(nemesis_err_flag, "Error reading global sideset parameters!");
125 
126  if (verbose)
127  {
128  libMesh::out << "[" << this->processor_id() << "] " << "Global Sideset IDs, Side Counts, and DF counts:" << std::endl;
129  for (std::size_t bn=0; bn<global_sideset_ids.size(); ++bn)
130  {
131  libMesh::out << " [" << this->processor_id() << "] "
132  << "global_sideset_ids["<<bn<<"]=" << global_sideset_ids[bn]
133  << ", num_global_side_counts["<<bn<<"]=" << num_global_side_counts[bn]
134  << ", num_global_side_df_counts["<<bn<<"]=" << num_global_side_df_counts[bn]
135  << std::endl;
136  }
137  }
138  }
139 }
140 
141 
142 
143 
144 void Nemesis_IO_Helper::get_ns_param_global()
145 {
146  if (num_node_sets_global > 0)
147  {
148  global_nodeset_ids.resize(num_node_sets_global);
149  num_global_node_counts.resize(num_node_sets_global);
150  num_global_node_df_counts.resize(num_node_sets_global);
151 
152  nemesis_err_flag =
153  Nemesis::ne_get_ns_param_global(ex_id,
154  global_nodeset_ids.data(),
155  num_global_node_counts.data(),
156  num_global_node_df_counts.data());
157  EX_CHECK_ERR(nemesis_err_flag, "Error reading global nodeset parameters!");
158 
159  if (verbose)
160  {
161  libMesh::out << "[" << this->processor_id() << "] " << "Global Nodeset IDs, Node Counts, and DF counts:" << std::endl;
162  for (std::size_t bn=0; bn<global_nodeset_ids.size(); ++bn)
163  {
164  libMesh::out << " [" << this->processor_id() << "] "
165  << "global_nodeset_ids["<<bn<<"]=" << global_nodeset_ids[bn]
166  << ", num_global_node_counts["<<bn<<"]=" << num_global_node_counts[bn]
167  << ", num_global_node_df_counts["<<bn<<"]=" << num_global_node_df_counts[bn]
168  << std::endl;
169  }
170  }
171  }
172 }
173 
174 
175 
176 void Nemesis_IO_Helper::get_eb_info_global()
177 {
178  global_elem_blk_ids.resize(num_elem_blks_global);
179  global_elem_blk_cnts.resize(num_elem_blks_global);
180 
181  if (num_elem_blks_global > 0)
182  {
183  nemesis_err_flag =
184  Nemesis::ne_get_eb_info_global(ex_id,
185  global_elem_blk_ids.data(),
186  global_elem_blk_cnts.data());
187  EX_CHECK_ERR(nemesis_err_flag, "Error reading global element block info!");
188  }
189 
190  if (verbose)
191  {
192  libMesh::out << "[" << this->processor_id() << "] " << "Global Element Block IDs and Counts:" << std::endl;
193  for (std::size_t bn=0; bn<global_elem_blk_ids.size(); ++bn)
194  {
195  libMesh::out << " [" << this->processor_id() << "] "
196  << "global_elem_blk_ids["<<bn<<"]=" << global_elem_blk_ids[bn]
197  << ", global_elem_blk_cnts["<<bn<<"]=" << global_elem_blk_cnts[bn]
198  << std::endl;
199  }
200  }
201 }
202 
203 
204 
205 void Nemesis_IO_Helper::get_init_info()
206 {
207  nemesis_err_flag =
208  Nemesis::ne_get_init_info(ex_id,
209  &num_proc,
210  &num_proc_in_file,
211  &ftype);
212  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial info!");
213 
214  if (verbose)
215  {
216  libMesh::out << "[" << this->processor_id() << "] " << "num_proc=" << num_proc << std::endl;
217  libMesh::out << "[" << this->processor_id() << "] " << "num_proc_in_file=" << num_proc_in_file << std::endl;
218  libMesh::out << "[" << this->processor_id() << "] " << "ftype=" << ftype << std::endl;
219  }
220 }
221 
222 
223 
224 void Nemesis_IO_Helper::get_loadbal_param()
225 {
226  nemesis_err_flag =
227  Nemesis::ne_get_loadbal_param(ex_id,
228  &num_internal_nodes,
229  &num_border_nodes,
230  &num_external_nodes,
231  &num_internal_elems,
232  &num_border_elems,
233  &num_node_cmaps,
234  &num_elem_cmaps,
235  this->processor_id() // The ID of the processor for which info is to be read
236  );
237  EX_CHECK_ERR(nemesis_err_flag, "Error reading load balance parameters!");
238 
239 
240  if (verbose)
241  {
242  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_nodes=" << num_internal_nodes << std::endl;
243  libMesh::out << "[" << this->processor_id() << "] " << "num_border_nodes=" << num_border_nodes << std::endl;
244  libMesh::out << "[" << this->processor_id() << "] " << "num_external_nodes=" << num_external_nodes << std::endl;
245  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_elems=" << num_internal_elems << std::endl;
246  libMesh::out << "[" << this->processor_id() << "] " << "num_border_elems=" << num_border_elems << std::endl;
247  libMesh::out << "[" << this->processor_id() << "] " << "num_node_cmaps=" << num_node_cmaps << std::endl;
248  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_cmaps=" << num_elem_cmaps << std::endl;
249  }
250 }
251 
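// Added note (assumption based on the Nemesis description of ne_get_loadbal_param):
// the three node counts are expected to partition this processor's file piece, i.e.
//
//   num_internal_nodes + num_border_nodes + num_external_nodes == num_nodes
//
// For example, a piece with 100 nodes, 12 of which lie on inter-processor
// boundaries and none of which are purely ghosted, would report 88/12/0.
// Likewise, num_internal_elems + num_border_elems gives the local element count.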
252 
253 
254 void Nemesis_IO_Helper::get_elem_map()
255 {
256  elem_mapi.resize(num_internal_elems);
257  elem_mapb.resize(num_border_elems);
258 
259  nemesis_err_flag =
260  Nemesis::ne_get_elem_map(ex_id,
261  elem_mapi.empty() ? nullptr : elem_mapi.data(),
262  elem_mapb.empty() ? nullptr : elem_mapb.data(),
263  this->processor_id()
264  );
265  EX_CHECK_ERR(nemesis_err_flag, "Error reading element maps!");
266 
267 
268  if (verbose)
269  {
270  libMesh::out << "[" << this->processor_id() << "] elem_mapi[i] = ";
271  for (auto i : IntRange<int>(0, std::min(10, num_internal_elems-1)))
272  libMesh::out << elem_mapi[i] << ", ";
273  libMesh::out << "... " << elem_mapi.back() << std::endl;
274 
275  libMesh::out << "[" << this->processor_id() << "] elem_mapb[i] = ";
276  for (auto i : IntRange<int>(0, std::min(10, num_border_elems-1)))
277  libMesh::out << elem_mapb[i] << ", ";
278  libMesh::out << "... " << elem_mapb.back() << std::endl;
279  }
280 }
281 
282 
283 
284 
285 void Nemesis_IO_Helper::get_node_map()
286 {
287  node_mapi.resize(num_internal_nodes);
288  node_mapb.resize(num_border_nodes);
289  node_mape.resize(num_external_nodes);
290 
291  nemesis_err_flag =
292  Nemesis::ne_get_node_map(ex_id,
293  node_mapi.empty() ? nullptr : node_mapi.data(),
294  node_mapb.empty() ? nullptr : node_mapb.data(),
295  node_mape.empty() ? nullptr : node_mape.data(),
296  this->processor_id()
297  );
298  EX_CHECK_ERR(nemesis_err_flag, "Error reading node maps!");
299 
300  if (verbose)
301  {
302  // Remark: The Exodus/Nemesis node numbering is always (?) 1-based! This means the first interior node id will
303  // always be == 1.
304  libMesh::out << "[" << this->processor_id() << "] " << "first interior node id=" << node_mapi[0] << std::endl;
305  libMesh::out << "[" << this->processor_id() << "] " << "last interior node id=" << node_mapi.back() << std::endl;
306 
307  libMesh::out << "[" << this->processor_id() << "] " << "first boundary node id=" << node_mapb[0] << std::endl;
308  libMesh::out << "[" << this->processor_id() << "] " << "last boundary node id=" << node_mapb.back() << std::endl;
309 
310  // The number of external nodes is sometimes zero, don't try to access
311  // node_mape.back() in this case!
312  if (num_external_nodes > 0)
313  {
314  libMesh::out << "[" << this->processor_id() << "] " << "first external node id=" << node_mape[0] << std::endl;
315  libMesh::out << "[" << this->processor_id() << "] " << "last external node id=" << node_mape.back() << std::endl;
316  }
317  }
318 }
319 
320 
321 
322 void Nemesis_IO_Helper::get_cmap_params()
323 {
324  node_cmap_ids.resize(num_node_cmaps);
325  node_cmap_node_cnts.resize(num_node_cmaps);
326  elem_cmap_ids.resize(num_elem_cmaps);
327  elem_cmap_elem_cnts.resize(num_elem_cmaps);
328 
329  nemesis_err_flag =
330  Nemesis::ne_get_cmap_params(ex_id,
331  node_cmap_ids.empty() ? nullptr : node_cmap_ids.data(),
332  node_cmap_node_cnts.empty() ? nullptr : node_cmap_node_cnts.data(),
333  elem_cmap_ids.empty() ? nullptr : elem_cmap_ids.data(),
334  elem_cmap_elem_cnts.empty() ? nullptr : elem_cmap_elem_cnts.data(),
335  this->processor_id());
336  EX_CHECK_ERR(nemesis_err_flag, "Error reading cmap parameters!");
337 
338 
339  if (verbose)
340  {
341  libMesh::out << "[" << this->processor_id() << "] ";
342  for (std::size_t i=0; i<node_cmap_ids.size(); ++i)
343  libMesh::out << "node_cmap_ids["<<i<<"]=" << node_cmap_ids[i] << " ";
344  libMesh::out << std::endl;
345 
346  libMesh::out << "[" << this->processor_id() << "] ";
347  for (std::size_t i=0; i<node_cmap_node_cnts.size(); ++i)
348  libMesh::out << "node_cmap_node_cnts["<<i<<"]=" << node_cmap_node_cnts[i] << " ";
349  libMesh::out << std::endl;
350 
351  libMesh::out << "[" << this->processor_id() << "] ";
352  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
353  libMesh::out << "elem_cmap_ids["<<i<<"]=" << elem_cmap_ids[i] << " ";
354  libMesh::out << std::endl;
355 
356  libMesh::out << "[" << this->processor_id() << "] ";
357  for (std::size_t i=0; i<elem_cmap_elem_cnts.size(); ++i)
358  libMesh::out << "elem_cmap_elem_cnts["<<i<<"]=" << elem_cmap_elem_cnts[i] << " ";
359  libMesh::out << std::endl;
360  }
361 }
362 
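// Added worked example (hypothetical numbers): on a two-processor mesh where
// this processor shares 5 nodes and 4 element faces with processor 1, the
// call above would typically fill in
//
//   node_cmap_ids       = [1]    node_cmap_node_cnts = [5]
//   elem_cmap_ids       = [1]    elem_cmap_elem_cnts = [4]
//
// i.e. one communication map per neighboring processor, sized by the number
// of shared entities; get_node_cmap()/get_elem_cmap() below then retrieve the
// actual entity ids.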
363 
364 
365 void Nemesis_IO_Helper::get_node_cmap()
366 {
367  node_cmap_node_ids.resize(num_node_cmaps);
368  node_cmap_proc_ids.resize(num_node_cmaps);
369 
370  for (std::size_t i=0; i<node_cmap_node_ids.size(); ++i)
371  {
372  node_cmap_node_ids[i].resize(node_cmap_node_cnts[i]);
373  node_cmap_proc_ids[i].resize(node_cmap_node_cnts[i]);
374 
375  // Don't call ne_get_node_cmap() if there is nothing there to
376  // get; Nemesis throws an error in this case.
377  if (node_cmap_node_cnts[i] > 0)
378  {
379  nemesis_err_flag =
380  Nemesis::ne_get_node_cmap(ex_id,
381  node_cmap_ids[i],
382  node_cmap_node_ids[i].data(),
383  node_cmap_proc_ids[i].data(),
384  this->processor_id());
385  EX_CHECK_ERR(nemesis_err_flag, "Error reading node cmap node and processor ids!");
386  }
387 
388  if (verbose)
389  {
390  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_ids["<<i<<"]=";
391  for (std::size_t j=0; j<node_cmap_node_ids[i].size(); ++j)
392  libMesh::out << node_cmap_node_ids[i][j] << " ";
393  libMesh::out << std::endl;
394 
395  // This is basically a vector, all entries of which are = node_cmap_ids[i]
396  // Not sure if it's always guaranteed to be that or what...
397  libMesh::out << "[" << this->processor_id() << "] node_cmap_proc_ids["<<i<<"]=";
398  for (std::size_t j=0; j<node_cmap_proc_ids[i].size(); ++j)
399  libMesh::out << node_cmap_proc_ids[i][j] << " ";
400  libMesh::out << std::endl;
401  }
402  }
403 }
404 
405 
406 
407 void Nemesis_IO_Helper::get_elem_cmap()
408 {
409  elem_cmap_elem_ids.resize(num_elem_cmaps);
410  elem_cmap_side_ids.resize(num_elem_cmaps);
411  elem_cmap_proc_ids.resize(num_elem_cmaps);
412 
413  for (std::size_t i=0; i<elem_cmap_elem_ids.size(); ++i)
414  {
415  elem_cmap_elem_ids[i].resize(elem_cmap_elem_cnts[i]);
416  elem_cmap_side_ids[i].resize(elem_cmap_elem_cnts[i]);
417  elem_cmap_proc_ids[i].resize(elem_cmap_elem_cnts[i]);
418 
419  if (elem_cmap_elem_cnts[i] > 0)
420  {
421  nemesis_err_flag =
422  Nemesis::ne_get_elem_cmap(ex_id,
423  elem_cmap_ids[i],
424  elem_cmap_elem_ids[i].data(),
425  elem_cmap_side_ids[i].data(),
426  elem_cmap_proc_ids[i].data(),
427  this->processor_id());
428  EX_CHECK_ERR(nemesis_err_flag, "Error reading elem cmap elem, side, and processor ids!");
429  }
430 
431  if (verbose)
432  {
433  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_ids["<<i<<"]=";
434  for (std::size_t j=0; j<elem_cmap_elem_ids[i].size(); ++j)
435  libMesh::out << elem_cmap_elem_ids[i][j] << " ";
436  libMesh::out << std::endl;
437 
438  // These must be the (local) side IDs (in the ExodusII face numbering scheme)
439  // of the sides shared across processors.
440  libMesh::out << "[" << this->processor_id() << "] elem_cmap_side_ids["<<i<<"]=";
441  for (std::size_t j=0; j<elem_cmap_side_ids[i].size(); ++j)
442  libMesh::out << elem_cmap_side_ids[i][j] << " ";
443  libMesh::out << std::endl;
444 
445  // This is basically a vector, all entries of which are = elem_cmap_ids[i]
446  // Not sure if it's always guaranteed to be that or what...
447  libMesh::out << "[" << this->processor_id() << "] elem_cmap_proc_ids["<<i<<"]=";
448  for (std::size_t j=0; j<elem_cmap_proc_ids[i].size(); ++j)
449  libMesh::out << elem_cmap_proc_ids[i][j] << " ";
450  libMesh::out << std::endl;
451  }
452  }
453 }
454 
455 
456 
457 
458 void Nemesis_IO_Helper::put_init_info(unsigned num_proc_in,
459  unsigned num_proc_in_file_in,
460  const char * ftype_in)
461 {
462  nemesis_err_flag =
463  Nemesis::ne_put_init_info(ex_id,
464  num_proc_in,
465  num_proc_in_file_in,
466  const_cast<char *>(ftype_in));
467 
468  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial information!");
469 }
470 
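// Added note: initialize() later in this file calls this wrapper as
//
//   this->put_init_info(this->n_processors(), 1, "p");
//
// i.e. one spread file per processor, with "p" as the Nemesis file type flag.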
471 
472 
473 
474 void Nemesis_IO_Helper::put_init_global(dof_id_type num_nodes_global_in,
475  dof_id_type num_elems_global_in,
476  unsigned num_elem_blks_global_in,
477  unsigned num_node_sets_global_in,
478  unsigned num_side_sets_global_in)
479 {
480  nemesis_err_flag =
481  Nemesis::ne_put_init_global(ex_id,
482  num_nodes_global_in,
483  num_elems_global_in,
484  num_elem_blks_global_in,
485  num_node_sets_global_in,
486  num_side_sets_global_in);
487 
488  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial global data!");
489 }
490 
491 
492 
493 void Nemesis_IO_Helper::put_eb_info_global(std::vector<int> & global_elem_blk_ids_in,
494  std::vector<int> & global_elem_blk_cnts_in)
495 {
496  nemesis_err_flag =
497  Nemesis::ne_put_eb_info_global(ex_id,
498  global_elem_blk_ids_in.data(),
499  global_elem_blk_cnts_in.data());
500 
501  EX_CHECK_ERR(nemesis_err_flag, "Error writing global element block information!");
502 }
503 
504 
505 
506 
507 void Nemesis_IO_Helper::put_ns_param_global(std::vector<int> & global_nodeset_ids_in,
508  std::vector<int> & num_global_node_counts_in,
509  std::vector<int> & num_global_node_df_counts_in)
510 {
511  // Only add nodesets if there are some
512  if (global_nodeset_ids.size())
513  {
514  nemesis_err_flag =
515  Nemesis::ne_put_ns_param_global(ex_id,
516  global_nodeset_ids_in.data(),
517  num_global_node_counts_in.data(),
518  num_global_node_df_counts_in.data());
519  }
520 
521  EX_CHECK_ERR(nemesis_err_flag, "Error writing global nodeset parameters!");
522 }
523 
524 
525 
526 
527 void Nemesis_IO_Helper::put_ss_param_global(std::vector<int> & global_sideset_ids_in,
528  std::vector<int> & num_global_side_counts_in,
529  std::vector<int> & num_global_side_df_counts_in)
530 {
531  // Only add sidesets if there are some
532  if (global_sideset_ids.size())
533  {
534  nemesis_err_flag =
535  Nemesis::ne_put_ss_param_global(ex_id,
536  global_sideset_ids_in.data(),
537  num_global_side_counts_in.data(),
538  num_global_side_df_counts_in.data());
539  }
540 
541  EX_CHECK_ERR(nemesis_err_flag, "Error writing global sideset parameters!");
542 }
543 
544 
545 
546 
547 void Nemesis_IO_Helper::put_loadbal_param(unsigned num_internal_nodes_in,
548  unsigned num_border_nodes_in,
549  unsigned num_external_nodes_in,
550  unsigned num_internal_elems_in,
551  unsigned num_border_elems_in,
552  unsigned num_node_cmaps_in,
553  unsigned num_elem_cmaps_in)
554 {
555  nemesis_err_flag =
556  Nemesis::ne_put_loadbal_param(ex_id,
557  num_internal_nodes_in,
558  num_border_nodes_in,
559  num_external_nodes_in,
560  num_internal_elems_in,
561  num_border_elems_in,
562  num_node_cmaps_in,
563  num_elem_cmaps_in,
564  this->processor_id());
565 
566  EX_CHECK_ERR(nemesis_err_flag, "Error writing loadbal parameters!");
567 }
568 
569 
570 
571 
572 
573 void Nemesis_IO_Helper::put_cmap_params(std::vector<int> & node_cmap_ids_in,
574  std::vector<int> & node_cmap_node_cnts_in,
575  std::vector<int> & elem_cmap_ids_in,
576  std::vector<int> & elem_cmap_elem_cnts_in)
577 {
578  libmesh_assert(!node_cmap_ids_in.empty());
579  libmesh_assert(!node_cmap_node_cnts_in.empty());
580  libmesh_assert(!elem_cmap_ids_in.empty());
581  libmesh_assert(!elem_cmap_elem_cnts_in.empty());
582 
583  nemesis_err_flag =
584  Nemesis::ne_put_cmap_params(ex_id,
585  node_cmap_ids_in.data(),
586  node_cmap_node_cnts_in.data(),
587  elem_cmap_ids_in.data(),
588  elem_cmap_elem_cnts_in.data(),
589  this->processor_id());
590 
591  EX_CHECK_ERR(nemesis_err_flag, "Error writing cmap parameters!");
592 }
593 
594 
595 
596 
597 void Nemesis_IO_Helper::put_node_cmap(std::vector<std::vector<int>> & node_cmap_node_ids_in,
598  std::vector<std::vector<int>> & node_cmap_proc_ids_in)
599 {
600 
601  // Print to screen what we are about to print to Nemesis file
602  if (verbose)
603  {
604  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
605  {
606  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : nodes communicated to proc "
607  << this->node_cmap_ids[i]
608  << " = ";
609  for (std::size_t j=0; j<node_cmap_node_ids_in[i].size(); ++j)
610  libMesh::out << node_cmap_node_ids_in[i][j] << " ";
611  libMesh::out << std::endl;
612  }
613 
614  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
615  {
616  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : processor IDs = ";
617  for (std::size_t j=0; j<node_cmap_proc_ids_in[i].size(); ++j)
618  libMesh::out << node_cmap_proc_ids_in[i][j] << " ";
619  libMesh::out << std::endl;
620  }
621  }
622 
623  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
624  {
625  int * node_ids_ptr = node_cmap_node_ids_in[i].empty() ?
626  nullptr : node_cmap_node_ids_in[i].data();
627  int * proc_ids_ptr = node_cmap_proc_ids_in[i].empty() ?
628  nullptr : node_cmap_proc_ids_in[i].data();
629 
630  nemesis_err_flag =
631  Nemesis::ne_put_node_cmap(ex_id, this->node_cmap_ids[i],
632  node_ids_ptr, proc_ids_ptr,
633  this->processor_id());
634 
635  EX_CHECK_ERR(nemesis_err_flag, "Error writing node communication map to file!");
636  }
637 }
638 
639 
640 
641 
642 void Nemesis_IO_Helper::put_node_map(std::vector<int> & node_mapi_in,
643  std::vector<int> & node_mapb_in,
644  std::vector<int> & node_mape_in)
645 {
646  nemesis_err_flag =
647  Nemesis::ne_put_node_map(ex_id,
648  node_mapi_in.empty() ? nullptr : node_mapi_in.data(),
649  node_mapb_in.empty() ? nullptr : node_mapb_in.data(),
650  node_mape_in.empty() ? nullptr : node_mape_in.data(),
651  this->processor_id());
652 
653  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border node maps to file!");
654 }
655 
656 
657 
658 
659 void Nemesis_IO_Helper::put_elem_cmap(std::vector<std::vector<int>> & elem_cmap_elem_ids_in,
660  std::vector<std::vector<int>> & elem_cmap_side_ids_in,
661  std::vector<std::vector<int>> & elem_cmap_proc_ids_in)
662 {
663  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
664  {
665  nemesis_err_flag =
666  Nemesis::ne_put_elem_cmap(ex_id,
667  this->elem_cmap_ids[i],
668  elem_cmap_elem_ids_in[i].data(),
669  elem_cmap_side_ids_in[i].data(),
670  elem_cmap_proc_ids_in[i].data(),
671  this->processor_id());
672 
673  EX_CHECK_ERR(nemesis_err_flag, "Error writing elem communication map to file!");
674  }
675 }
676 
677 
678 
679 
680 void Nemesis_IO_Helper::put_elem_map(std::vector<int> & elem_mapi_in,
681  std::vector<int> & elem_mapb_in)
682 {
683  nemesis_err_flag =
684  Nemesis::ne_put_elem_map(ex_id,
685  elem_mapi_in.empty() ? nullptr : elem_mapi_in.data(),
686  elem_mapb_in.empty() ? nullptr : elem_mapb_in.data(),
687  this->processor_id());
688 
689  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border element maps to file!");
690 }
691 
692 
693 
694 
695 
696 
697 void Nemesis_IO_Helper::put_n_coord(unsigned start_node_num,
698  unsigned num_nodes_in,
699  std::vector<Real> & x_coor,
700  std::vector<Real> & y_coor,
701  std::vector<Real> & z_coor)
702 {
703  if (num_nodes_in)
704  {
705  nemesis_err_flag =
706  Nemesis::ne_put_n_coord(ex_id,
707  start_node_num,
708  num_nodes_in,
709  x_coor.empty() ? nullptr : x_coor.data(),
710  y_coor.empty() ? nullptr : y_coor.data(),
711  z_coor.empty() ? nullptr : z_coor.data());
712 
713  EX_CHECK_ERR(nemesis_err_flag, "Error writing coords to file!");
714  }
715 }
716 
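// Hedged sketch (added note): a writer holding the local nodal coordinates in
// three parallel vectors could pass them to this wrapper as
//
//   std::vector<Real> x(num_nodes), y(num_nodes), z(num_nodes);
//   // ... fill x/y/z in the same order as the local node numbering ...
//   helper.put_n_coord(/*start_node_num=*/1, num_nodes, x, y, z);
//
// where start_node_num is assumed to follow the usual 1-based ExodusII convention.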
717 
718 
719 
720 
721 
722 
723 
724 // Note: we can't reuse the ExodusII_IO_Helper code directly, since it only runs
725 // on processor 0. TODO: We could have the body of this function as a separate
726 // function and then ExodusII_IO_Helper would only call it if on processor 0...
727 void Nemesis_IO_Helper::create(std::string filename)
728 {
729  // Fall back on double precision when necessary since ExodusII
730  // doesn't seem to support long double
731  int
732  comp_ws = 0,
733  io_ws = 0;
734 
735  if (_single_precision)
736  {
737  comp_ws = sizeof(float);
738  io_ws = sizeof(float);
739  }
740  else
741  {
742  comp_ws = cast_int<int>(std::min(sizeof(Real), sizeof(double)));
743  io_ws = cast_int<int>(std::min(sizeof(Real), sizeof(double)));
744  }
745 
746  this->ex_id = exII::ex_create(filename.c_str(), EX_CLOBBER, &comp_ws, &io_ws);
747 
748  EX_CHECK_ERR(ex_id, "Error creating Nemesis mesh file.");
749 
750  if (verbose)
751  libMesh::out << "File created successfully." << std::endl;
752 
753  this->opened_for_writing = true;
754 }
755 
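// Added note: Nemesis spread files are conventionally named
// "<basename>.<n_processors>.<rank>", e.g. "out.e.4.0" ... "out.e.4.3" for a
// four-processor run. A hypothetical caller could build such a name with
//
//   std::ostringstream oss;
//   oss << base_name << "." << this->n_processors() << "." << this->processor_id();
//   this->create(oss.str());
//
// but the actual per-processor name mangling is handled by the calling code
// (Nemesis_IO), not by this helper.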
756 
757 
758 
759 
760 
761 
762 
763 void Nemesis_IO_Helper::initialize(std::string title_in, const MeshBase & mesh, bool /*use_discontinuous*/)
764 {
765  // Make sure that the reference passed in is really a DistributedMesh
766  // const DistributedMesh & pmesh = cast_ref<const DistributedMesh &>(mesh);
767  const MeshBase & pmesh = mesh;
768 
769  // According to Nemesis documentation, first call when writing should be to
770  // ne_put_init_info(). Our reader doesn't actually call this, but we should
771  // strive to be as close to a normal nemesis file as possible...
772  this->put_init_info(this->n_processors(), 1, "p");
773 
774 
775  // Gather global "initial" information for Nemesis. This consists of
776  // three parts labeled I, II, and III below...
777 
778  //
779  // I.) Need to compute the number of global element blocks. To be consistent with
780  // Exodus, we also incorrectly associate the number of element blocks with the
781  // number of libmesh subdomains...
782  //
783  this->compute_num_global_elem_blocks(pmesh);
784 
785  //
786  // II.) Determine the global number of nodesets by communication.
787  // This code relies on BoundaryInfo storing side and node
788  // boundary IDs separately at the time they are added to the
789  // BoundaryInfo object.
790  //
791  this->compute_num_global_nodesets(pmesh);
792 
793  //
794  // III.) Need to compute the global number of sidesets by communication:
795  // This code relies on BoundaryInfo storing side and node
796  // boundary IDs separately at the time they are added to the
797  // BoundaryInfo object.
798  //
799  this->compute_num_global_sidesets(pmesh);
800 
801  // Now write the global data obtained in steps I, II, and III to the Nemesis file
802  this->put_init_global(pmesh.parallel_n_nodes(),
803  pmesh.parallel_n_elem(),
804  this->num_elem_blks_global, /* I. */
805  this->num_node_sets_global, /* II. */
806  this->num_side_sets_global /* III. */
807  );
808 
809  // Next, we'll write global element block information to the file. This was already
810  // gathered in step I. above
811  this->put_eb_info_global(this->global_elem_blk_ids,
812  this->global_elem_blk_cnts);
813 
814 
815  // Next, write global nodeset information to the file. This was already gathered in
816  // step II. above.
817  this->num_global_node_df_counts.clear();
818  this->num_global_node_df_counts.resize(this->global_nodeset_ids.size()); // distribution factors all zero...
819  this->put_ns_param_global(this->global_nodeset_ids,
820  this->num_global_node_counts,
821  this->num_global_node_df_counts);
822 
823 
824  // Next, write global sideset information to the file. This was already gathered in
825  // step III. above.
826  this->num_global_side_df_counts.clear();
827  this->num_global_side_df_counts.resize(this->global_sideset_ids.size()); // distribution factors all zero...
828  this->put_ss_param_global(this->global_sideset_ids,
829  this->num_global_side_counts,
830  this->num_global_side_df_counts);
831 
832 
833  // Before we go any further we need to derive consistent node and
834  // element numbering schemes for all local elems and nodes connected
835  // to local elements.
836  //
837  // Must be called *after* the local_subdomain_counts map has been constructed
838  // by the compute_num_global_elem_blocks() function!
839  this->build_element_and_node_maps(pmesh);
840 
841  // Next step is to write "load balance" parameters. Several things need to
842  // be computed first though...
843 
844  // First we'll collect IDs of border nodes.
845  this->compute_border_node_ids(pmesh);
846 
847  // Next we'll collect numbers of internal and border elements, and internal nodes.
848  // Note: "A border node does not a border element make...", that is, just because one
849  // of an element's nodes has been identified as a border node, the element is not
850  // necessarily a border element. It must have a side on the boundary between processors,
851  // i.e. have a face neighbor with a different processor id...
852  this->compute_internal_and_border_elems_and_internal_nodes(pmesh);
853 
854  // Finally we are ready to write the loadbal information to the file
855  this->put_loadbal_param(this->num_internal_nodes,
856  this->num_border_nodes,
857  this->num_external_nodes,
858  this->num_internal_elems,
859  this->num_border_elems,
860  this->num_node_cmaps,
861  this->num_elem_cmaps);
862 
863 
864  // Now we need to compute the "communication map" parameters. These are basically
865  // lists of nodes and elements which need to be communicated between different processors
866  // when the mesh file is read back in.
867  this->compute_communication_map_parameters();
868 
869  // Do we have communication maps to write? Note that
870  // ne_put_cmap_params expects us to have either *both* node and elem
871  // cmaps or *neither*
872  if (!this->node_cmap_ids.empty() &&
873  !this->node_cmap_node_cnts.empty() &&
874  !this->elem_cmap_ids.empty() &&
875  !this->elem_cmap_elem_cnts.empty())
876  {
877  // Write communication map parameters to file.
878  this->put_cmap_params(this->node_cmap_ids,
879  this->node_cmap_node_cnts,
880  this->elem_cmap_ids,
881  this->elem_cmap_elem_cnts);
882 
883  // Ready the node communication maps. The node IDs which
884  // are communicated are the ones currently stored in
885  // proc_nodes_touched_intersections.
886  this->compute_node_communication_maps();
887 
888  // Write the packed node communication vectors to file.
889  this->put_node_cmap(this->node_cmap_node_ids,
890  this->node_cmap_proc_ids);
891 
892  // Ready the node maps. These have nothing to do with communication, they map
893  // the nodes to internal, border, and external nodes in the file.
894  this->compute_node_maps();
895 
896  // Call the Nemesis API to write the node maps to file.
897  this->put_node_map(this->node_mapi,
898  this->node_mapb,
899  this->node_mape);
900 
901  // Ready the element communication maps. This includes border
902  // element IDs, sides which are on the border, and the processors to which
903  // they are to be communicated...
904  this->compute_elem_communication_maps();
905 
906  // Call the Nemesis API to write the packed element communication maps vectors to file
907  this->put_elem_cmap(this->elem_cmap_elem_ids,
908  this->elem_cmap_side_ids,
909  this->elem_cmap_proc_ids);
910  }
911 
912 
913  // Ready the Nemesis element maps (internal and border) for writing to file.
914  this->compute_element_maps();
915 
916  // Call the Nemesis API to write the internal and border element IDs.
917  this->put_elem_map(this->elem_mapi,
918  this->elem_mapb);
919 
920  // Now write Exodus-specific initialization information, some of which is
921  // different when you are using Nemesis.
922  this->write_exodus_initialization_info(pmesh, title_in);
923 } // end initialize()
924 
925 
926 
927 
928 
929 
930 void Nemesis_IO_Helper::write_exodus_initialization_info(const MeshBase & pmesh,
931  const std::string & title_in)
932 {
933  // This follows the convention of Exodus: we always write out the mesh as LIBMESH_DIM-dimensional,
934  // even if it is 2D...
935  this->num_dim = LIBMESH_DIM;
936 
937  this->num_elem = static_cast<unsigned int>(std::distance (pmesh.active_local_elements_begin(),
938  pmesh.active_local_elements_end()));
939 
940  // Exodus will also use *global* number of side and node sets,
941  // though it will not write out entries for all of them...
942  this->num_side_sets =
943  cast_int<int>(this->global_sideset_ids.size());
944  this->num_node_sets =
945  cast_int<int>(this->global_nodeset_ids.size());
946 
947  // We need to write the global number of blocks, even though this processor might not have
948  // elements in some of them!
949  this->num_elem_blk = this->num_elem_blks_global;
950 
951  ex_err = exII::ex_put_init(ex_id,
952  title_in.c_str(),
953  this->num_dim,
954  this->num_nodes,
955  this->num_elem,
956  this->num_elem_blk,
957  this->num_node_sets,
958  this->num_side_sets);
959 
960  EX_CHECK_ERR(ex_err, "Error initializing new Nemesis file.");
961 }
962 
963 
964 
965 
966 
967 void Nemesis_IO_Helper::compute_element_maps()
968 {
969  // Make sure we don't have any leftover info
970  this->elem_mapi.clear();
971  this->elem_mapb.clear();
972 
973  // Copy set contents into vectors
974  this->elem_mapi.resize(this->internal_elem_ids.size());
975  this->elem_mapb.resize(this->border_elem_ids.size());
976 
977  {
978  unsigned cnt = 0;
979  for (const auto & id : this->internal_elem_ids)
980  {
981  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find(id);
982  if (elem_it == libmesh_elem_num_to_exodus.end())
983  libmesh_error_msg("Elem number " << id << " not found in libmesh_elem_num_to_exodus map.");
984  this->elem_mapi[cnt++] = elem_it->second;
985  }
986  }
987 
988  {
989  unsigned cnt = 0;
990  for (const auto & id : this->border_elem_ids)
991  {
992  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find(id);
993  if (elem_it == libmesh_elem_num_to_exodus.end())
994  libmesh_error_msg("Elem number " << id << " not found in libmesh_elem_num_to_exodus map.");
995  this->elem_mapb[cnt++] = elem_it->second;
996  }
997  }
998 }
999 
1000 
1001 
1002 void Nemesis_IO_Helper::compute_elem_communication_maps()
1003 {
1004  // Make sure there is no leftover information
1005  this->elem_cmap_elem_ids.clear();
1006  this->elem_cmap_side_ids.clear();
1007  this->elem_cmap_proc_ids.clear();
1008 
1009  // Allocate enough space for all our element maps
1010  this->elem_cmap_elem_ids.resize(this->num_elem_cmaps);
1011  this->elem_cmap_side_ids.resize(this->num_elem_cmaps);
1012  this->elem_cmap_proc_ids.resize(this->num_elem_cmaps);
1013  {
1014  unsigned cnt=0; // Index into vectors
1015  std::map<unsigned, std::set<std::pair<unsigned,unsigned>>>::iterator
1016  it = this->proc_border_elem_sets.begin(),
1017  end = this->proc_border_elem_sets.end();
1018 
1019  for (; it != end; ++it)
1020  {
1021  // Make sure the current elem_cmap_id matches the index in our map of node intersections
1022  libmesh_assert_equal_to (static_cast<unsigned>(this->elem_cmap_ids[cnt]), it->first);
1023 
1024  // Get reference to the set of IDs to be packed into the vector
1025  std::set<std::pair<unsigned,unsigned>> & elem_set = it->second;
1026 
1027  // Resize the vectors to receive their payload
1028  this->elem_cmap_elem_ids[cnt].resize(elem_set.size());
1029  this->elem_cmap_side_ids[cnt].resize(elem_set.size());
1030  this->elem_cmap_proc_ids[cnt].resize(elem_set.size());
1031 
1032  std::set<std::pair<unsigned,unsigned>>::iterator elem_set_iter = elem_set.begin();
1033 
1034  // Pack the vectors with elem IDs, side IDs, and processor IDs.
1035  for (std::size_t j=0; j<this->elem_cmap_elem_ids[cnt].size(); ++j, ++elem_set_iter)
1036  {
1037  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find((*elem_set_iter).first);
1038 
1039  if (elem_it == libmesh_elem_num_to_exodus.end())
1040  libmesh_error_msg("Elem number " << (*elem_set_iter).first << " not found in libmesh_elem_num_to_exodus map.");
1041 
1042  this->elem_cmap_elem_ids[cnt][j] = elem_it->second;
1043  this->elem_cmap_side_ids[cnt][j] = (*elem_set_iter).second; // Side ID, this has already been converted above
1044  this->elem_cmap_proc_ids[cnt][j] = it->first; // All have the same processor ID
1045  }
1046 
1047  // increment vector index to go to next processor
1048  cnt++;
1049  }
1050  } // end scope for packing
1051 }
1052 
1053 
1054 
1055 
1056 
1057 void Nemesis_IO_Helper::compute_node_maps()
1058 {
1059  // Make sure we don't have any leftover information
1060  this->node_mapi.clear();
1061  this->node_mapb.clear();
1062  this->node_mape.clear();
1063 
1064  // Make sure there's enough space to hold all our node IDs
1065  this->node_mapi.resize(this->internal_node_ids.size());
1066  this->node_mapb.resize(this->border_node_ids.size());
1067 
1068  // Copy set contents into vectors
1069  {
1070  unsigned cnt = 0;
1071  for (const auto & id : this->internal_node_ids)
1072  {
1073  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(id);
1074  if (node_it == libmesh_node_num_to_exodus.end())
1075  libmesh_error_msg("Node number " << id << " not found in libmesh_node_num_to_exodus map.");
1076  this->node_mapi[cnt++] = node_it->second;
1077  }
1078  }
1079 
1080  {
1081  unsigned cnt=0;
1082  for (const auto & id : this->border_node_ids)
1083  {
1084  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(id);
1085  if (node_it == libmesh_node_num_to_exodus.end())
1086  libmesh_error_msg("Node number " << id << " not found in libmesh_node_num_to_exodus map.");
1087  this->node_mapb[cnt++] = node_it->second;
1088  }
1089  }
1090 }
1091 
1092 
1093 
1094 
1095 
1096 void Nemesis_IO_Helper::compute_node_communication_maps()
1097 {
1098  // Make sure there's no left-over information
1099  this->node_cmap_node_ids.clear();
1100  this->node_cmap_proc_ids.clear();
1101 
1102  libmesh_assert_less_equal
1103  (this->proc_nodes_touched_intersections.size(),
1104  std::size_t(this->num_node_cmaps));
1105 
1106  // Allocate enough space for all our node maps
1107  this->node_cmap_node_ids.resize(this->num_node_cmaps);
1108  this->node_cmap_proc_ids.resize(this->num_node_cmaps);
1109  {
1110  unsigned cnt=0; // Index into vectors
1111  std::map<unsigned, std::set<unsigned>>::iterator
1112  it = this->proc_nodes_touched_intersections.begin(),
1113  end = this->proc_nodes_touched_intersections.end();
1114 
1115  for (; it != end; ++it)
1116  {
1117  // Make sure the current node_cmap_id matches the index in our map of node intersections
1118  libmesh_assert_equal_to (static_cast<unsigned>(this->node_cmap_ids[cnt]), it->first);
1119 
1120  // Get reference to the set of IDs to be packed into the vector.
1121  std::set<unsigned> & node_set = it->second;
1122 
1123  // Resize the vectors to receive their payload
1124  this->node_cmap_node_ids[cnt].resize(node_set.size());
1125  this->node_cmap_proc_ids[cnt].resize(node_set.size());
1126 
1127  std::set<unsigned>::iterator node_set_iter = node_set.begin();
1128 
1129  // Pack the vectors with node IDs and processor IDs.
1130  for (std::size_t j=0; j<this->node_cmap_node_ids[cnt].size(); ++j, ++node_set_iter)
1131  {
1132  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(*node_set_iter);
1133  if (node_it == libmesh_node_num_to_exodus.end())
1134  libmesh_error_msg("Node number " << *node_set_iter << " not found in libmesh_node_num_to_exodus map.");
1135 
1136  this->node_cmap_node_ids[cnt][j] = node_it->second;
1137  this->node_cmap_proc_ids[cnt][j] = it->first;
1138  }
1139 
1140  // increment vector index to go to next processor
1141  cnt++;
1142  }
1143  } // end scope for packing
1144 
1145  // Print out the vectors we just packed
1146  if (verbose)
1147  {
1148  for (std::size_t i=0; i<this->node_cmap_node_ids.size(); ++i)
1149  {
1150  libMesh::out << "[" << this->processor_id() << "] nodes communicated to proc "
1151  << this->node_cmap_ids[i]
1152  << " = ";
1153  for (std::size_t j=0; j<this->node_cmap_node_ids[i].size(); ++j)
1154  libMesh::out << this->node_cmap_node_ids[i][j] << " ";
1155  libMesh::out << std::endl;
1156  }
1157 
1158  for (std::size_t i=0; i<this->node_cmap_node_ids.size(); ++i)
1159  {
1160  libMesh::out << "[" << this->processor_id() << "] processor ID node communicated to = ";
1161  for (std::size_t j=0; j<this->node_cmap_proc_ids[i].size(); ++j)
1162  libMesh::out << this->node_cmap_proc_ids[i][j] << " ";
1163  libMesh::out << std::endl;
1164  }
1165  }
1166 }
1167 
1168 
1169 
1170 
1171 void Nemesis_IO_Helper::compute_communication_map_parameters()
1172 {
1173  // For the nodes, these are the number of entries in the sets in proc_nodes_touched_intersections
1174  // map computed above. Note: this map does not contain self-intersections so we can loop over it
1175  // directly.
1176  this->node_cmap_node_cnts.clear(); // Make sure we don't have any leftover information...
1177  this->node_cmap_ids.clear(); // Make sure we don't have any leftover information...
1178  this->node_cmap_node_cnts.resize(this->num_node_cmaps);
1179  this->node_cmap_ids.resize(this->num_node_cmaps);
1180 
1181  {
1182  unsigned cnt=0; // Index into the vector
1183  std::map<unsigned, std::set<unsigned>>::iterator
1184  it = this->proc_nodes_touched_intersections.begin(),
1185  end = this->proc_nodes_touched_intersections.end();
1186 
1187  for (; it != end; ++it)
1188  {
1189  this->node_cmap_ids[cnt] = it->first; // The ID of the proc we communicate with
1190  this->node_cmap_node_cnts[cnt] = cast_int<int>(it->second.size()); // The number of nodes we communicate
1191  cnt++; // increment vector index!
1192  }
1193  }
1194 
1195  // Print the packed vectors we just filled
1196  if (verbose)
1197  {
1198  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_cnts = ";
1199  for (std::size_t i=0; i<node_cmap_node_cnts.size(); ++i)
1200  libMesh::out << node_cmap_node_cnts[i] << ", ";
1201  libMesh::out << std::endl;
1202 
1203  libMesh::out << "[" << this->processor_id() << "] node_cmap_ids = ";
1204  for (std::size_t i=0; i<node_cmap_ids.size(); ++i)
1205  libMesh::out << node_cmap_ids[i] << ", ";
1206  libMesh::out << std::endl;
1207  }
1208 
1209  // For the elements, we have not yet computed all this information..
1210  this->elem_cmap_elem_cnts.clear(); // Make sure we don't have any leftover information...
1211  this->elem_cmap_ids.clear(); // Make sure we don't have any leftover information...
1212  this->elem_cmap_elem_cnts.resize(this->num_elem_cmaps);
1213  this->elem_cmap_ids.resize(this->num_elem_cmaps);
1214 
1215  // Pack the elem_cmap_ids and elem_cmap_elem_cnts vectors
1216  {
1217  unsigned cnt=0; // Index into the vectors we're filling
1218  std::map<unsigned, std::set<std::pair<unsigned,unsigned>>>::iterator
1219  it = this->proc_border_elem_sets.begin(),
1220  end = this->proc_border_elem_sets.end();
1221 
1222  for (; it != end; ++it)
1223  {
1224  this->elem_cmap_ids[cnt] = it->first; // The ID of the proc we communicate with
1225  this->elem_cmap_elem_cnts[cnt] = cast_int<int>(it->second.size()); // The number of elems we communicate to/from that proc
1226  cnt++; // increment vector index!
1227  }
1228  }
1229 
1230  // Print the packed vectors we just filled
1231  if (verbose)
1232  {
1233  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_cnts = ";
1234  for (std::size_t i=0; i<elem_cmap_elem_cnts.size(); ++i)
1235  libMesh::out << elem_cmap_elem_cnts[i] << ", ";
1236  libMesh::out << std::endl;
1237 
1238  libMesh::out << "[" << this->processor_id() << "] elem_cmap_ids = ";
1239  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
1240  libMesh::out << elem_cmap_ids[i] << ", ";
1241  libMesh::out << std::endl;
1242  }
1243 }
1244 
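// Added worked example (hypothetical numbers): if this processor touches 3
// nodes that are also touched by processor 2 and 7 nodes also touched by
// processor 5, proc_nodes_touched_intersections has two entries and the loop
// above packs
//
//   node_cmap_ids       = [2, 5]
//   node_cmap_node_cnts = [3, 7]
//
// The element analogue is filled from proc_border_elem_sets in the same way.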
1245 
1246 
1247 
1248 void
1249 Nemesis_IO_Helper::compute_internal_and_border_elems_and_internal_nodes(const MeshBase & pmesh)
1250 {
1251  // Set of all local, active element IDs. After we have identified border element
1252  // IDs, the set_difference between this set and the border_elem_ids set will give us
1253  // the set of internal_elem_ids.
1254  std::set<unsigned> all_elem_ids;
1255 
1256  // A set of processor IDs which elements on this processor have as
1257  // neighbors. The size of this set will determine the number of
1258  // element communication maps in Exodus.
1259  std::set<unsigned> neighboring_processor_ids;
1260 
1261  // Will be used to create conversion objects capable of mapping libmesh
1262  // element numberings into Nemesis numberings.
1263  ExodusII_IO_Helper::ElementMaps element_mapper;
1264 
1265  for (const auto & elem : pmesh.active_local_element_ptr_range())
1266  {
1267  // Add this Elem's ID to all_elem_ids, later we will take the difference
1268  // between this set and the set of border_elem_ids, to get the set of
1269  // internal_elem_ids.
1270  all_elem_ids.insert(elem->id());
1271 
1272  // Will be set to true if element is determined to be a border element
1273  bool is_border_elem = false;
1274 
1275  // Construct a conversion object for this Element. This will help us map
1276  // Libmesh numberings into Nemesis numberings for sides.
1277  ExodusII_IO_Helper::Conversion conv = element_mapper.assign_conversion(elem->type());
1278 
1279  // Add all this element's node IDs to the set of all node IDs.
1280  // The set of internal_node_ids will be the set difference between
1281  // the set of all nodes and the set of border nodes.
1282  //
1283  // In addition, if any node of a local element is listed in the
1284  // border nodes list, then this element goes into the proc_border_elem_sets.
1285  // Note that there is not a 1:1 correspondence between
1286  // border_elem_ids and the entries which go into proc_border_elem_sets.
1287  // The latter is for communication purposes, ie determining which elements
1288  // should be shared between processors.
1289  for (auto node : elem->node_index_range())
1290  this->nodes_attached_to_local_elems.insert(elem->node_id(node));
1291 
1292  // Loop over element's neighbors, see if it has a neighbor which is off-processor
1293  for (auto n : elem->side_index_range())
1294  {
1295  if (elem->neighbor_ptr(n) != nullptr)
1296  {
1297  unsigned neighbor_proc_id = elem->neighbor_ptr(n)->processor_id();
1298 
1299  // If my neighbor has a different processor ID, I must be a border element.
1300  // Also track the neighboring processor ID if it is different from our own processor ID.
1301  if (neighbor_proc_id != this->processor_id())
1302  {
1303  is_border_elem = true;
1304  neighboring_processor_ids.insert(neighbor_proc_id);
1305 
1306  // Convert libmesh side(n) of this element into a side ID for Nemesis
1307  unsigned nemesis_side_id = conv.get_inverse_side_map(n);
1308 
1309  if (verbose)
1310  libMesh::out << "[" << this->processor_id() << "] LibMesh side "
1311  << n
1312  << " mapped to (1-based) Exodus side "
1313  << nemesis_side_id
1314  << std::endl;
1315 
1316  // Add this element's ID and the ID of the side which is on the boundary
1317  // to the set of border elements for this processor.
1318  // Note: if the set does not already exist, this creates it.
1319  this->proc_border_elem_sets[ neighbor_proc_id ].insert( std::make_pair(elem->id(), nemesis_side_id) );
1320  }
1321  }
1322  } // end for loop over neighbors
1323 
1324  // If we're on a border element, add it to the set
1325  if (is_border_elem)
1326  this->border_elem_ids.insert( elem->id() );
1327 
1328  } // end for loop over active local elements
1329 
1330  // Take the set_difference between all elements and border elements to get internal
1331  // element IDs
1332  std::set_difference(all_elem_ids.begin(), all_elem_ids.end(),
1333  this->border_elem_ids.begin(), this->border_elem_ids.end(),
1334  std::inserter(this->internal_elem_ids, this->internal_elem_ids.end()));
1335 
1336  // Take the set_difference between all nodes and border nodes to get internal nodes
1337  std::set_difference(this->nodes_attached_to_local_elems.begin(), this->nodes_attached_to_local_elems.end(),
1338  this->border_node_ids.begin(), this->border_node_ids.end(),
1339  std::inserter(this->internal_node_ids, this->internal_node_ids.end()));
1340 
1341  if (verbose)
1342  {
1343  libMesh::out << "[" << this->processor_id() << "] neighboring_processor_ids = ";
1344  for (const auto & id : neighboring_processor_ids)
1345  libMesh::out << id << " ";
1346  libMesh::out << std::endl;
1347  }
1348 
1349  // The size of the neighboring_processor_ids set should be the number of element communication maps
1350  this->num_elem_cmaps =
1351  cast_int<int>(neighboring_processor_ids.size());
1352 
1353  if (verbose)
1354  libMesh::out << "[" << this->processor_id() << "] "
1355  << "Number of neighboring processor IDs="
1356  << this->num_elem_cmaps
1357  << std::endl;
1358 
1359  if (verbose)
1360  {
1361  // Print out counts of border elements for each processor
1362  for (const auto & pr : proc_border_elem_sets)
1363  {
1364  libMesh::out << "[" << this->processor_id() << "] "
1365  << "Proc "
1366  << pr.first << " communicates "
1367  << pr.second.size() << " elements." << std::endl;
1368  }
1369  }
1370 
1371  // Store the number of internal and border elements, and the number of internal nodes,
1372  // to be written to the Nemesis file.
1373  this->num_internal_elems =
1374  cast_int<int>(this->internal_elem_ids.size());
1375  this->num_border_elems =
1376  cast_int<int>(this->border_elem_ids.size());
1377  this->num_internal_nodes =
1378  cast_int<int>(this->internal_node_ids.size());
1379 
1380  if (verbose)
1381  {
1382  libMesh::out << "[" << this->processor_id() << "] num_internal_nodes=" << this->num_internal_nodes << std::endl;
1383  libMesh::out << "[" << this->processor_id() << "] num_border_nodes=" << this->num_border_nodes << std::endl;
1384  libMesh::out << "[" << this->processor_id() << "] num_border_elems=" << this->num_border_elems << std::endl;
1385  libMesh::out << "[" << this->processor_id() << "] num_internal_elems=" << this->num_internal_elems << std::endl;
1386  }
1387 }
1388 
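// Added example (hypothetical ids): if local element 42 has side 1 facing an
// element owned by processor 3, the loop above effectively records
//
//   border_elem_ids.insert(42);
//   proc_border_elem_sets[3].insert(std::make_pair(42, conv.get_inverse_side_map(1)));
//
// so element 42 later shows up both in elem_mapb and in the element
// communication map destined for processor 3.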
1389 
1390 
1391 void Nemesis_IO_Helper::compute_num_global_sidesets(const MeshBase & pmesh)
1392 {
1393  // 1.) Get reference to the set of side boundary IDs
1394  std::set<boundary_id_type> global_side_boundary_ids
1395  (pmesh.get_boundary_info().get_side_boundary_ids().begin(),
1396  pmesh.get_boundary_info().get_side_boundary_ids().end());
1397 
1398  // 2.) Gather boundary side IDs from other processors
1399  this->comm().set_union(global_side_boundary_ids);
1400 
1401  // 3.) Now global_side_boundary_ids actually contains a global list of all side boundary IDs
1402  this->num_side_sets_global =
1403  cast_int<int>(global_side_boundary_ids.size());
1404 
1405  // 4.) Pack these sidesets into a vector so they can be written by Nemesis
1406  this->global_sideset_ids.clear(); // Make sure there is no leftover information
1407  this->global_sideset_ids.insert(this->global_sideset_ids.end(),
1408  global_side_boundary_ids.begin(),
1409  global_side_boundary_ids.end());
1410 
1411  if (verbose)
1412  {
1413  libMesh::out << "[" << this->processor_id() << "] global_sideset_ids = ";
1414  for (std::size_t i=0; i<this->global_sideset_ids.size(); ++i)
1415  libMesh::out << this->global_sideset_ids[i] << ", ";
1416  libMesh::out << std::endl;
1417  }
1418 
1419  // We also need global counts of sides in each of the sidesets.
1420  // Build a list of (elem, side, bc) tuples.
1421  typedef std::tuple<dof_id_type, unsigned short int, boundary_id_type> Tuple;
1422  std::vector<Tuple> bc_triples = pmesh.get_boundary_info().build_side_list();
1423 
1424  // Iterators to the beginning and end of the current range.
1425  std::vector<Tuple>::iterator
1426  it = bc_triples.begin(),
1427  new_end = bc_triples.end();
1428 
1429  while (it != new_end)
1430  {
1431  if (pmesh.elem_ref(std::get<0>(*it)).processor_id() != this->processor_id())
1432  {
1433  // Back up the new end iterators to prepare for swap
1434  --new_end;
1435 
1436  // Swap places, the non-local elem will now be "past-the-end"
1437  std::swap (*it, *new_end);
1438  }
1439  else // elem is local, go to next
1440  ++it;
1441  }
1442 
1443  // Erase from "new" end to old.
1444  bc_triples.erase(new_end, bc_triples.end());
1445 
1446  this->num_global_side_counts.clear(); // Make sure we don't have any leftover information
1447  this->num_global_side_counts.resize(this->global_sideset_ids.size());
1448 
1449  // Get the count for each global sideset ID
1450  for (std::size_t i=0; i<global_sideset_ids.size(); ++i)
1451  {
1452  int id = global_sideset_ids[i];
1453  this->num_global_side_counts[i] =
1454  cast_int<int>(std::count_if(bc_triples.begin(),
1455  bc_triples.end(),
1456  [id](const Tuple & t)->bool { return std::get<2>(t) == id; }));
1457  }
1458 
1459  if (verbose)
1460  {
1461  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1462  for (std::size_t i=0; i<this->num_global_side_counts.size(); ++i)
1463  libMesh::out << this->num_global_side_counts[i] << ", ";
1464  libMesh::out << std::endl;
1465  }
1466 
1467  // Finally sum up the result
1468  this->comm().sum(this->num_global_side_counts);
1469 
1470  if (verbose)
1471  {
1472  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1473  for (std::size_t i=0; i<this->num_global_side_counts.size(); ++i)
1474  libMesh::out << this->num_global_side_counts[i] << ", ";
1475  libMesh::out << std::endl;
1476  }
1477 }
1478 
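// Added worked example (hypothetical counts): for a sideset with boundary id 2
// that has 4 locally-owned sides on processor 0 and 6 on processor 1, each
// processor first computes its local count (4 or 6) from the pruned bc_triples
// list, and the comm().sum() above then leaves the corresponding entry of
// num_global_side_counts equal to 10 on every processor.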
1479 
1480 
1481 
1482 
1483 
1484 void Nemesis_IO_Helper::compute_num_global_nodesets(const MeshBase & pmesh)
1485 {
1486  std::set<boundary_id_type> local_node_boundary_ids;
1487 
1488  // 1.) Get reference to the set of node boundary IDs *for this processor*
1489  std::set<boundary_id_type> global_node_boundary_ids
1490  (pmesh.get_boundary_info().get_node_boundary_ids().begin(),
1491  pmesh.get_boundary_info().get_node_boundary_ids().end());
1492 
1493  // Save a copy of the local_node_boundary_ids...
1494  local_node_boundary_ids = global_node_boundary_ids;
1495 
1496  // 2.) Gather boundary node IDs from other processors
1497  this->comm().set_union(global_node_boundary_ids);
1498 
1499  // 3.) Now global_node_boundary_ids actually contains a global list of all node boundary IDs
1500  this->num_node_sets_global =
1501  cast_int<int>(global_node_boundary_ids.size());
1502 
1503  // 4.) Create a vector<int> from the global_node_boundary_ids set
1504  this->global_nodeset_ids.clear();
1505  this->global_nodeset_ids.insert(this->global_nodeset_ids.end(),
1506  global_node_boundary_ids.begin(),
1507  global_node_boundary_ids.end());
1508 
1509  if (verbose)
1510  {
1511  libMesh::out << "[" << this->processor_id() << "] global_nodeset_ids = ";
1512  for (std::size_t i=0; i<global_nodeset_ids.size(); ++i)
1513  libMesh::out << global_nodeset_ids[i] << ", ";
1514  libMesh::out << std::endl;
1515 
1516  libMesh::out << "[" << this->processor_id() << "] local_node_boundary_ids = ";
1517  for (const auto & id : local_node_boundary_ids)
1518  libMesh::out << id << ", ";
1519  libMesh::out << std::endl;
1520  }
1521 
1522  // 7.) We also need to know the number of nodes in each of the nodesets, globally.
1523 
1524  // Build list of (node-id, bc-id) tuples.
1525  typedef std::tuple<dof_id_type, boundary_id_type> Tuple;
1526  std::vector<Tuple> bc_tuples = pmesh.get_boundary_info().build_node_list();
1527 
1528  if (verbose)
1529  {
1530  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
1531  << bc_tuples.size() << std::endl;
1532  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
1533  for (const auto & t : bc_tuples)
1534  libMesh::out << "(" << std::get<0>(t) << ", " << std::get<1>(t) << ") ";
1535  libMesh::out << std::endl;
1536  }
1537 
1538  // Now get the global information. In this case, we only want to count boundary
1539  // information for nodes *owned* by this processor, so there are no duplicates.
1540 
1541  // Make sure we don't have any left over information
1542  this->num_global_node_counts.clear();
1543  this->num_global_node_counts.resize(this->global_nodeset_ids.size());
1544 
1545  // Unfortunately, we can't just count up all occurrences of a given id,
1546  // that would give us duplicate entries when we do the parallel summation.
1547  // So instead, only count entries for nodes owned by this processor.
1548  // Start by getting rid of all non-local node entries from the vectors.
1549  std::vector<Tuple>::iterator
1550  it = bc_tuples.begin(),
1551  new_end = bc_tuples.end();
1552 
1553  while (it != new_end)
1554  {
1555  if (pmesh.node_ptr(std::get<0>(*it))->processor_id() != this->processor_id())
1556  {
1557  // Back up the new end iterators to prepare for swap
1558  --new_end;
1559 
1560  // Swap places, the non-local node will now be "past-the-end"
1561  std::swap(*it, *new_end);
1562  }
1563  else // node is local, go to next
1564  ++it;
1565  }
1566 
1567  // Erase from "new" end to old end.
1568  bc_tuples.erase(new_end, bc_tuples.end());
1569 
1570  // Now we can do the local count for each ID...
1571  for (std::size_t i=0; i<global_nodeset_ids.size(); ++i)
1572  {
1573  int id = this->global_nodeset_ids[i];
1574  this->num_global_node_counts[i] =
1575  cast_int<int>(std::count_if(bc_tuples.begin(),
1576  bc_tuples.end(),
1577  [id](const Tuple & t)->bool { return std::get<1>(t) == id; }));
1578  }
1579 
1580  // And finally we can sum them up
1581  this->comm().sum(this->num_global_node_counts);
1582 
1583  if (verbose)
1584  {
1585  libMesh::out << "[" << this->processor_id() << "] num_global_node_counts = ";
1586  for (std::size_t i=0; i<num_global_node_counts.size(); ++i)
1587  libMesh::out << num_global_node_counts[i] << ", ";
1588  libMesh::out << std::endl;
1589  }
1590 }
1591 
1592 
1593 
1594 
1595 void Nemesis_IO_Helper::compute_num_global_elem_blocks(const MeshBase & pmesh)
1596 {
1597  // 1.) Loop over active local elements, build up set of subdomain IDs.
1598  std::set<subdomain_id_type> global_subdomain_ids;
1599 
1600  // This map keeps track of the number of elements in each subdomain over all processors
1601  std::map<subdomain_id_type, unsigned> global_subdomain_counts;
1602 
1603  for (const auto & elem : pmesh.active_local_element_ptr_range())
1604  {
1605  subdomain_id_type cur_subdomain = elem->subdomain_id();
1606 
1607  /*
1608  // We can't have a zero subdomain ID in Exodus (for some reason?)
1609  // so map zero subdomains to a max value...
1610  if (cur_subdomain == 0)
1611  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1612  */
1613 
1614  global_subdomain_ids.insert(cur_subdomain);
1615 
1616  // Increment the count of elements in this subdomain
1617  global_subdomain_counts[cur_subdomain]++;
1618  }
1619 
1620  // We're next going to this->comm().sum the subdomain counts, so save the local counts
1621  this->local_subdomain_counts = global_subdomain_counts;
1622 
1623  {
1624  // 2.) Copy local subdomain IDs into a vector for communication
1625  std::vector<subdomain_id_type> global_subdomain_ids_vector(global_subdomain_ids.begin(),
1626  global_subdomain_ids.end());
1627 
1628  // 3.) Gather them into an enlarged vector
1629  this->comm().allgather(global_subdomain_ids_vector);
1630 
1631  // 4.) Insert any new IDs into the set (any duplicates will be dropped)
1632  global_subdomain_ids.insert(global_subdomain_ids_vector.begin(),
1633  global_subdomain_ids_vector.end());
1634  }
1635 
1636  // 5.) Now global_subdomain_ids actually contains a global list of all subdomain IDs
1637  this->num_elem_blks_global =
1638  cast_int<int>(global_subdomain_ids.size());
1639 
1640  // Print the number of elements found locally in each subdomain
1641  if (verbose)
1642  {
1643  libMesh::out << "[" << this->processor_id() << "] ";
1644  for (const auto & pr : global_subdomain_counts)
1645  {
1646  libMesh::out << "ID: "
1647  << static_cast<unsigned>(pr.first)
1648  << ", Count: " << pr.second << ", ";
1649  }
1650  libMesh::out << std::endl;
1651  }
1652 
1653  // 6.) this->comm().sum up the number of elements in each block. We know the global
1654  // subdomain IDs, so pack them into a vector one by one. Use a vector of int since
1655  // that is what Nemesis wants
1656  this->global_elem_blk_cnts.resize(global_subdomain_ids.size());
1657 
1658  unsigned cnt=0;
1659  // Find the entry in the local map, note: if not found, will be created with 0 default value, which is OK...
1660  for (const auto & id : global_subdomain_ids)
1661  this->global_elem_blk_cnts[cnt++] = global_subdomain_counts[id];
1662 
1663  // Sum up subdomain counts from all processors
1664  this->comm().sum(this->global_elem_blk_cnts);
1665 
1666  if (verbose)
1667  {
1668  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_cnts = ";
1669  for (std::size_t i=0; i<this->global_elem_blk_cnts.size(); ++i)
1670  libMesh::out << this->global_elem_blk_cnts[i] << ", ";
1671  libMesh::out << std::endl;
1672  }
1673 
1674  // 7.) Create a vector<int> from the global_subdomain_ids set, for passing to Nemesis
1675  this->global_elem_blk_ids.clear();
1676  this->global_elem_blk_ids.insert(this->global_elem_blk_ids.end(), // pos
1677  global_subdomain_ids.begin(),
1678  global_subdomain_ids.end());
1679 
1680  if (verbose)
1681  {
1682  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_ids = ";
1683  for (std::size_t i=0; i<this->global_elem_blk_ids.size(); ++i)
1684  libMesh::out << this->global_elem_blk_ids[i] << ", ";
1685  libMesh::out << std::endl;
1686  }
1687 
1688 
1689  // 8.) We will call put_eb_info_global later, it must be called after this->put_init_global().
1690 }
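// Editor's illustration (hypothetical numbers, not from the original source):
// if rank 0 owns 4 elements in subdomain 1 and rank 1 owns 3 elements in each of
// subdomains 1 and 2, then after the allgather/sum above every rank holds
//   global_elem_blk_ids  = {1, 2}
//   global_elem_blk_cnts = {7, 3}
// which is exactly the pair of vectors put_eb_info_global() expects (step 8).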
1691 
1692 
1693 
1694 
1695 void Nemesis_IO_Helper::build_element_and_node_maps(const MeshBase & pmesh)
1696 {
1697  // If we don't have any local subdomains, it had better be because
1698  // we don't have any local elements
1699 #ifdef DEBUG
1700  if (local_subdomain_counts.empty())
1701  {
1702  libmesh_assert(pmesh.active_local_elements_begin() ==
1703  pmesh.active_local_elements_end());
1704  libmesh_assert(this->nodes_attached_to_local_elems.empty());
1705  }
1706 #endif
1707 
1708  // Elements have to be numbered contiguously based on what block
1709  // number they are in. Therefore we have to do a bit of work to get
1710  // the block (ie subdomain) numbers first and store them off as
1711  // block_ids.
1712 
1713  // Make sure there is no leftover information in the subdomain_map, and reserve
1714  // enough space to store the elements we need.
1715  this->subdomain_map.clear();
1716  for (const auto & pr : local_subdomain_counts)
1717  {
1718  if (verbose)
1719  {
1720  libMesh::out << "[" << this->processor_id() << "] "
1721  << "local_subdomain_counts [" << static_cast<unsigned>(pr.first) << "]= "
1722  << pr.second
1723  << std::endl;
1724  }
1725 
1726  // pr.first is the subdomain ID, pr.second is the number of elements it contains
1727  this->subdomain_map[pr.first].reserve(pr.second);
1728  }
1729 
1730 
1731  // First loop over the elements to figure out which elements are in which subdomain
1732  for (const auto & elem : pmesh.active_local_element_ptr_range())
1733  {
1734  // Grab the nodes while we're here.
1735  for (auto n : elem->node_index_range())
1736  this->nodes_attached_to_local_elems.insert( elem->node_id(n) );
1737 
1738  subdomain_id_type cur_subdomain = elem->subdomain_id();
1739 
1740  this->subdomain_map[cur_subdomain].push_back(elem->id());
1741  }
1742 
1743  // Set num_nodes which is used by exodusII_io_helper
1744  this->num_nodes =
1745  cast_int<int>(this->nodes_attached_to_local_elems.size());
1746 
1747  // Now come up with a 1-based numbering for these nodes
1748  this->exodus_node_num_to_libmesh.clear(); // Make sure it's empty
1749  this->exodus_node_num_to_libmesh.reserve(this->nodes_attached_to_local_elems.size());
1750 
1751  // Also make sure there's no leftover information in the map which goes the
1752  // other direction.
1753  this->libmesh_node_num_to_exodus.clear();
1754 
1755  // Set the map for nodes
1756  for (const auto & id : nodes_attached_to_local_elems)
1757  {
1758  // I.e. given exodus_node_id,
1759  // exodus_node_num_to_libmesh[ exodus_node_id ] returns the libmesh ID for that node.
1760  // Note that even though most of Exodus is 1-based, this code will map an Exodus ID of
1761  // zero to some libmesh node ID. Is that a problem?
1762  this->exodus_node_num_to_libmesh.push_back(id);
1763 
1764  // Likewise, given libmesh_node_id,
1765  // libmesh_node_num_to_exodus[ libmesh_node_id ] returns the *Exodus* ID for that node.
1766  // Unlike the exodus_node_num_to_libmesh vector above, this one is a std::map
1767  this->libmesh_node_num_to_exodus[id] =
1768  cast_int<int>(this->exodus_node_num_to_libmesh.size()); // should never be zero...
1769  }
1770 
1771  // Now we're going to loop over the subdomain map and build a few things right
1772  // now that we'll use later.
1773 
1774  // First make sure our data structures don't have any leftover data...
1775  this->exodus_elem_num_to_libmesh.clear();
1776  this->block_ids.clear();
1777  this->libmesh_elem_num_to_exodus.clear();
1778 
1779  // Now loop over each subdomain and get a unique numbering for the elements
1780  for (auto & pr : subdomain_map)
1781  {
1782  block_ids.push_back(pr.first);
1783 
1784  // Vector of element IDs for this subdomain
1785  std::vector<dof_id_type> & elem_ids_this_subdomain = pr.second;
1786 
1787  // The code below assumes this subdomain block is not empty, make sure that's the case!
1788  if (elem_ids_this_subdomain.size() == 0)
1789  libmesh_error_msg("Error, no element IDs found in subdomain " << pr.first);
1790 
1791  ExodusII_IO_Helper::ElementMaps em;
1792 
1793  // Use the first element in this block to get representative information.
1794  // Note that Exodus assumes all elements in a block are of the same type!
1795  // We are using that same assumption here!
1796  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion
1797  (pmesh.elem_ref(elem_ids_this_subdomain[0]).type());
1798  this->num_nodes_per_elem =
1799  pmesh.elem_ref(elem_ids_this_subdomain[0]).n_nodes();
1800 
1801  // Get a reference to the connectivity vector for this subdomain. This vector
1802  // is most likely empty, we are going to fill it up now.
1803  std::vector<int> & current_block_connectivity = this->block_id_to_elem_connectivity[pr.first];
1804 
1805  // Just in case it's not already empty...
1806  current_block_connectivity.clear();
1807  current_block_connectivity.resize(elem_ids_this_subdomain.size() * this->num_nodes_per_elem);
1808 
1809  for (auto i : index_range(elem_ids_this_subdomain))
1810  {
1811  auto elem_id = elem_ids_this_subdomain[i];
1812 
1813  // Set the number map for elements
1814  this->exodus_elem_num_to_libmesh.push_back(elem_id);
1815  this->libmesh_elem_num_to_exodus[elem_id] =
1816  cast_int<int>(this->exodus_elem_num_to_libmesh.size());
1817 
1818  const Elem & elem = pmesh.elem_ref(elem_id);
1819 
1820  // Exodus/Nemesis want every block to have the same element type
1821  // libmesh_assert_equal_to (elem->type(), conv.get_canonical_type());
1822 
1823  // But we can get away with writing e.g. HEX8 and INFHEX8 in
1824  // the same block...
1825  libmesh_assert_equal_to (elem.n_nodes(), Elem::build(conv.get_canonical_type(), nullptr)->n_nodes());
1826 
1827  for (auto j : IntRange<int>(0, this->num_nodes_per_elem))
1828  {
1829  const unsigned int connect_index = (i*this->num_nodes_per_elem)+j;
1830  const unsigned int elem_node_index = conv.get_node_map(j);
1831 
1832  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(elem.node_id(elem_node_index));
1833  if (node_it == libmesh_node_num_to_exodus.end())
1834  libmesh_error_msg("Node number " << elem.node_id(elem_node_index) << " not found in libmesh_node_num_to_exodus map.");
1835 
1836  current_block_connectivity[connect_index] = node_it->second;
1837  }
1838  } // End loop over elems in this subdomain
1839  } // end loop over subdomain_map
1840 }
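// Editor's sketch of the numbering built above (assumed example): if the locally
// attached libMesh node ids are {7, 9, 42}, then
//   exodus_node_num_to_libmesh = [7, 9, 42]         (0-based position -> libMesh id)
//   libmesh_node_num_to_exodus = {7:1, 9:2, 42:3}   (libMesh id -> 1-based Exodus id)
// so libmesh_node_num_to_exodus[id] - 1 indexes back into exodus_node_num_to_libmesh.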
1841 
1842 
1843 
1844 
1845 
1846 void Nemesis_IO_Helper::compute_border_node_ids(const MeshBase & pmesh)
1847 {
1848  // The set which will eventually contain the IDs of "border nodes". These are nodes
1849  // that lie on the boundary between one or more processors.
1850  //std::set<unsigned> border_node_ids;
1851 
1852  // map from processor ID to set of nodes which elements from this processor "touch",
1853  // that is,
1854  // proc_nodes_touched[p] = (set all node IDs found in elements owned by processor p)
1855  std::map<unsigned, std::set<unsigned>> proc_nodes_touched;
1856 
1857 
1858  // We are going to create a lot of intermediate data structures here, so make sure
1859  // as many as possible are cleaned up by creating a scope!
1860  {
1861  // Loop over active (not just active local) elements, make sets of node IDs for each
1862  // processor which has an element that "touches" a node.
1863  for (const auto & elem : pmesh.active_element_ptr_range())
1864  {
1865  // Get reference to the set for this processor. If it does not exist
1866  // it will be created.
1867  std::set<unsigned> & set_p = proc_nodes_touched[ elem->processor_id() ];
1868 
1869  // Insert all nodes touched by this element into the set
1870  for (auto node : elem->node_index_range())
1871  set_p.insert(elem->node_id(node));
1872  }
1873 
1874  // The number of node communication maps is the number of other processors
1875  // with which we share nodes. (I think.) This is just the size of the map we just
1876  // created, minus 1 unless this processor has no nodes of its own.
1877  this->num_node_cmaps =
1878  cast_int<int>(proc_nodes_touched.size() -
1879  proc_nodes_touched.count(this->processor_id()));
1880 
1881  // We can't be connecting to more processors than exist outside
1882  // ourselves
1883  libmesh_assert_less (static_cast<unsigned>(this->num_node_cmaps), this->n_processors());
1884 
1885  if (verbose)
1886  {
1887  libMesh::out << "[" << this->processor_id()
1888  << "] proc_nodes_touched contains "
1889  << proc_nodes_touched.size()
1890  << " sets of nodes."
1891  << std::endl;
1892 
1893  for (const auto & pr : proc_nodes_touched)
1894  libMesh::out << "[" << this->processor_id()
1895  << "] proc_nodes_touched[" << pr.first << "] has "
1896  << pr.second.size()
1897  << " entries."
1898  << std::endl;
1899  }
1900 
1901 
1902  // Loop over all the sets we just created and compute intersections with this
1903  // processor's set. Obviously, don't intersect with ourself.
1904  for (auto & pr : proc_nodes_touched)
1905  {
1906  // Don't compute intersections with ourself
1907  if (pr.first == this->processor_id())
1908  continue;
1909 
1910  // Otherwise, compute intersection with other processor and ourself
1911  std::set<unsigned> & my_set = proc_nodes_touched[this->processor_id()];
1912  std::set<unsigned> & other_set = pr.second;
1913  std::set<unsigned> & result_set = this->proc_nodes_touched_intersections[pr.first]; // created if does not exist
1914 
1915  std::set_intersection(my_set.begin(), my_set.end(),
1916  other_set.begin(), other_set.end(),
1917  std::inserter(result_set, result_set.end()));
1918  }
1919 
1920  if (verbose)
1921  {
1922  for (const auto & pr : proc_nodes_touched_intersections)
1923  libMesh::out << "[" << this->processor_id()
1924  << "] this->proc_nodes_touched_intersections[" << pr.first << "] has "
1925  << pr.second.size()
1926  << " entries."
1927  << std::endl;
1928  }
1929 
1930  // Compute the set_union of all the preceding intersections. This will be the set of
1931  // border node IDs for this processor.
1932  for (auto & pr : proc_nodes_touched_intersections)
1933  {
1934  std::set<unsigned> & other_set = pr.second;
1935  std::set<unsigned> intermediate_result; // Don't think we can insert into one of the sets we're unioning...
1936 
1937  std::set_union(this->border_node_ids.begin(), this->border_node_ids.end(),
1938  other_set.begin(), other_set.end(),
1939  std::inserter(intermediate_result, intermediate_result.end()));
1940 
1941  // Swap our intermediate result into the final set
1942  this->border_node_ids.swap(intermediate_result);
1943  }
1944 
1945  libmesh_assert_less_equal
1946  (this->proc_nodes_touched_intersections.size(),
1947  std::size_t(this->num_node_cmaps));
1948 
1949  if (verbose)
1950  {
1951  libMesh::out << "[" << this->processor_id()
1952  << "] border_node_ids.size()=" << this->border_node_ids.size()
1953  << std::endl;
1954  }
1955  } // end scope for border node ID creation
1956 
1957  // Store the number of border node IDs to be written to Nemesis file
1958  this->num_border_nodes = cast_int<int>(this->border_node_ids.size());
1959 }
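// Editor's note: the computation above is purely set-based.  A node is a "border"
// node for this rank exactly when it lies in our own touched-node set and in at
// least one other rank's touched-node set, i.e.
//   border_node_ids = union over p != this_rank of intersection(touched[this_rank], touched[p])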
1960 
1961 
1962 
1963 
1964 
1965 void Nemesis_IO_Helper::write_nodesets(const MeshBase & mesh)
1966 {
1967  // Write the nodesets. In Nemesis, the idea is to "create space" for the global
1968  // set of boundary nodesets, but to only write node IDs which are local to the current
1969  // processor. This is what is done in Nemesis files created by the "loadbal" script.
1970 
1971  // Store a map of vectors for boundary node IDs on this processor.
1972  // Use a vector of int here so it can be passed directly to Exodus.
1973  std::map<boundary_id_type, std::vector<int>> local_node_boundary_id_lists;
1974 
1975  // FIXME: We should build this list only one time!! We already built it above, but we
1976  // did not have the libmesh to exodus node mapping at that time... for now we'll just
1977  // build it here again, hopefully it's small relative to the size of the entire mesh.
1978 
1979  // Build list of (node-id, bc-id) tuples.
1980  typedef std::tuple<dof_id_type, boundary_id_type> Tuple;
1981  std::vector<Tuple> bc_tuples = mesh.get_boundary_info().build_node_list();
1982 
1983  if (verbose)
1984  {
1985  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
1986  << bc_tuples.size() << std::endl;
1987  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
1988  for (const auto & t : bc_tuples)
1989  libMesh::out << "(" << std::get<0>(t) << ", " << std::get<1>(t) << ") ";
1990  libMesh::out << std::endl;
1991  }
1992 
1993  // For each node in the node list, add it to the vector of node IDs for that
1994  // set for the local processor. This will be used later when writing Exodus
1995  // nodesets.
1996  for (const auto & t : bc_tuples)
1997  {
1998  // Don't try to grab a reference to the vector unless the current node is attached
1999  // to a local element. Otherwise, another processor will be responsible for writing it in its nodeset.
2000  std::map<int, int>::iterator it = this->libmesh_node_num_to_exodus.find(std::get<0>(t));
2001 
2002  if (it != this->libmesh_node_num_to_exodus.end())
2003  {
2004  // Get reference to the vector where this node ID will be inserted. If it
2005  // doesn't yet exist, this will create it.
2006  std::vector<int> & current_id_set = local_node_boundary_id_lists[std::get<1>(t)];
2007 
2008  // Push back Exodus-mapped node ID for this set
2009  // TODO: reserve space in these vectors somehow.
2010  current_id_set.push_back( it->second );
2011  }
2012  }
2013 
2014  // See what we got
2015  if (verbose)
2016  {
2017  for (const auto & pr : local_node_boundary_id_lists)
2018  {
2019  libMesh::out << "[" << this->processor_id() << "] ID: " << pr.first << ", ";
2020 
2021  // Libmesh node ID (Exodus Node ID)
2022  for (const auto & id : pr.second)
2023  libMesh::out << id << ", ";
2024  libMesh::out << std::endl;
2025  }
2026  }
2027 
2028  // Loop over *global* nodeset IDs, call the Exodus API. Note that some nodesets may be empty
2029  // for a given processor.
2030  if (global_nodeset_ids.size() > 0)
2031  {
2032  NamesData names_table(global_nodeset_ids.size(), MAX_STR_LENGTH);
2033 
2034  for (std::size_t i=0; i<this->global_nodeset_ids.size(); ++i)
2035  {
2036  const std::string & current_ns_name =
2037  mesh.get_boundary_info().get_nodeset_name
2038  (cast_int<boundary_id_type>(global_nodeset_ids[i]));
2039 
2040  // Store this name in a data structure that will be used to
2041  // write sideset names to file.
2042  names_table.push_back_entry(current_ns_name);
2043 
2044  if (verbose)
2045  {
2046  libMesh::out << "[" << this->processor_id()
2047  << "] Writing out Exodus nodeset info for ID: " << global_nodeset_ids[i]
2048  << ", Name: " << current_ns_name
2049  << std::endl;
2050  }
2051 
2052  // Convert current global_nodeset_id into an exodus ID, which can't be zero...
2053  int exodus_id = global_nodeset_ids[i];
2054 
2055  /*
2056  // Exodus can't handle zero nodeset IDs (?) Use max short here since
2057  // when libmesh reads it back in, it will want to store it as a short...
2058  if (exodus_id==0)
2059  exodus_id = std::numeric_limits<short>::max();
2060  */
2061 
2062  // Try to find this boundary ID in the local list we created
2063  auto it =
2064  local_node_boundary_id_lists.find (cast_int<boundary_id_type>(this->global_nodeset_ids[i]));
2065 
2066  // No nodes found for this boundary ID on this processor
2067  if (it == local_node_boundary_id_lists.end())
2068  {
2069  if (verbose)
2070  libMesh::out << "[" << this->processor_id()
2071  << "] No nodeset data for ID: " << global_nodeset_ids[i]
2072  << " on this processor." << std::endl;
2073 
2074  // Call the Exodus interface to write the parameters of this node set
2075  this->ex_err = exII::ex_put_node_set_param(this->ex_id,
2076  exodus_id,
2077  0, /* No nodes for this ID */
2078  0 /* No distribution factors */);
2079  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2080 
2081  }
2082  else // Boundary ID *was* found in list
2083  {
2084  // Get reference to the vector of node IDs
2085  std::vector<int> & current_nodeset_ids = it->second;
2086 
2087  // Call the Exodus interface to write the parameters of this node set
2088  this->ex_err = exII::ex_put_node_set_param(this->ex_id,
2089  exodus_id,
2090  current_nodeset_ids.size(),
2091  0 /* No distribution factors */);
2092 
2093  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2094 
2095  // Call Exodus interface to write the actual node IDs for this boundary ID
2096  this->ex_err = exII::ex_put_node_set(this->ex_id,
2097  exodus_id,
2098  current_nodeset_ids.data());
2099 
2100  EX_CHECK_ERR(this->ex_err, "Error writing nodesets in Nemesis");
2101 
2102  }
2103  } // end loop over global nodeset IDs
2104 
2105  // Write out the nodeset names
2106  ex_err = exII::ex_put_names(ex_id,
2107  exII::EX_NODE_SET,
2108  names_table.get_char_star_star());
2109  EX_CHECK_ERR(ex_err, "Error writing nodeset names");
2110 } // end if (global_nodeset_ids.size() > 0)
2111 }
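// Editor's note: calling ex_put_node_set_param() with a zero count for nodesets
// that have no local nodes is intentional -- every processor declares every
// global nodeset, so the per-processor Nemesis files stay structurally identical
// even when only some ranks contribute node ids.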
2112 
2113 
2114 
2115 
2116 void Nemesis_IO_Helper::write_sidesets(const MeshBase & mesh)
2117 {
2118  // Write the sidesets. In Nemesis, the idea is to "create space" for the global
2119  // set of boundary sidesets, but to only write sideset IDs which are local to the current
2120  // processor. This is what is done in Nemesis files created by the "loadbal" script.
2121  // See also: ExodusII_IO_Helper::write_sidesets()...
2122 
2123 
2124  // Store a map of vectors for boundary side IDs on this processor.
2125  // Use a vector of int here so it can be passed directly to Exodus.
2126  std::map<boundary_id_type, std::vector<int>> local_elem_boundary_id_lists;
2127  std::map<boundary_id_type, std::vector<int>> local_elem_boundary_id_side_lists;
2128 
2129  ExodusII_IO_Helper::ElementMaps em;
2130 
2131  // FIXME: We already built this list once, we should reuse that information!
2132  std::vector<std::tuple<dof_id_type, unsigned short int, boundary_id_type>> bndry_elem_side_id_list =
2133  mesh.get_boundary_info().build_side_list();
2134 
2135  // Integer looping, skipping non-local elements
2136  for (std::size_t i=0; i<bndry_elem_side_id_list.size(); ++i)
2137  {
2138  // Get pointer to current Elem
2139  const Elem * elem = mesh.elem_ptr(std::get<0>(bndry_elem_side_id_list[i]));
2140 
2141  std::vector<const Elem *> family;
2142 #ifdef LIBMESH_ENABLE_AMR
2143  // We need to build up active elements if AMR is enabled and add
2144  // them to the exodus sidesets instead of the potentially inactive "parent" elements
2145  // Technically we don't need to "reset" the tree since the vector was just created.
2146  elem->active_family_tree_by_side(family, std::get<1>(bndry_elem_side_id_list[i]), /*reset tree=*/false);
2147 #else
2148  // If AMR is not even enabled, just push back the element itself
2149  family.push_back( elem );
2150 #endif
2151 
2152  // Loop over all the elements in the family tree, store their converted IDs
2153  // and side IDs to the map's vectors. TODO: Somehow reserve enough space for these
2154  // push_back's...
2155  for (std::size_t j=0; j<family.size(); ++j)
2156  {
2157  const dof_id_type f_id = family[j]->id();
2158  const Elem & f = mesh.elem_ref(f_id);
2159 
2160  // If element is local, process it
2161  if (f.processor_id() == this->processor_id())
2162  {
2163  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(f.type());
2164 
2165  // Use the libmesh to exodus data structure map to get the proper sideset IDs
2166  // The data structure contains the "collapsed" contiguous ids.
2167  //
2168  // We know the parent element is local, but let's be absolutely sure that all the children have been
2169  // actually mapped to Exodus IDs before we blindly try to add them...
2170  std::map<int,int>::iterator it = this->libmesh_elem_num_to_exodus.find( f_id );
2171  if (it != this->libmesh_elem_num_to_exodus.end())
2172  {
2173  local_elem_boundary_id_lists[ std::get<2>(bndry_elem_side_id_list[i]) ].push_back( it->second );
2174  local_elem_boundary_id_side_lists[ std::get<2>(bndry_elem_side_id_list[i]) ].push_back(conv.get_inverse_side_map( std::get<1>(bndry_elem_side_id_list[i]) ));
2175  }
2176  else
2177  libmesh_error_msg("Error, no Exodus mapping for Elem " \
2178  << f_id \
2179  << " on processor " \
2180  << this->processor_id());
2181  }
2182  }
2183  }
2184 
2185 
2186  // Loop over *global* sideset IDs, call the Exodus API. Note that some sidesets may be empty
2187  // for a given processor.
2188  if (global_sideset_ids.size() > 0)
2189  {
2190  NamesData names_table(global_sideset_ids.size(), MAX_STR_LENGTH);
2191 
2192  for (std::size_t i=0; i<this->global_sideset_ids.size(); ++i)
2193  {
2194  const std::string & current_ss_name =
2195  mesh.get_boundary_info().get_sideset_name
2196  (cast_int<boundary_id_type>(global_sideset_ids[i]));
2197 
2198  // Store this name in a data structure that will be used to
2199  // write sideset names to file.
2200  names_table.push_back_entry(current_ss_name);
2201 
2202  if (verbose)
2203  {
2204  libMesh::out << "[" << this->processor_id()
2205  << "] Writing out Exodus sideset info for ID: " << global_sideset_ids[i]
2206  << ", Name: " << current_ss_name
2207  << std::endl;
2208  }
2209 
2210  // Convert current global_sideset_id into an exodus ID, which can't be zero...
2211  int exodus_id = global_sideset_ids[i];
2212 
2213  /*
2214  // Exodus can't handle zero sideset IDs (?) Use max short here since
2215  // when libmesh reads it back in, it will want to store it as a short...
2216  if (exodus_id==0)
2217  exodus_id = std::numeric_limits<short>::max();
2218  */
2219 
2220  // Try to find this boundary ID in the local list we created
2221  auto it =
2222  local_elem_boundary_id_lists.find (cast_int<boundary_id_type>(this->global_sideset_ids[i]));
2223 
2224  // No sides found for this boundary ID on this processor
2225  if (it == local_elem_boundary_id_lists.end())
2226  {
2227  if (verbose)
2228  libMesh::out << "[" << this->processor_id()
2229  << "] No sideset data for ID: " << global_sideset_ids[i]
2230  << " on this processor." << std::endl;
2231 
2232  // Call the Exodus interface to write the parameters of this side set
2233  this->ex_err = exII::ex_put_side_set_param(this->ex_id,
2234  exodus_id,
2235  0, /* No sides for this ID */
2236  0 /* No distribution factors */);
2237  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2238 
2239  }
2240  else // Boundary ID *was* found in list
2241  {
2242  // Get iterator to sides vector as well
2243  auto it_sides =
2244  local_elem_boundary_id_side_lists.find (cast_int<boundary_id_type>(this->global_sideset_ids[i]));
2245 
2246  libmesh_assert (it_sides != local_elem_boundary_id_side_lists.end());
2247 
2248  // Get reference to the vector of elem IDs
2249  std::vector<int> & current_sideset_elem_ids = it->second;
2250 
2251  // Get reference to the vector of side IDs
2252  std::vector<int> & current_sideset_side_ids = (*it_sides).second;
2253 
2254  // Call the Exodus interface to write the parameters of this side set
2255  this->ex_err = exII::ex_put_side_set_param(this->ex_id,
2256  exodus_id,
2257  current_sideset_elem_ids.size(),
2258  0 /* No distribution factors */);
2259 
2260  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2261 
2262  // Call Exodus interface to write the actual side IDs for this boundary ID
2263  this->ex_err = exII::ex_put_side_set(this->ex_id,
2264  exodus_id,
2265  current_sideset_elem_ids.data(),
2266  current_sideset_side_ids.data());
2267 
2268  EX_CHECK_ERR(this->ex_err, "Error writing sidesets in Nemesis");
2269  }
2270  } // end for loop over global sideset IDs
2271 
2272  // Write sideset names to file. Some of these may be blank strings
2273  // if the current processor didn't have all the sideset names for
2274  // any reason...
2275  ex_err = exII::ex_put_names(ex_id,
2276  exII::EX_SIDE_SET,
2277  names_table.get_char_star_star());
2278  EX_CHECK_ERR(ex_err, "Error writing sideset names");
2279 
2280  } // end if (global_sideset_ids.size() > 0)
2281 }
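// Editor's note: with AMR enabled, the active_family_tree_by_side() call above
// means a boundary side recorded on an inactive parent element is emitted once
// for each active child side, after remapping through libmesh_elem_num_to_exodus
// and conv.get_inverse_side_map().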
2282 
2283 
2284 
2285 void Nemesis_IO_Helper::write_nodal_coordinates(const MeshBase & mesh, bool /*use_discontinuous*/)
2286 {
2287  auto local_num_nodes = this->exodus_node_num_to_libmesh.size();
2288 
2289  x.resize(local_num_nodes);
2290  y.resize(local_num_nodes);
2291  z.resize(local_num_nodes);
2292 
2293  // Just loop over our list outputting the nodes the way we built the map
2294  for (auto i : IntRange<int>(0, local_num_nodes))
2295  {
2296  const Point & pt = mesh.point(this->exodus_node_num_to_libmesh[i]);
2297  x[i]=pt(0);
2298  y[i]=pt(1);
2299  z[i]=pt(2);
2300  }
2301 
2302  if (local_num_nodes)
2303  {
2304  if (_single_precision)
2305  {
2306  std::vector<float>
2307  x_single(x.begin(), x.end()),
2308  y_single(y.begin(), y.end()),
2309  z_single(z.begin(), z.end());
2310 
2311  ex_err = exII::ex_put_coord(ex_id,
2312  x_single.data(),
2313  y_single.data(),
2314  z_single.data());
2315  }
2316  else
2317  {
2318  // Call Exodus API to write nodal coordinates...
2319  ex_err = exII::ex_put_coord(ex_id,
2320  x.data(),
2321  y.data(),
2322  z.data());
2323  }
2324  EX_CHECK_ERR(ex_err, "Error writing node coordinates");
2325 
2326  // And write the nodal map we created for them
2327  ex_err = exII::ex_put_node_num_map(ex_id, exodus_node_num_to_libmesh.data());
2328  EX_CHECK_ERR(ex_err, "Error writing node num map");
2329  }
2330  else // Does the Exodus API want us to write empty nodal coordinates?
2331  {
2332  ex_err = exII::ex_put_coord(ex_id, nullptr, nullptr, nullptr);
2333  EX_CHECK_ERR(ex_err, "Error writing empty node coordinates");
2334 
2335  ex_err = exII::ex_put_node_num_map(ex_id, nullptr);
2336  EX_CHECK_ERR(ex_err, "Error writing empty node num map");
2337  }
2338 }
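// Editor's note: the x/y/z arrays above follow the 1-based Exodus numbering set
// up in build_element_and_node_maps(), i.e. coordinate i belongs to libMesh node
// exodus_node_num_to_libmesh[i]; the node num map then records exactly that
// correspondence in the file.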
2339 
2340 
2341 
2342 
2343 
2344 void Nemesis_IO_Helper::write_elements(const MeshBase & mesh, bool /*use_discontinuous*/)
2345 {
2346  // Only write elements if there are elements blocks available.
2347  if (this->num_elem_blks_global > 0)
2348  {
2349  // Data structure to store element block names that will be used to
2350  // write the element block names to file.
2351  NamesData names_table(this->num_elem_blks_global, MAX_STR_LENGTH);
2352 
2353  // Loop over all blocks, even if we don't have elements in each block.
2354  // If we don't have elements we need to write out a 0 for that block...
2355  for (auto i : IntRange<int>(0, this->num_elem_blks_global))
2356  {
2357  // Even if there are no elements for this block on the current
2358  // processor, we still want to write its name to file, if
2359  // possible. MeshBase::subdomain_name() will just return an
2360  // empty string if there is no name associated with the current
2361  // block.
2362  names_table.push_back_entry
2363  (mesh.subdomain_name(cast_int<subdomain_id_type>(this->global_elem_blk_ids[i])));
2364 
2365  // Search for the current global block ID in the map
2366  std::map<int, std::vector<int>>::iterator it =
2367  this->block_id_to_elem_connectivity.find( this->global_elem_blk_ids[i] );
2368 
2369  // If not found, write a zero to file....
2370  if (it == this->block_id_to_elem_connectivity.end())
2371  {
2372  this->ex_err = exII::ex_put_elem_block(this->ex_id,
2373  this->global_elem_blk_ids[i],
2374  "Empty",
2375  0, /* n. elements in this block */
2376  0, /* n. nodes per element */
2377  0); /* number of attributes per element */
2378 
2379  EX_CHECK_ERR(this->ex_err, "Error writing element block from Nemesis.");
2380  }
2381 
2382  // Otherwise, write the actual block information and connectivity to file
2383  else
2384  {
2385  subdomain_id_type block =
2386  cast_int<subdomain_id_type>(it->first);
2387  std::vector<int> & this_block_connectivity = it->second;
2388  std::vector<dof_id_type> & elements_in_this_block = subdomain_map[block];
2389 
2390  ExodusII_IO_Helper::ElementMaps em;
2391 
2392  //Use the first element in this block to get representative information.
2393  //Note that Exodus assumes all elements in a block are of the same type!
2394  //We are using that same assumption here!
2395  const ExodusII_IO_Helper::Conversion conv =
2396  em.assign_conversion(mesh.elem_ref(elements_in_this_block[0]).type());
2397 
2398  this->num_nodes_per_elem =
2399  mesh.elem_ref(elements_in_this_block[0]).n_nodes();
2400 
2401  ex_err = exII::ex_put_elem_block(ex_id,
2402  block,
2403  conv.exodus_elem_type().c_str(),
2404  elements_in_this_block.size(),
2405  this->num_nodes_per_elem,
2406  0);
2407  EX_CHECK_ERR(ex_err, "Error writing element block from Nemesis.");
2408 
2409  ex_err = exII::ex_put_elem_conn(ex_id,
2410  block,
2411  this_block_connectivity.data());
2412  EX_CHECK_ERR(ex_err, "Error writing element connectivities from Nemesis.");
2413  }
2414  } // end loop over global block IDs
2415 
2416  // Only call this once, not in the loop above!
2417  ex_err = exII::ex_put_elem_num_map(ex_id,
2418  exodus_elem_num_to_libmesh.empty() ? nullptr : exodus_elem_num_to_libmesh.data());
2419  EX_CHECK_ERR(ex_err, "Error writing element map");
2420 
2421  // Write the element block names to file.
2422  ex_err = exII::ex_put_names(ex_id, exII::EX_ELEM_BLOCK, names_table.get_char_star_star());
2423  EX_CHECK_ERR(ex_err, "Error writing element block names");
2424  } // end if (this->num_elem_blks_global > 0)
2425 }
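// Editor's note: blocks with no local elements are still declared above via
// ex_put_elem_block(..., "Empty", 0, 0, 0), so each per-processor file advertises
// all num_elem_blks_global blocks in the same order as every other file.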
2426 
2427 
2428 
2429 
2430 
2431 void Nemesis_IO_Helper::write_nodal_solution(const std::vector<Number> & values,
2432  const std::vector<std::string> & names,
2433  int timestep)
2434 {
2435  int num_vars = cast_int<int>(names.size());
2436  //int num_values = values.size(); // Not used?
2437 
2438  for (int c=0; c<num_vars; c++)
2439  {
2440 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
2441  std::vector<Real> real_parts(num_nodes);
2442  std::vector<Real> imag_parts(num_nodes);
2443  std::vector<Real> magnitudes(num_nodes);
2444 
2445  for (int i=0; i<num_nodes; ++i)
2446  {
2447  Number value = values[this->exodus_node_num_to_libmesh[i]*num_vars + c];
2448  real_parts[i] = value.real();
2449  imag_parts[i] = value.imag();
2450  magnitudes[i] = std::abs(value);
2451  }
2452  write_nodal_values(3*c+1,real_parts,timestep);
2453  write_nodal_values(3*c+2,imag_parts,timestep);
2454  write_nodal_values(3*c+3,magnitudes,timestep);
2455 #else
2456  std::vector<Number> cur_soln(num_nodes);
2457 
2458  // Copy out this variable's solution
2459  for (int i=0; i<num_nodes; i++)
2460  cur_soln[i] = values[this->exodus_node_num_to_libmesh[i]*num_vars + c];
2461 
2462  write_nodal_values(c+1,cur_soln,timestep);
2463 #endif
2464  }
2465 }
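// Editor's note: the indexing values[exodus_node_num_to_libmesh[i]*num_vars + c]
// above assumes the incoming vector is node-major, i.e. all variables for node 0,
// then all variables for node 1, and so on.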
2466 
2467 
2468 
2469 void Nemesis_IO_Helper::write_nodal_solution(const NumericVector<Number> & parallel_soln,
2470  const std::vector<std::string> & names,
2471  int timestep,
2472  const std::vector<std::string> & output_names)
2473 {
2474  int num_vars = cast_int<int>(names.size());
2475 
2476  for (int c=0; c<num_vars; c++)
2477  {
2478  // Find the position of names[c] in the output_names vector, if it exists.
2479  auto pos = std::find(output_names.begin(), output_names.end(), names[c]);
2480 
2481  // Skip names[c] if it's not supposed to be output.
2482  if (pos == output_names.end())
2483  continue;
2484 
2485  // Compute the (zero-based) index which determines which
2486  // variable this will be as far as Nemesis is concerned. This
2487  // will be used below in the write_nodal_values() call.
2488  int variable_name_position =
2489  cast_int<int>(std::distance(output_names.begin(), pos));
2490 
2491  // Fill up a std::vector with the dofs for the current variable
2492  std::vector<numeric_index_type> required_indices(num_nodes);
2493 
2494  for (int i=0; i<num_nodes; i++)
2495  required_indices[i] = static_cast<dof_id_type>(this->exodus_node_num_to_libmesh[i]) * num_vars + c;
2496 
2497  // Get the dof values required to write just our local part of
2498  // the solution vector.
2499  std::vector<Number> local_soln;
2500  parallel_soln.localize(local_soln, required_indices);
2501 
2502 #ifndef LIBMESH_USE_COMPLEX_NUMBERS
2503  // Call the ExodusII_IO_Helper function to write the data.
2504  write_nodal_values(variable_name_position + 1, local_soln, timestep);
2505 #else
2506  // We have the local (complex) values. Now extract the real,
2507  // imaginary, and magnitude values from them.
2508  std::vector<Real> real_parts(num_nodes);
2509  std::vector<Real> imag_parts(num_nodes);
2510  std::vector<Real> magnitudes(num_nodes);
2511 
2512  for (int i=0; i<num_nodes; ++i)
2513  {
2514  real_parts[i] = local_soln[i].real();
2515  imag_parts[i] = local_soln[i].imag();
2516  magnitudes[i] = std::abs(local_soln[i]);
2517  }
2518 
2519  // Write the real, imaginary, and magnitude values to file.
2520  write_nodal_values(3 * variable_name_position + 1, real_parts, timestep);
2521  write_nodal_values(3 * variable_name_position + 2, imag_parts, timestep);
2522  write_nodal_values(3 * variable_name_position + 3, magnitudes, timestep);
2523 #endif
2524  }
2525 }
2526 
2527 
2528 
2529 
2530 void
2531 Nemesis_IO_Helper::initialize_element_variables(std::vector<std::string> names,
2532  const std::vector<std::set<subdomain_id_type>> & vars_active_subdomains)
2533 {
2534  // Quick return if there are no element variables to write
2535  if (names.size() == 0)
2536  return;
2537 
2538  // Quick return if we have already called this function
2539  if (_elem_vars_initialized)
2540  return;
2541 
2542  // Be sure that variables in the file match what we are asking for
2543  if (num_elem_vars > 0)
2544  {
2545  this->check_existing_vars(ELEMENTAL, names, this->elem_var_names);
2546  return;
2547  }
2548 
2549  // Set the flag so we can skip this stuff on subsequent calls to
2550  // initialize_element_variables()
2551  _elem_vars_initialized = true;
2552 
2553  this->write_var_names(ELEMENTAL, names);
2554 
2555  // Create a truth table from global_elem_blk_ids and the information
2556  // in vars_active_subdomains. Create a truth table of
2557  // size global_elem_blk_ids.size() * names.size().
2558  std::vector<int> truth_tab(global_elem_blk_ids.size() * names.size());
2559  for (unsigned int blk=0; blk<global_elem_blk_ids.size(); ++blk)
2560  for (unsigned int var=0; var<names.size(); ++var)
2561  if (vars_active_subdomains[var].empty() ||
2562  vars_active_subdomains[var].count
2563  (cast_int<subdomain_id_type>(global_elem_blk_ids[blk])))
2564  truth_tab[names.size()*blk + var] = 1;
2565 
2566  // Write truth table to file.
2567  if (truth_tab.size())
2568  {
2569  ex_err = exII::ex_put_elem_var_tab(ex_id,
2570  cast_int<int>(global_elem_blk_ids.size()),
2571  cast_int<int>(names.size()),
2572  truth_tab.data());
2573  EX_CHECK_ERR(ex_err, "Error writing element truth table.");
2574  }
2575 }
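// Editor's illustration of the truth table layout (hypothetical example): with
// global_elem_blk_ids = {1, 2} and names = {"u", "v"}, where "v" is restricted to
// subdomain 2, the block-major table written above would be
//   truth_tab = [ 1, 0,     // block 1: "u" active, "v" inactive
//                 1, 1 ]    // block 2: both active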
2576 
2577 
2578 
2579 void
2580 Nemesis_IO_Helper::write_element_values(const MeshBase & mesh,
2581  const NumericVector<Number> & parallel_soln,
2582  const std::vector<std::string> & names,
2583  int timestep,
2584  const std::vector<std::set<subdomain_id_type>> & vars_active_subdomains)
2585 {
2586  // The total number of elements in the mesh. We need this for
2587  // indexing into parallel_soln.
2588  dof_id_type parallel_n_elem = mesh.parallel_n_elem();
2589 
2590  // For each variable in names,
2591  // For each subdomain in subdomain_map,
2592  // If this (subdomain, variable) combination is active
2593  // Extract our values into local_soln (localize is a collective)
2594  // Write local_soln to file
2595  for (unsigned int v=0; v<names.size(); ++v)
2596  {
2597  // Get list of active subdomains for variable v
2598  const auto & active_subdomains = vars_active_subdomains[v];
2599 
2600  // Loop over all subdomain blocks, even ones for which we have
2601  // no elements, so we localize in sync
2602  for (const int sbd_id_int : global_elem_blk_ids)
2603  {
2604  const subdomain_id_type sbd_id =
2605  cast_int<subdomain_id_type>(sbd_id_int);
2606  auto it = subdomain_map.find(sbd_id);
2607  const std::vector<dof_id_type> empty_vec;
2608  const std::vector<dof_id_type> & elem_ids =
2609  (it == subdomain_map.end()) ? empty_vec : it->second;
2610 
2611  // Possibly skip this (variable, subdomain) combination
2612  if (active_subdomains.empty() || active_subdomains.count(sbd_id))
2613  {
2614  std::vector<numeric_index_type> required_indices;
2615  required_indices.reserve(elem_ids.size());
2616 
2617  for (const auto & id : elem_ids)
2618  required_indices.push_back(v * parallel_n_elem + id);
2619 
2620  std::vector<Number> local_soln;
2621  parallel_soln.localize(local_soln, required_indices);
2622 
2623  // It's possible that there's nothing for us to write:
2624  // we may not be responsible for any elements on the
2625  // current subdomain. We did still have to participate
2626  // in the localize() call above, however, since it is a
2627  // collective.
2628  if (local_soln.size())
2629  {
2630  ex_err = exII::ex_put_elem_var(ex_id,
2631  timestep,
2632  static_cast<int>(v+1),
2633  static_cast<int>(sbd_id),
2634  static_cast<int>(local_soln.size()),
2635  local_soln.data());
2636  EX_CHECK_ERR(ex_err, "Error writing element values.");
2637  }
2638  }
2639  }
2640  } // end loop over vars
2641 
2642  ex_err = exII::ex_update(ex_id);
2643  EX_CHECK_ERR(ex_err, "Error flushing buffers to file.");
2644 }
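// Editor's note: the index v * parallel_n_elem + id used above assumes
// parallel_soln stores elemental data variable-major (all element values for
// variable 0, then for variable 1, ...), unlike the node-major layout used by the
// nodal solution writers.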
2645 
2646 
2647 
2648 std::string Nemesis_IO_Helper::construct_nemesis_filename(const std::string & base_filename)
2649 {
2650  // Build a filename for this processor. This code is cut-n-pasted from the read function
2651  // and should probably be put into a separate function...
2652  std::ostringstream file_oss;
2653 
2654  // We have to be a little careful here: Nemesis left pads its file
2655  // numbers based on the number of processors, so for example on 10
2656  // processors, we'd have:
2657  // mesh.e.10.00
2658  // mesh.e.10.01
2659  // mesh.e.10.02
2660  // ...
2661  // mesh.e.10.09
2662 
2663  // And on 100 you'd have:
2664  // mesh.e.100.000
2665  // mesh.e.100.001
2666  // ...
2667  // mesh.e.100.099
2668 
2669  // Find the length of the highest processor ID
2670  file_oss << (this->n_processors());
2671  unsigned int field_width = cast_int<unsigned int>(file_oss.str().size());
2672 
2673  if (verbose)
2674  libMesh::out << "field_width=" << field_width << std::endl;
2675 
2676  file_oss.str(""); // reset the string stream
2677  file_oss << base_filename
2678  << '.' << this->n_processors()
2679  << '.' << std::setfill('0') << std::setw(field_width) << this->processor_id();
2680 
2681  // Return the resulting string
2682  return file_oss.str();
2683 }
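// Editor's usage sketch (hypothetical values): on 16 processors, rank 3 yields
//   construct_nemesis_filename("mesh.e")  ->  "mesh.e.16.03"
// because the zero-padding width equals the number of digits in n_processors().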
2684 
2685 } // namespace libMesh
2686 
2687 #endif // #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)