distributed_vector.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
#include "libmesh/libmesh_common.h"

// C++ includes
#include <cstdlib> // *must* precede <cmath> for proper std:abs() on PGI, Sun Studio CC
#include <cmath> // for std::abs
#include <limits> // std::numeric_limits<T>::min()

// Local Includes
#include "libmesh/distributed_vector.h"
#include "libmesh/dense_vector.h"
#include "libmesh/dense_subvector.h"
#include "libmesh/parallel.h"
#include "libmesh/tensor_tools.h"
34 namespace libMesh
35 {
36 
37 
38 
39 //--------------------------------------------------------------------------
40 // DistributedVector methods
41 template <typename T>
43 {
44  // This function must be run on all processors at once
45  parallel_object_only();
46 
47  libmesh_assert (this->initialized());
48  libmesh_assert_equal_to (_values.size(), _local_size);
49  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
50 
51  T local_sum = 0.;
52 
53  for (numeric_index_type i=0; i<local_size(); i++)
54  local_sum += _values[i];
55 
56  this->comm().sum(local_sum);
57 
58  return local_sum;
59 }
60 
61 
62 
63 template <typename T>
65 {
66  // This function must be run on all processors at once
67  parallel_object_only();
68 
69  libmesh_assert (this->initialized());
70  libmesh_assert_equal_to (_values.size(), _local_size);
71  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
72 
73  double local_l1 = 0.;
74 
75  for (numeric_index_type i=0; i<local_size(); i++)
76  local_l1 += std::abs(_values[i]);
77 
78  this->comm().sum(local_l1);
79 
80  return local_l1;
81 }
82 
83 
84 
85 template <typename T>
87 {
88  // This function must be run on all processors at once
89  parallel_object_only();
90 
91  libmesh_assert (this->initialized());
92  libmesh_assert_equal_to (_values.size(), _local_size);
93  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
94 
95  double local_l2 = 0.;
96 
97  for (numeric_index_type i=0; i<local_size(); i++)
98  local_l2 += TensorTools::norm_sq(_values[i]);
99 
100  this->comm().sum(local_l2);
101 
102  return std::sqrt(local_l2);
103 }
104 
105 
106 
107 template <typename T>
109 {
110  // This function must be run on all processors at once
111  parallel_object_only();
112 
113  libmesh_assert (this->initialized());
114  libmesh_assert_equal_to (_values.size(), _local_size);
115  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
116 
117  Real local_linfty = 0.;
118 
119  for (numeric_index_type i=0; i<local_size(); i++)
120  local_linfty = std::max(local_linfty,
121  static_cast<Real>(std::abs(_values[i]))
122  ); // Note we static_cast so that both
123  // types are the same, as required
124  // by std::max
125 
126  this->comm().max(local_linfty);
127 
128  return local_linfty;
129 }
130 
131 
132 
133 template <typename T>
135 {
136  libmesh_assert (this->closed());
137  libmesh_assert (this->initialized());
138  libmesh_assert_equal_to (_values.size(), _local_size);
139  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
140 
141  add(1., v);
142 
143  return *this;
144 }
145 
146 
147 
148 template <typename T>
150 {
151  libmesh_assert (this->closed());
152  libmesh_assert (this->initialized());
153  libmesh_assert_equal_to (_values.size(), _local_size);
154  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
155 
156  add(-1., v);
157 
158  return *this;
159 }
160 
161 
162 
163 template <typename T>
165 {
166  libmesh_assert_equal_to(size(), v.size());
167 
168  DistributedVector<T> & v_vec = cast_ref<DistributedVector<T> &>(v);
169 
170  std::size_t val_size = _values.size();
171 
172  for(std::size_t i=0; i<val_size; i++)
173  _values[i] = _values[i] / v_vec._values[i];
174 
175  return *this;
176 }
177 
178 
179 
180 
181 template <typename T>
183 {
184  for (numeric_index_type i=0; i<local_size(); i++)
185  {
186  // Don't divide by zero
187  libmesh_assert_not_equal_to (_values[i], T(0));
188 
189  _values[i] = 1. / _values[i];
190  }
191 }
192 
193 
194 
195 
196 template <typename T>
198 {
199  for (numeric_index_type i=0; i<local_size(); i++)
200  {
201  // Replace values by complex conjugate
202  _values[i] = libmesh_conj(_values[i]);
203  }
204 }
205 
206 
207 
208 
209 
210 template <typename T>
212 {
213  libmesh_assert (this->initialized());
214  libmesh_assert_equal_to (_values.size(), _local_size);
215  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
216 
217  for (numeric_index_type i=0; i<local_size(); i++)
218  _values[i] += v;
219 }
220 
221 
222 
223 template <typename T>
225 {
226  libmesh_assert (this->initialized());
227  libmesh_assert_equal_to (_values.size(), _local_size);
228  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
229 
230  add (1., v);
231 }
232 
233 
234 
235 template <typename T>
236 void DistributedVector<T>::add (const T a, const NumericVector<T> & v)
237 {
238  libmesh_assert (this->initialized());
239  libmesh_assert_equal_to (_values.size(), _local_size);
240  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
241 
242  add(a, v);
243 }
244 
245 
246 
247 template <typename T>
248 void DistributedVector<T>::scale (const T factor)
249 {
250  libmesh_assert (this->initialized());
251  libmesh_assert_equal_to (_values.size(), _local_size);
252  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
253 
254  for (std::size_t i=0; i<local_size(); i++)
255  _values[i] *= factor;
256 }
257 
258 template <typename T>
260 {
261  libmesh_assert (this->initialized());
262  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
263 
264  for (numeric_index_type i=0; i<local_size(); i++)
265  this->set(i,std::abs(_values[i]));
266 }
267 
268 
269 
270 
271 
272 template <typename T>
274 {
275  // This function must be run on all processors at once
276  parallel_object_only();
277 
278  // Make sure the NumericVector passed in is really a DistributedVector
279  const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&V);
280 
281  // Make sure that the two vectors are distributed in the same way.
282  libmesh_assert_equal_to ( this->first_local_index(), v->first_local_index() );
283  libmesh_assert_equal_to ( this->last_local_index(), v->last_local_index() );
284 
285  // The result of dotting together the local parts of the vector.
286  T local_dot = 0;
287 
288  for (std::size_t i=0; i<this->local_size(); i++)
289  local_dot += this->_values[i] * v->_values[i];
290 
291  // The local dot products are now summed via MPI
292  this->comm().sum(local_dot);
293 
294  return local_dot;
295 }
296 
297 
298 
299 template <typename T>
302 {
303  libmesh_assert (this->initialized());
304  libmesh_assert_equal_to (_values.size(), _local_size);
305  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
306 
307  for (std::size_t i=0; i<local_size(); i++)
308  _values[i] = s;
309 
310  return *this;
311 }
312 
313 
314 
315 template <typename T>
318 {
319  // Make sure the NumericVector passed in is really a DistributedVector
320  const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&v_in);
321 
322  *this = *v;
323 
324  return *this;
325 }
326 
327 
328 
329 template <typename T>
332 {
334  this->_is_closed = v._is_closed;
335 
336  _global_size = v._global_size;
337  _local_size = v._local_size;
338  _first_local_index = v._first_local_index;
339  _last_local_index = v._last_local_index;
340 
341  if (v.local_size() == this->local_size())
342  _values = v._values;
343 
344  else
345  libmesh_error_msg("v.local_size() = " << v.local_size() << " must be equal to this->local_size() = " << this->local_size());
346 
347  return *this;
348 }
349 
350 
351 
352 template <typename T>
354 DistributedVector<T>::operator = (const std::vector<T> & v)
355 {
356  libmesh_assert (this->initialized());
357  libmesh_assert_equal_to (_values.size(), _local_size);
358  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
359 
360  if (v.size() == local_size())
361  _values = v;
362 
363  else if (v.size() == size())
364  for (std::size_t i=first_local_index(); i<last_local_index(); i++)
365  _values[i-first_local_index()] = v[i];
366 
367  else
368  libmesh_error_msg("Incompatible sizes in DistributedVector::operator=");
369 
370  return *this;
371 }
372 
373 
374 
375 template <typename T>
377 
378 {
379  libmesh_assert (this->initialized());
380  libmesh_assert_equal_to (_values.size(), _local_size);
381  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
382 
383  DistributedVector<T> * v_local = cast_ptr<DistributedVector<T> *>(&v_local_in);
384 
385  v_local->_first_local_index = 0;
386 
387  v_local->_global_size =
388  v_local->_local_size =
389  v_local->_last_local_index = size();
390 
391  v_local->_is_initialized =
392  v_local->_is_closed = true;
393 
394  // Call localize on the vector's values. This will help
395  // prevent code duplication
396  localize (v_local->_values);
397 
398 #ifndef LIBMESH_HAVE_MPI
399 
400  libmesh_assert_equal_to (local_size(), size());
401 
402 #endif
403 }
404 
405 
406 
407 template <typename T>
409  const std::vector<numeric_index_type> &) const
410 {
411  libmesh_assert (this->initialized());
412  libmesh_assert_equal_to (_values.size(), _local_size);
413  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
414 
415  // TODO: We don't yet support the send list; this is inefficient:
416  localize (v_local_in);
417 }
418 
419 
420 
421 template <typename T>
422 void DistributedVector<T>::localize (std::vector<T> & v_local,
423  const std::vector<numeric_index_type> & indices) const
424 {
425  // Resize v_local so there is enough room to hold all the local values.
426  v_local.resize(indices.size());
427 
428  // We need to know who has the values we want, so get everyone's _local_size
429  std::vector<numeric_index_type> local_sizes;
430  this->comm().allgather (_local_size, local_sizes);
431 
432  // Make a vector of partial sums of local sizes
433  std::vector<numeric_index_type> local_size_sums(this->n_processors());
434  local_size_sums[0] = local_sizes[0];
435  for (numeric_index_type i=1; i<local_sizes.size(); i++)
436  local_size_sums[i] = local_size_sums[i-1] + local_sizes[i];
437 
438  // We now fill in 'requested_ids' based on the indices. Also keep
439  // track of the local index (in the indices vector) for each of
440  // these, since we need that when unpacking.
441  std::vector<std::vector<numeric_index_type> >
442  requested_ids(this->n_processors()),
443  local_requested_ids(this->n_processors());
444 
445  // We'll use this typedef a couple of times below.
446  typedef typename std::vector<numeric_index_type>::iterator iter_t;
447 
448  // For each index in indices, determine which processor it is on.
449  // This is an O(N*log(p)) algorithm that uses std::upper_bound().
450  // Note: upper_bound() returns an iterator to the first entry which is
451  // greater than the given value.
452  for (numeric_index_type i=0; i<indices.size(); i++)
453  {
454  iter_t ub = std::upper_bound(local_size_sums.begin(),
455  local_size_sums.end(),
456  indices[i]);
457 
458  std::iterator_traits<iter_t>::difference_type on_proc
459  = std::distance(local_size_sums.begin(), ub);
460 
461  requested_ids[on_proc].push_back(indices[i]);
462  local_requested_ids[on_proc].push_back(i);
463  }
464 
465  // First trade indices, then trade values with all other processors.
466  for (processor_id_type pid=0; pid<this->n_processors(); pid++)
467  {
468  // Another data structure which holds the ids that other processors request us to fill.
469  std::vector<numeric_index_type> requests_for_me_to_fill;
470 
471  // Trade my requests with processor procup and procdown
472  const processor_id_type procup = (this->processor_id() + pid) % this->n_processors();
473  const processor_id_type procdown = (this->n_processors() + this->processor_id() - pid) % this->n_processors();
474 
475  this->comm().send_receive (procup, requested_ids[procup],
476  procdown, requests_for_me_to_fill);
477 
478  // The first send/receive we did was for indices, the second one will be
479  // for corresponding floating point values, so create storage for that now...
480  std::vector<T> values_to_send(requests_for_me_to_fill.size());
481 
482  for (std::size_t i=0; i<requests_for_me_to_fill.size(); i++)
483  {
484  // The index of the requested value
485  const numeric_index_type requested_index = requests_for_me_to_fill[i];
486 
487  // Transform into local numbering, and get requested value.
488  values_to_send[i] = _values[requested_index - _first_local_index];
489  }
490 
491  // Send the values we were supposed to send, receive the values
492  // we were supposed to receive.
493  std::vector<T> values_to_receive(requested_ids[procup].size());
494  this->comm().send_receive (procdown, values_to_send,
495  procup, values_to_receive);
496 
497  // Error checking: make sure that requested_ids[procup] is same
498  // length as values_to_receive.
499  if (requested_ids[procup].size() != values_to_receive.size())
500  libmesh_error_msg("Requested " << requested_ids[procup].size() << " values, but received " << values_to_receive.size() << " values.");
501 
502  // Now write the received values to the appropriate place(s) in v_local
503  for (std::size_t i=0; i<values_to_receive.size(); i++)
504  {
505  // Get the index in v_local where this value needs to be inserted.
506  numeric_index_type local_requested_index = local_requested_ids[procup][i];
507 
508  // Actually set the value in v_local
509  v_local[local_requested_index] = values_to_receive[i];
510  }
511  }
512 }
513 
514 
515 
516 template <typename T>
518  const numeric_index_type last_local_idx,
519  const std::vector<numeric_index_type> & send_list)
520 {
521  // Only good for serial vectors
522  libmesh_assert_equal_to (this->size(), this->local_size());
523  libmesh_assert_greater (last_local_idx, first_local_idx);
524  libmesh_assert_less_equal (send_list.size(), this->size());
525  libmesh_assert_less (last_local_idx, this->size());
526 
527  const numeric_index_type my_size = this->size();
528  const numeric_index_type my_local_size = (last_local_idx - first_local_idx + 1);
529 
530  // Don't bother for serial cases
531  if ((first_local_idx == 0) &&
532  (my_local_size == my_size))
533  return;
534 
535 
536  // Build a parallel vector, initialize it with the local
537  // parts of (*this)
538  DistributedVector<T> parallel_vec(this->comm());
539 
540  parallel_vec.init (my_size, my_local_size, true, PARALLEL);
541 
542  // Copy part of *this into the parallel_vec
543  for (numeric_index_type i=first_local_idx; i<=last_local_idx; i++)
544  parallel_vec._values[i-first_local_idx] = _values[i];
545 
546  // localize like normal
547  parallel_vec.localize (*this, send_list);
548 }
549 
550 
551 
552 template <typename T>
553 void DistributedVector<T>::localize (std::vector<T> & v_local) const
554 {
555  // This function must be run on all processors at once
556  parallel_object_only();
557 
558  libmesh_assert (this->initialized());
559  libmesh_assert_equal_to (_values.size(), _local_size);
560  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
561 
562  v_local = this->_values;
563 
564  this->comm().allgather (v_local);
565 
566 #ifndef LIBMESH_HAVE_MPI
567  libmesh_assert_equal_to (local_size(), size());
568 #endif
569 }
570 
571 
572 
573 template <typename T>
574 void DistributedVector<T>::localize_to_one (std::vector<T> & v_local,
575  const processor_id_type pid) const
576 {
577  // This function must be run on all processors at once
578  parallel_object_only();
579 
580  libmesh_assert (this->initialized());
581  libmesh_assert_equal_to (_values.size(), _local_size);
582  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
583 
584  v_local = this->_values;
585 
586  this->comm().gather (pid, v_local);
587 
588 #ifndef LIBMESH_HAVE_MPI
589  libmesh_assert_equal_to (local_size(), size());
590 #endif
591 }
592 
593 
594 
595 template <typename T>
597  const NumericVector<T> &)
598 //void DistributedVector<T>::pointwise_mult (const NumericVector<T> & vec1,
599 // const NumericVector<T> & vec2)
600 {
601  libmesh_not_implemented();
602 }
603 
604 
605 
606 //--------------------------------------------------------------
607 // Explicit instantiations
608 template class DistributedVector<Number>;
609 
610 } // namespace libMesh
bool closed()
Definition: libmesh.C:279
double abs(double a)
virtual Real l1_norm() const libmesh_override
virtual T dot(const NumericVector< T > &V) const libmesh_override
virtual numeric_index_type size() const =0
T libmesh_conj(T a)
virtual NumericVector< T > & operator-=(const NumericVector< T > &v) libmesh_override
virtual NumericVector< T > & operator+=(const NumericVector< T > &v) libmesh_override
virtual numeric_index_type last_local_index() const libmesh_override
numeric_index_type _last_local_index
virtual void conjugate() libmesh_override
uint8_t processor_id_type
Definition: id_types.h:99
virtual void pointwise_mult(const NumericVector< T > &vec1, const NumericVector< T > &vec2) libmesh_override
virtual void init(const numeric_index_type N, const numeric_index_type n_local, const bool fast=false, const ParallelType ptype=AUTOMATIC) libmesh_override
virtual void add(const numeric_index_type i, const T value) libmesh_override
numeric_index_type _global_size
long double max(long double a, double b)
virtual void localize_to_one(std::vector< T > &v_local, const processor_id_type proc_id=0) const libmesh_override
libmesh_assert(j)
virtual void abs() libmesh_override
virtual numeric_index_type local_size() const libmesh_override
virtual numeric_index_type first_local_index() const libmesh_override
dof_id_type numeric_index_type
Definition: id_types.h:92
virtual void localize(std::vector< T > &v_local) const libmesh_override
virtual NumericVector< T > & operator/=(NumericVector< T > &v) libmesh_override
virtual Real l2_norm() const libmesh_override
virtual void scale(const T factor) libmesh_override
numeric_index_type _local_size
DIE A HORRIBLE DEATH HERE typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
bool initialized()
Definition: libmesh.C:272
virtual NumericVector< T > & operator=(const T s) libmesh_override
virtual Real linfty_norm() const libmesh_override
virtual numeric_index_type size() const libmesh_override
virtual T sum() const libmesh_override
processor_id_type processor_id()
Definition: libmesh_base.h:96
virtual void reciprocal() libmesh_override
numeric_index_type _first_local_index
processor_id_type n_processors()
Definition: libmesh_base.h:88