RFC_Window_transfer_comm.C
1 /* *******************************************************************
2  * Rocstar Simulation Suite *
3  * Copyright@2015, Illinois Rocstar LLC. All rights reserved. *
4  * *
5  * Illinois Rocstar LLC *
6  * Champaign, IL *
7  * www.illinoisrocstar.com *
8  * sales@illinoisrocstar.com *
9  * *
10  * License: See LICENSE file in top level of distribution package or *
11  * http://opensource.org/licenses/NCSA *
12  *********************************************************************/
13 /* *******************************************************************
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, *
15  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES *
16  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND *
17  * NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR *
18  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE *
21  * USE OR OTHER DEALINGS WITH THE SOFTWARE. *
22  *********************************************************************/
23 /* $Id: RFC_Window_transfer_comm.C,v 1.22 2008/12/06 08:43:29 mtcampbe Exp $ */
24 
25 #include "RFC_Window_transfer.h"
26 #include <cstdio>
27 
28 #include <limits>
29 #define QUIET_NAN std::numeric_limits<Real>::quiet_NaN()
30 
31 RFC_BEGIN_NAME_SPACE
32 
33 // Obtain the list of incident subfaces in each pane of the opposite mesh.
34 void
35 RFC_Window_transfer::
36 incident_faces( std::map< int, std::vector<int> > &opp_subface_lists) const {
37  RFC_assertion(opp_subface_lists.empty());
38 
39  // Loop through the panes to generate subface list
40  for (Pane_set::const_iterator
41  pi=_pane_set.begin(); pi != _pane_set.end(); ++pi) {
42  const RFC_Pane_transfer &pane = (const RFC_Pane_transfer &)*pi->second;
43 
44  for ( int i=0, size=pane.size_of_subfaces(); i<size; ++i) {
45  Face_ID oppf = pane._subface_counterparts[i];
46  opp_subface_lists[ oppf.pane_id].push_back( oppf.face_id);
47  }
48  }
49 }
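The loop above reduces to a simple grouping idiom: bucket each subface's counterpart face id under its owning pane. A minimal, self-contained sketch, with a hypothetical OppFace struct standing in for the two fields of Face_ID that the loop actually reads:

#include <cstddef>
#include <map>
#include <vector>

struct OppFace { int pane_id; int face_id; };

// Group counterpart face ids by the pane that owns them.
std::map< int, std::vector<int> >
group_by_pane( const std::vector<OppFace> &counterparts) {
  std::map< int, std::vector<int> > lists;
  for ( std::size_t i=0; i<counterparts.size(); ++i)
    lists[ counterparts[i].pane_id].push_back( counterparts[i].face_id);
  return lists;
}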
50 
51 // Create copies of the panes from the opposite subface lists.
52 void
53 RFC_Window_transfer::
54 replicate_metadata( const RFC_Window_transfer &opp_win) {
55 
56  std::map< int, std::vector<int> > opp_subface_lists;
57  opp_win.incident_faces( opp_subface_lists);
58 
59  // First, make local copies of topology information of remote panes.
60  std::vector< int> pane_ids; pane_ids.reserve( opp_subface_lists.size());
61 
62  for ( std::map< int, std::vector<int> >::const_iterator
63  it=opp_subface_lists.begin(); it!=opp_subface_lists.end(); ++it) {
64  pane_ids.push_back( it->first);
65  }
66  replicate_metadata( &pane_ids[0], pane_ids.size());
67 
68  // Fill in _recv_faces and _recv_nodes for each remote pane.
69  // Loop through the subface_lists
70  for ( std::map< int, std::vector<int> >::const_iterator
71  it=opp_subface_lists.begin(); it!=opp_subface_lists.end(); ++it) {
72 
73  RFC_Pane_transfer &pn = pane( it->first);
74  if (pn.is_master()) continue;
75 
76  RFC_assertion(pn._recv_faces.empty());
77  // Loop through the subfaces of pn
78  const std::vector< int> &subface_list = it->second;
79  std::set<int> face_list;
80  std::set<int> node_list;
81  for ( int i=0, size=subface_list.size(); i<size; ++i) {
82  int parent = pn._subface_parents[ subface_list[i]-1];
83  face_list.insert( parent);
84 
85  Element_node_enumerator ene( pn.base(), parent);
86 
87  for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
88  node_list.insert( ene[k]);
89 
90  // Loop through the subnodes of the subface, and insert the nodes
91  // of their host facets.
92  Three_tuple<int> &f = pn._subfaces[ subface_list[i]-1];
93  for ( int j=0; j<3; ++j) {
94  int parent = pn._subnode_parents[ f[j]-1].face_id;
95  Element_node_enumerator ene( pn.base(), parent);
96 
97  for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
98  node_list.insert( ene[k]);
99  }
100  }
101 
102  pn._recv_faces.reserve( face_list.size());
103  pn._recv_faces.insert( pn._recv_faces.end(), face_list.begin(), face_list.end());
104  pn._recv_nodes.reserve( node_list.size());
105  pn._recv_nodes.insert( pn._recv_nodes.end(), node_list.begin(), node_list.end());
106  }
107 
108  // Fill in _send_faces and _send_nodes for each local pane.
109  const std::map<int, std::pair<int, int> > &opp_panemap = opp_win._pane_map;
110  int rank=comm_rank();
111 
112  // Loop through the panes.
113  for (Pane_set::iterator pi=_pane_set.begin(); pi!=_pane_set.end(); ++pi) {
114 
115  // For the current pane, obtain a list of its faces that
116  // need to be sent to each remote process.
117  RFC_Pane_transfer &pane = (RFC_Pane_transfer &)*pi->second;
118 
119  // Create the lists and put them into the following objects
120  std::map< int, std::set<int> > sfs;
121  std::map< int, std::set<int> > sns;
122 
123  for ( int i=0, size=pane.size_of_subfaces(); i<size; ++i) {
124  // If the counterpart of a subface is owned by a remote process P,
125  // insert the parent of the subface into the face list for P.
126  Face_ID oppf = pane._subface_counterparts[i];
127  int remote_rank = opp_panemap.find( oppf.pane_id)->second.first;
128 
129  if ( remote_rank != rank) {
130  int parent = pane._subface_parents[i];
131  sfs[ remote_rank].insert( parent);
132 
133  std::set<int> &ns=sns[remote_rank];
134  Element_node_enumerator ene( pane.base(), parent);
135 
136  for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
137  ns.insert( ene[k]);
138 
139  // Loop through the subnodes of the subface, and insert the nodes
140  // of their host facets.
141  Three_tuple<int> &f = pane._subfaces[ i];
142  for ( int j=0; j<3; ++j) {
143  int parent = pane._subnode_parents[ f[j]-1].face_id;
144  Element_node_enumerator ene( pane.base(), parent);
145 
146  for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
147  ns.insert( ene[k]);
148  }
149  }
150  }
151 
152  // Copy from sfs to pane._send_faces
153  RFC_assertion(pane._send_faces.empty());
154  for ( std::map<int, std::set<int> >::const_iterator
155  it=sfs.begin(); it!=sfs.end(); ++it) {
156  std::vector<int> &vec=pane._send_faces[ it->first];
157  vec.reserve(it->second.size());
158  vec.insert( vec.end(), it->second.begin(), it->second.end());
159  }
160 
161  // Copy from sns to pane._send_nodes
162  RFC_assertion(pane._send_nodes.empty());
163  for ( std::map<int, std::set<int> >::const_iterator
164  it=sns.begin(); it!=sns.end(); ++it) {
165  std::vector<int> &vec=pane._send_nodes[ it->first];
166  vec.reserve(it->second.size());
167  vec.insert( vec.end(), it->second.begin(), it->second.end());
168  }
169  }
170 }
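Both halves of this routine hinge on the same ownership test: look a pane id up in a pane map of the assumed form pane id to (owner rank, pane index) and compare the owner rank against the local rank. A small sketch under that assumption, with a hypothetical helper name:

#include <map>
#include <utility>

// Hypothetical helper: true when a pane is owned by another process, given a
// map of the assumed layout pane_id -> (owner rank, pane index).
bool is_remote_pane( const std::map< int, std::pair<int,int> > &pane_map,
                     int pane_id, int my_rank) {
  std::map< int, std::pair<int,int> >::const_iterator it = pane_map.find( pane_id);
  return it != pane_map.end() && it->second.first != my_rank;
}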
171 
172 // Cache a copy of the meta-data for the given panes
173 void
174 RFC_Window_transfer::replicate_metadata( int *pane_ids, int n) {
175  // Determine the owners of the panes
176  std::vector< int> npanes_recv( comm_size(), 0);
177  std::vector< int> npanes_send( comm_size(), 0);
178  int rank=comm_rank();
179 
180  for ( int i=0; i<n; ++i) {
181  RFC_assertion( _pane_map.find(pane_ids[i]) != _pane_map.end());
182  std::pair< int, int> p = _pane_map.find( pane_ids[i])->second;
183  if ( p.first != rank) ++npanes_recv[ p.first];
184  }
185 
186  MPI_Alltoall( &npanes_recv[0], 1, MPI_INT,
187  &npanes_send[0], 1, MPI_INT, _comm);
188 
189  // Create buffers of displacements into the pane-id buffers.
190  std::vector<int> displs_recv( npanes_recv.size()+1);
191  counts_to_displs( npanes_recv, displs_recv);
192  std::vector<int> displs_send( npanes_send.size()+1);
193  counts_to_displs( npanes_send, displs_send);
194 
195  std::vector< int> ids_recv( displs_recv.back());
196  for ( int i=0; i<n; ++i) {
197  std::pair< int, int> p = _pane_map.find( pane_ids[i])->second;
198  if ( p.first != rank) ids_recv[ displs_recv[p.first]++] = pane_ids[i];
199  }
200  counts_to_displs( npanes_recv, displs_recv);
201 
202  std::vector< int> ids_send( displs_send.back());
203 
204  MPI_Alltoallv(&ids_recv[0], &npanes_recv[0],&displs_recv[0], MPI_INT,
205  &ids_send[0], &npanes_send[0],&displs_send[0], MPI_INT, _comm);
206 
207 
208  // Prepare the data structures for sending.
209  for ( int i=0, size=npanes_send.size(), k=0; i<size; ++i) {
210  for ( int j=0; j<npanes_send[i]; ++j, ++k) {
211  init_send_buffer( ids_send[k], i);
212  }
213  }
214 
215  // Prepare the data structures for receiving data (replication)
216  for ( int i=0, size=npanes_recv.size(), k=0; i<size; ++i) {
217  for ( int j=0; j<npanes_recv[i]; ++j, ++k) {
218  init_recv_buffer( ids_recv[k], i);
219  }
220  }
221 
222  _replicated = true;
223 }
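counts_to_displs() is not defined in this file. Judging from how it is used here (displs has one more entry than counts, displs.back() sizes the id buffers, displs[r] is advanced as a write cursor and then recomputed before MPI_Alltoallv), it presumably computes an exclusive prefix sum, roughly:

#include <cstddef>
#include <vector>

// Presumed behavior of counts_to_displs(): displs[0] = 0 and
// displs[i+1] = displs[i] + counts[i], so displs.back() is the total count.
void counts_to_displs_sketch( const std::vector<int> &counts,
                              std::vector<int> &displs) {
  displs.resize( counts.size()+1);
  displs[0] = 0;
  for ( std::size_t i=0; i<counts.size(); ++i)
    displs[i+1] = displs[i] + counts[i];
}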
224 
225 // Cache a copy of the given facial data. Also cache coordinates if
226 // replicate_coor is true.
227 void
228 RFC_Window_transfer::replicate_data( const Facial_data_const &data,
229                                      bool replicate_coor) {
230  std::vector< MPI_Request> other_requests, recv_requests;
231 
232  other_requests.reserve( _replic_panes.size()+_panes_to_send.size());
233  recv_requests.reserve( _replic_panes.size());
234 
235  std::vector< RFC_Pane_transfer*> recv_panes;
236  recv_panes.reserve(_replic_panes.size());
237 
238  int totalNumPanes = _pane_map.size();
239  int d = data.dimension(), ierr;
240 
241  // Loop through the replicated panes
242  // Initiate receive of data buffers from remote processes
243  for (std::map< int, RFC_Pane_transfer*>::iterator
244  it=_replic_panes.begin(),iend=_replic_panes.end(); it != iend; ++it) {
245  RFC_Pane_transfer *p=it->second;
246  p->_data_buf_id = data.id();
247 
248  p->_data_buf.reserve( p->size_of_faces()*d);
249  p->_data_buf.resize( p->_recv_faces.size()*d, QUIET_NAN);
250 
251  std::pair< int, int> s = _pane_map.find( p->id())->second;
252  MPI_Request req;
253  ierr = MPI_Irecv( &p->_data_buf[0], p->_data_buf.size()*sizeof(Real),
254  MPI_BYTE, s.first, 100+s.second, _comm, &req);
255  RFC_assertion( ierr==0);
256  recv_requests.push_back( req);
257  recv_panes.push_back( it->second);
258 
259  if ( replicate_coor) {
260  p->_coor_buf.resize( p->size_of_faces()*3);
261  ierr = MPI_Irecv( &p->_coor_buf[0],
262  p->_recv_faces.size()*3*sizeof(Real), MPI_BYTE,
263  s.first, 100+totalNumPanes+s.second, _comm, &req);
264  RFC_assertion( ierr==0);
265  other_requests.push_back( req);
266  }
267  }
268 
269  // Initiate send of data buffers to remote processes
270  for (std::set< std::pair<int, RFC_Pane_transfer*> >::const_iterator
271  it=_panes_to_send.begin(),iend=_panes_to_send.end(); it!=iend; ++it){
272  RFC_Pane_transfer *p=it->second;
273 
274  std::pair< int, int> s = _pane_map.find( p->id())->second;
275 
276  // Initialize the send buffer
277  std::vector<int> &send_faces=p->_send_faces[it->first];
278  std::vector<Real> &data_buf=p->_send_buffers[it->first];
279  const Real *addr = p->pointer(data.id());
280 
281  // Pack into a temporary buffer only if not all entries are being sent
282  if ( int(send_faces.size()) != p->size_of_faces()) {
283  data_buf.resize( send_faces.size()*d, QUIET_NAN);
284 
285  for ( int i=send_faces.size()-1; i>=0; --i) {
286  for ( int j=d-1; j>=0; --j) {
287  data_buf[i*d+j] = addr[(send_faces[i]-1)*d+j];
288  }
289  }
290  addr = &data_buf[0];
291  }
292 
293  MPI_Request req;
294  ierr = MPI_Isend( const_cast<Real*>(addr), send_faces.size()*d*sizeof(Real),
295  MPI_BYTE, it->first, 100+s.second, _comm, &req);
296 
297  RFC_assertion( ierr==0);
298  other_requests.push_back( req);
299 
300  if ( replicate_coor) {
301  ierr = MPI_Isend( const_cast<Real*>(p->coordinates()),
302  p->_send_faces[it->first].size()*3*sizeof(Real), MPI_BYTE,
303  it->first, 100+totalNumPanes+s.second, _comm, &req);
304  RFC_assertion( ierr==0);
305  other_requests.push_back( req);
306  }
307  }
308 
309  // Processing received data arrays
310  while ( !recv_requests.empty()) {
311  int index;
312  MPI_Status stat;
313 
314  ierr = MPI_Waitany(recv_requests.size(), &recv_requests[0], &index, &stat);
315  RFC_assertion( ierr==0);
316  RFC_Pane_transfer *p=recv_panes[index];
317 
318  // Reorganize the data buffer as a dense array, and fill
319  // unused entries with NaN.
320  std::vector<Real> &buf = p->_data_buf;
321  buf.resize( p->size_of_nodes()*d, QUIET_NAN);
322 
323  std::vector<int> &faces = p->_recv_faces;
324  for ( int i=faces.size()-1; i>=0; --i) {
325  if ( i==faces[i]-1) break;
326  for ( int j=d-1; j>=0; --j) {
327  buf[(faces[i]-1)*d+j] = buf[i*d+j];
328  buf[i*d+j] = QUIET_NAN;
329  }
330  }
331 
332  // Remove the received message from the list
333  recv_requests.erase( recv_requests.begin()+index);
334  recv_panes.erase( recv_panes.begin()+index);
335  }
336 
337  // Wait for all send requests to finish
338  wait_all( other_requests.size(), &other_requests[0]);
339 }
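The in-place reordering after MPI_Waitany works because _recv_faces is sorted ascending, so the destination slot faces[i]-1 is never smaller than the source slot i and a backward sweep cannot overwrite data it still needs; the early break skips the leading block whose ids already equal their positions. The same logic in self-contained form:

#include <limits>
#include <vector>

// Scatter a packed receive buffer (entry i belongs to 1-based id ids[i]) into a
// dense layout of d values per id; ids must be sorted ascending and buf must
// already be resized to the dense size with NaN fill.
void scatter_to_dense( std::vector<double> &buf,
                       const std::vector<int> &ids, int d) {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  for ( int i=int(ids.size())-1; i>=0; --i) {
    if ( i == ids[i]-1) break;           // leading block is already in place
    for ( int j=d-1; j>=0; --j) {
      buf[(ids[i]-1)*d+j] = buf[i*d+j];
      buf[i*d+j] = nan;
    }
  }
}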
340 
341 // Cache a copy of the given nodal data. Also cache coordinates if
342 // replicate_coor is true.
343 void
344 RFC_Window_transfer::replicate_data( const Nodal_data_const &data,
345                                      bool replicate_coor) {
346  std::vector< MPI_Request> other_requests, recv_requests;
347 
348  other_requests.reserve( _replic_panes.size()+_panes_to_send.size());
349  recv_requests.reserve( _replic_panes.size());
350 
351  std::vector< RFC_Pane_transfer*> recv_panes;
352  recv_panes.reserve(_replic_panes.size());
353 
354  int totalNumPanes = _pane_map.size();
355  int d = data.dimension(), ierr;
356 
357  // Loop through the replicated panes
358  // Initiate receive of data buffers from remote processes
359  for (std::map< int, RFC_Pane_transfer*>::iterator
360  it=_replic_panes.begin(),iend=_replic_panes.end(); it != iend; ++it) {
361  RFC_Pane_transfer *p=it->second;
362  p->_data_buf_id = data.id();
363 
364  p->_data_buf.reserve( p->size_of_nodes()*d);
365  p->_data_buf.resize( p->_recv_nodes.size()*d);
366 
367  std::pair< int, int> s = _pane_map.find( p->id())->second;
368  MPI_Request req;
369  ierr = MPI_Irecv( &p->_data_buf[0], p->_data_buf.size()*sizeof(Real),
370  MPI_BYTE, s.first, 100+s.second, _comm, &req);
371  RFC_assertion( ierr==0);
372  recv_requests.push_back( req);
373  recv_panes.push_back( it->second);
374 
375  if ( replicate_coor) {
376  p->_coor_buf.resize( p->size_of_nodes()*3);
377  ierr = MPI_Irecv( &p->_coor_buf[0],
378  p->_recv_nodes.size()*3*sizeof(Real), MPI_BYTE,
379  s.first, 100+totalNumPanes+s.second, _comm, &req);
380  RFC_assertion( ierr==0);
381  other_requests.push_back( req);
382  }
383  }
384 
385  // Initiate send of data buffers to remote processes
386  for (std::set< std::pair<int, RFC_Pane_transfer*> >::const_iterator
387  it=_panes_to_send.begin(),iend=_panes_to_send.end(); it!=iend; ++it){
388  RFC_Pane_transfer *p=it->second;
389 
390  std::pair< int, int> s = _pane_map.find( p->id())->second;
391 
392  // Initialize the send buffer
393  std::vector<int> &send_nodes=p->_send_nodes[it->first];
394  std::vector<Real> &data_buf=p->_send_buffers[it->first];
395  const Real *addr = p->pointer(data.id());
396 
397  // Pack into a temporary buffer only if not all entries are being sent
398  if ( int(send_nodes.size()) != p->size_of_nodes()) {
399  data_buf.resize( send_nodes.size()*d);
400 
401  for ( int i=send_nodes.size()-1; i>=0; --i) {
402  for ( int j=d-1; j>=0; --j) {
403  data_buf[i*d+j] = addr[(send_nodes[i]-1)*d+j];
404  }
405  }
406  addr = &data_buf[0];
407  }
408 
409  MPI_Request req;
410  ierr = MPI_Isend( const_cast<Real*>(addr), send_nodes.size()*d*sizeof(Real),
411  MPI_BYTE, it->first, 100+s.second, _comm, &req);
412  RFC_assertion( ierr==0);
413  other_requests.push_back( req);
414 
415  if ( replicate_coor) {
416  ierr = MPI_Isend( const_cast<Real*>(p->coordinates()),
417  p->_send_nodes[it->first].size()*3*sizeof(Real), MPI_BYTE,
418  it->first, 100+totalNumPanes+s.second, _comm, &req);
419  RFC_assertion( ierr==0);
420  other_requests.push_back( req);
421  }
422  }
423 
424  // Processing received data arrays
425  while ( !recv_requests.empty()) {
426  int index;
427  MPI_Status stat;
428 
429  ierr = MPI_Waitany(recv_requests.size(), &recv_requests[0], &index, &stat);
430  RFC_assertion( ierr==0);
431 
432  RFC_Pane_transfer *p=recv_panes[index];
433  std::vector<Real> &buf = p->_data_buf;
434 
435  // Reorganize the data buffer as a dense array, and fill
436  // unused entries with NaN.
437  std::vector<int> &nodes = p->_recv_nodes;
438  buf.resize( p->size_of_nodes()*d, QUIET_NAN);
439 
440  for ( int i=nodes.size()-1; i>=0; --i) {
441  int lid = nodes[i]-1;
442  if ( i==lid) break;
443  for ( int j=d-1; j>=0; --j) {
444  buf[lid*d+j] = buf[i*d+j];
445  buf[i*d+j] = QUIET_NAN;
446  }
447  }
448 
449  // Remove the received message from the list
450  recv_requests.erase( recv_requests.begin()+index);
451  recv_panes.erase( recv_panes.begin()+index);
452  }
453 
454  // Wait for all send requests to finish
455  wait_all( other_requests.size(), &other_requests[0]);
456 }
457 
458 void
459 RFC_Window_transfer::reduce_to_all( Nodal_data &data, MPI_Op op) {
460  std::vector<void *> ptrs; ptrs.reserve( _pane_set.size());
461 
462  for (Pane_set::iterator pi=_pane_set.begin(); pi != _pane_set.end(); ++pi) {
463  RFC_Pane_transfer &pane = (RFC_Pane_transfer &)*pi->second;
464 
465  ptrs.push_back( pane.pointer( data.id()));
466  }
467 
468  _map_comm.init( &ptrs[0], COM_DOUBLE, data.dimension());
469 
470  _map_comm.begin_update_shared_nodes();
471  _map_comm.reduce_on_shared_nodes( op);
472  _map_comm.end_update_shared_nodes();
473 }
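The shared-node reduction itself is delegated to MAP::Pane_communicator. Conceptually, each node shared by several panes receives the reduction of all of its copies, and every copy is overwritten with the result. A self-contained illustration for two panes, a hypothetical shared-node pairing, and a sum reduction:

#include <cstddef>
#include <utility>
#include <vector>

// Illustrative only: sum the d components of every shared node pair across two
// panes and write the reduced value back to both copies.
void reduce_shared_nodes_sum(
    std::vector<double> &pane_a, std::vector<double> &pane_b,
    const std::vector< std::pair<int,int> > &shared,  // (node in a, node in b), 0-based
    int d) {
  for ( std::size_t s=0; s<shared.size(); ++s) {
    const int ia = shared[s].first, ib = shared[s].second;
    for ( int j=0; j<d; ++j) {
      const double sum = pane_a[ia*d+j] + pane_b[ib*d+j];
      pane_a[ia*d+j] = sum;
      pane_b[ib*d+j] = sum;
    }
  }
}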
474 
475 void
476 RFC_Window_transfer::reduce_maxabs_to_all( Nodal_data &data) {
477  std::vector<void *> ptrs; ptrs.reserve( _pane_set.size());
478 
479  for (Pane_set::iterator pi=_pane_set.begin(); pi != _pane_set.end(); ++pi) {
480  RFC_Pane_transfer &pane = (RFC_Pane_transfer &)*pi->second;
481 
482  ptrs.push_back( pane.pointer( data.id()));
483  }
484 
485  _map_comm.init( &ptrs[0], COM_DOUBLE, data.dimension());
486 
487  _map_comm.begin_update_shared_nodes();
488  _map_comm.reduce_maxabs_on_shared_nodes();
489  _map_comm.end_update_shared_nodes();
490 }
491 
492 //=================== Lower level communication routines
493 void
494 RFC_Window_transfer::barrier() const {
495  int ierr = MPI_Barrier( _comm); RFC_assertion( ierr==0);
496 }
497 
498 bool
499 RFC_Window_transfer::is_root() const {
500  int rank; MPI_Comm_rank( _comm, &rank);
501  return (rank==0);
502 }
503 
504 void
505 RFC_Window_transfer::wait_all( int n, MPI_Request *requests) {
506  if ( n>0) {
507  std::vector<MPI_Status> statuses(n);
508  int ierr = MPI_Waitall( n, requests, &statuses[0]);
509  RFC_assertion( ierr==0);
510  }
511 }
512 
513 void
514 RFC_Window_transfer::wait_any( int n, MPI_Request *requests,
515  int *index, MPI_Status *stat) {
516  if ( n>0) {
517  MPI_Status s; if ( stat==NULL) stat = &s;
518  int ierr = MPI_Waitany( n, requests, index, stat);
519  RFC_assertion( ierr==0);
520  }
521 }
522 
523 void
524 RFC_Window_transfer::allreduce( Real *data, int n, MPI_Op op) const {
525  std::vector<Real> buf( data, data+n);
526  RFC_assertion( sizeof( Real) == sizeof( double));
527  MPI_Allreduce( &buf[0], data, n, MPI_DOUBLE, op, _comm);
528 }
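The helper above stages the input through a temporary vector because MPI forbids passing the same pointer as both send and receive buffer. Assuming an MPI-2 or later library, MPI_IN_PLACE avoids the copy, as in this sketch written against the plain C API with the communicator passed explicitly:

#include <mpi.h>

// Sketch: in-place all-reduce over n doubles, avoiding the temporary copy.
void allreduce_in_place( double *data, int n, MPI_Op op, MPI_Comm comm) {
  MPI_Allreduce( MPI_IN_PLACE, data, n, MPI_DOUBLE, op, comm);
}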
529 
530 void
531 RFC_Window_transfer::init_send_buffer( int pane_id, int to_rank) {
532  _panes_to_send.insert( std::pair<int,RFC_Pane_transfer*>( to_rank,
533  &pane(pane_id)));
534 }
535 
536 COM_EXTERN_MODULE( Rocin);
537 
538 void
539 RFC_Window_transfer::init_recv_buffer( int pane_id, int from_rank) {
540 
541  RFC_assertion( _pane_set.find( pane_id) == _pane_set.end());
542 
543  COM::Pane *base_pane = new COM::Pane( (COM::Window*)NULL, pane_id);
544  RFC_Pane_transfer *pane = new RFC_Pane_transfer( base_pane, color());
545  _replic_panes[ pane_id] = pane;
546 
547  std::string fname = get_sdv_fname( _prefix.c_str(), pane_id, _IO_format);
548  if ( _IO_format == SDV_BINARY) {
549  std::ifstream is( fname.c_str()); RFC_assertion( is);
550 
551  pane->read_binary( is, NULL, base_pane);
552  }
553  else {
554  // Read in using Rocin.
555  COM_LOAD_MODULE_STATIC_DYNAMIC( Rocin, "RFC_IN");
556  int hdl_read = COM_get_function_handle( "RFC_IN.read_windows");
557  int hdl_obtain = COM_get_function_handle( "RFC_IN.obtain_attribute");
558 
559  // Define the base-window and sdv-window names
560  std::string base_material = get_prefix_base( _prefix.c_str());
561  std::string sdv_material = base_material+"_sdv";
562 
563  std::string buf_wname(_bufwin_prefix); buf_wname.append( base_material);
564  std::string sdv_wname=buf_wname+"_sdv";
565 
566  // Read the parent pane and the subdivided pane from the given file.
567  MPI_Comm comm_self = MPI_COMM_SELF;
568  COM_call_function( hdl_read, fname.c_str(), _bufwin_prefix,
569  (base_material+" "+sdv_material).c_str(), &comm_self);
570 
571  int hdl_all = COM_get_attribute_handle( (buf_wname+".all").c_str());
572  COM_call_function( hdl_obtain, &hdl_all, &hdl_all, &pane_id);
573  hdl_all = COM_get_attribute_handle( (sdv_wname+".all").c_str());
574  COM_call_function( hdl_obtain, &hdl_all, &hdl_all, &pane_id);
575 
576  pane->read_rocin( sdv_wname, buf_wname, base_pane);
577 
578  // Delete the windows created by Rocin.
579  COM_delete_window( buf_wname.c_str());
580  COM_delete_window( sdv_wname.c_str());
581 
582  // Unload Rocin
583  COM_UNLOAD_MODULE_STATIC_DYNAMIC( Rocin, "RFC_IN");
584  }
585 
586  pane->init();
587 }
588 
589 void
590 RFC_Window_transfer::clear_replicated_data() {
591  // Loop through the replicated panes
592  std::map< int, RFC_Pane_transfer*>::iterator it=_replic_panes.begin();
593  std::map< int, RFC_Pane_transfer*>::iterator iend=_replic_panes.end();
594  for ( ; it != iend; ++it) {
595  it->second->_data_buf_id = -1;
596  std::vector<Real> t;
597  it->second->_data_buf.swap( t); // clear up the memory space
598  }
599 }
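The swap with a local temporary is the usual idiom for actually releasing a vector's storage, since clear() keeps the capacity. In isolation:

#include <vector>

// Release both the elements and the capacity of a vector by swapping it with
// an empty temporary; v.clear() alone would keep the allocation.
void release_capacity( std::vector<double> &v) {
  std::vector<double>().swap( v);
}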
600 
601 RFC_END_NAME_SPACE
602 