Rocstar 1.0 — Rocstar multiphysics simulation application
Pane_ghost_connectivity.C
/* *******************************************************************
 * Rocstar Simulation Suite                                          *
 * Copyright@2015, Illinois Rocstar LLC. All rights reserved.        *
 *                                                                   *
 * Illinois Rocstar LLC                                              *
 * Champaign, IL                                                     *
 * www.illinoisrocstar.com                                           *
 * sales@illinoisrocstar.com                                         *
 *                                                                   *
 * License: See LICENSE file in top level of distribution package or *
 * http://opensource.org/licenses/NCSA                               *
 *********************************************************************/
/* *******************************************************************
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,   *
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES   *
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND          *
 * NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR            *
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER       *
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,   *
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE    *
 * USE OR OTHER DEALINGS WITH THE SOFTWARE.                          *
 *********************************************************************/
// $Id: Pane_ghost_connectivity.C,v 1.8 2009/01/22 23:56:53 gzagaris Exp $

/* Initial version by P. Alexander, 3/14/2006 */
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include "Pane_ghost_connectivity.h"
#include "Rocmap.h"
#include "Roccom_base.h"
#include "Pane_connectivity.h"
#include "Pane_communicator.h"
#include "Dual_connectivity.h"
#include "Pane.h"
#include "Element_accessors.h"

MAP_BEGIN_NAMESPACE

typedef vector<vector<int> > pane_i_vector;
typedef vector<int>::iterator i_vector_iter;
typedef vector<set<set<int> > > pane_i_set_set;
void Pane_ghost_connectivity::init(){

  // Get pointers to all local panes
  _buf_window->panes(_panes);

  _npanes = (int)_panes.size();

  for(int i=0; i<_npanes; ++i){
    _panes[i]->set_ignore_ghost(false);
  }

  // Make sure that we have the shared-node pconn information.
  MAP::Pane_connectivity pc(_buf_window->attribute(COM::COM_MESH),
                            _buf_window->get_communicator());

  pc.compute_pconn(_buf_window->attribute(COM::COM_PCONN));

  // Determine which nodes are shared
  determine_shared_nodes();

  // Get the list of communicating panes
  get_cpanes();

  // Create data structures for the total node ordering
  _w_n_gorder =
    _buf_window->new_attribute("n_gorder",'n',COM_INT,1,"");
  _buf_window->resize_array(_w_n_gorder,0);
  _buf_window->init_done();

  _etype_str[Connectivity::ST1]   = ":st1:";
  _etype_str[Connectivity::ST2]   = ":st2:";
  _etype_str[Connectivity::ST3]   = ":st3:";
  _etype_str[Connectivity::BAR3]  = ":b3:";
  _etype_str[Connectivity::TRI6]  = ":t6:";
  _etype_str[Connectivity::TET4]  = ":T4:";
  _etype_str[Connectivity::HEX20] = ":B20:";
  // (assignments for the remaining Connectivity element types are
  //  elided in this listing)
}
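/* Hedged aside: the _etype_str strings above become the names of the
 * per-type ghost connectivity tables created in finalize_pconn(), which
 * appends "virtual" to each key. For example, ghost 6-node triangles
 * received from adjacent panes land in a table resized roughly like:
 *
 *   const string conn_name = _etype_str[Connectivity::TRI6] + "virtual";
 *   // i.e. ":t6:virtual"
 *   _buf_window->set_size(conn_name.c_str(), pane_id, nelems, nelems);
 *
 * The exact set of element types supported depends on the Connectivity
 * enumeration in Roccom; TRI6 here is just an illustration.
 */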

void Pane_ghost_connectivity::build_pconn(){

  // Determine the total node ordering
  get_node_total_order();

  vector< pane_i_vector > gelem_lists;
  pane_i_vector comm_sizes;
  vector<vector<map<pair<int,int>,int> > > nodes_to_send;
  vector<vector<deque<int> > > elems_to_send;

  get_ents_to_send(gelem_lists,
                   nodes_to_send,
                   elems_to_send,
                   comm_sizes);

  // Communicate calculated ghost information
  vector<pane_i_vector> recv_info;
  send_gelem_lists(gelem_lists,
                   recv_info,
                   comm_sizes);

  vector<vector<int> > elem_renumbering;
  vector<vector<map<pair<int,int>,int> > > nodes_to_recv;
  process_received_data(recv_info,
                        elem_renumbering,
                        nodes_to_recv);

  finalize_pconn( nodes_to_send,
                  nodes_to_recv,
                  elems_to_send,
                  elem_renumbering,
                  recv_info);
}
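/* Hedged usage sketch: a driver would construct this utility on a buffer
 * window that already holds the real entities and shared-node pconn data,
 * then ask for the ghost layer. The constructor argument below is an
 * assumption for illustration; see Pane_ghost_connectivity.h for the
 * authoritative interface.
 *
 *   COM::Window *buf_win = ...;          // window with real mesh + pconn
 *   MAP::Pane_ghost_connectivity pgc(buf_win);
 *   pgc.build_pconn();                   // ghost nodes/cells + pconn blocks
 *   // ghost nodal coordinates are updated at the end of finalize_pconn()
 */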

// Get a total ordering of nodes in the form of a pair <P,N>, where
// P is the "owner pane" and N is the node's id on the owner pane.
void Pane_ghost_connectivity::get_node_total_order(){

  // Resize per-pane data structures
  _p_gorder.resize(_npanes);
  _local_nodes.resize(_npanes);

  // On each node determine P, the pane responsible for numbering the node
  for(int i=0; i < (int)(_npanes); ++i){

    int pane_id = _panes[i]->id();
    int nrnodes = _panes[i]->size_of_real_nodes();

    // Initialize P values of complete ordering to the current pane
    _p_gorder[i].clear();
    _p_gorder[i].resize(nrnodes,pane_id);

    // Obtain the pane connectivity of the local pane.
    const Attribute *pconn = _panes[i]->attribute(COM::COM_PCONN);

    // The pconn may or may not list the number of communicating panes
    // in the shared-nodes section. Handle it as though it is not there.
    const int *vs
      = (const int*)pconn->pointer()+MAP::Pane_connectivity::pconn_offset();

    // Loop through communicating panes for shared nodes.
    for ( int j=0, index=0, nj=_cpanes[i].size();
          j<nj; ++j, index+=vs[index+1]+2) {

      // Skip panes which the pconn refers to, but which are not
      // in the current window. May result from partial inheritance.
      while ( _buf_window->owner_rank(vs[index])<0)
        index+=vs[index+1]+2;

      // Update P values for the current list of shared nodes:
      // the highest-numbered sharing pane becomes the owner.
      for(int k=0; k<vs[index+1]; ++k){
        if(vs[index] > _p_gorder[i][vs[index+2+k]-1])
          _p_gorder[i][vs[index+2+k]-1] = vs[index];
      }
    }
  }

  // Set the values of N on nodes for which this pane is responsible
  for(int i=0; i < (int)(_npanes); ++i){

    int pane_id = _panes[i]->id();
    int nrnodes = _panes[i]->size_of_real_nodes();
    int n_gorder_id = _w_n_gorder->id();

    // There are window-level and pane-level attributes; we need the
    // pane-level attribute to obtain a pointer to data.
    Attribute* p_n_gorder =
      const_cast<Attribute*>(_panes[i]->attribute(n_gorder_id));
    int * n_gorder_ptr =
      reinterpret_cast<int*>(p_n_gorder->pointer());

    for(int j=0; j< nrnodes; ++j){
      if(_p_gorder[i][j] == pane_id)
        n_gorder_ptr[j] = j+1;
      else
        n_gorder_ptr[j] = 0;
    }
  }

  // Update shared nodes across all panes using a max reduce operation
  MAP::Pane_communicator pc(_buf_window, _buf_window->get_communicator());
  pc.init(_w_n_gorder);
  pc.begin_update_shared_nodes();
  pc.reduce_on_shared_nodes(MPI_MAX);
  pc.end_update_shared_nodes();

  // Store a mapping from the total node-ordering to the local node id.
  // NOTE: can I make _local_nodes linear and just sort it once?
  for(int i=0; i < (int)(_npanes); ++i){

    int nrnodes = _panes[i]->size_of_real_nodes();
    int n_gorder_id = _w_n_gorder->id();

    Attribute* p_n_gorder =
      const_cast<Attribute*>(_panes[i]->attribute(n_gorder_id));
    int * n_gorder_ptr =
      reinterpret_cast<int*>(p_n_gorder->pointer());

    for(int j=0; j< nrnodes; ++j)
      _local_nodes[i].insert(make_pair(make_pair(_p_gorder[i][j],
                                                 n_gorder_ptr[j]),
                                       j+1));
  }
}
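/* Hedged worked example of the <P,N> ordering. Suppose panes 7 and 9
 * share a physical node that is local node 12 on pane 7 and local node
 * 3 on pane 9. Both panes see pane ids {7,9} for that node, so the max
 * rule above assigns P = 9 on both. Pane 9 then publishes N = 3 (its
 * local id), pane 7 contributes 0, and the MPI_MAX reduction leaves
 * N = 3 everywhere. Both panes end up keying the node as:
 *
 *   pair<int,int> key = make_pair(9, 3);  // (owner pane, owner-local id)
 *   _local_nodes[i][key];                 // -> local node id (12 or 3)
 *
 * which is what lets process_received_data() recognize received nodes
 * it already has a real copy of. The pane and node ids are invented for
 * illustration.
 */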

// Determine elements/nodes to be ghosted on adjacent panes.
void Pane_ghost_connectivity::
get_ents_to_send(vector<pane_i_vector > &gelem_lists,
                 vector<vector<map<pair<int,int>,int> > > &nodes_to_send,
                 vector<vector<deque<int> > > &elems_to_send,
                 pane_i_vector &comm_sizes){

  // sets of adjacent elements and nodes
  vector<vector<set<int> > > adj_eset;
  vector<vector<set<int> > > adj_nset;
  set<int>::iterator eset_pos;

  // resize per-local-pane data structures
  gelem_lists.resize(_npanes);
  adj_eset.resize(_npanes);
  adj_nset.resize(_npanes);
  comm_sizes.resize(_npanes);
  nodes_to_send.resize(_npanes);
  elems_to_send.resize(_npanes);

  for(int i=0; i < _npanes; ++i){

    int n_comm_panes = _cpanes[i].size();
    adj_eset[i].resize(n_comm_panes);
    adj_nset[i].resize(n_comm_panes);
    comm_sizes[i].resize(n_comm_panes,0);
    nodes_to_send[i].resize(n_comm_panes);
    elems_to_send[i].resize(n_comm_panes);

    set<int> cur_eset;

    // Obtain the pane connectivity of the local pane.
    const Attribute *pconn = _panes[i]->attribute(COM::COM_PCONN);

    MAP::Pane_dual_connectivity dc(_panes[i],0);

    const int *vs
      = (const int*)pconn->pointer() +
        MAP::Pane_connectivity::pconn_offset();

    int vs_size
      = pconn->size_of_real_items() -
        MAP::Pane_connectivity::pconn_offset();

    // Loop through communicating panes for shared nodes.
    for ( int j=0, index=0; j<n_comm_panes; ++j, index+=vs[index+1]+2) {

      // skipping nonexistent panes to get to the next communicating pane
      while ( _buf_window->owner_rank(vs[index])<0) {
        index+=vs[index+1]+2;
        COM_assertion_msg( index<=vs_size, "Invalid communication map");
      }

      for(int k=0; k<vs[index+1]; ++k){

        // get elements incident on the shared node:
        vector<int> elist;
        adj_nset[i][j].insert(vs[index+2+k]);
        dc.incident_elements(vs[index+2+k],elist);

        // remember elements adjacent to this pane
        for(unsigned int ii=0; ii<elist.size(); ++ii)
          cur_eset.insert(elist[ii]);
      }

      adj_eset[i][j] = cur_eset;

      // Calculate size of data to send. For every element, send its type
      // and a list of its nodes in complete-ordering format.
      for(eset_pos = cur_eset.begin(); eset_pos != cur_eset.end(); ++eset_pos){
        comm_sizes[i][j] += 1 + 2*((_panes[i]->connectivity(*eset_pos))
                                   ->size_of_nodes_pe());
      }
      cur_eset.clear();
    }
  }

  int n_gorder_id = _w_n_gorder->id();

  // fill in pane comm data
  for(int i=0; i < _npanes; ++i){

    Attribute* p_n_gorder =
      const_cast<Attribute*>(_panes[i]->attribute(n_gorder_id));
    int * n_gorder_ptr =
      reinterpret_cast<int*>(p_n_gorder->pointer());

    gelem_lists[i].resize(_cpanes[i].size());

    // fill in the comm buffers
    for(int j=0, nj = comm_sizes[i].size(); j<nj; ++j){

      int index = 0;

      gelem_lists[i][j].resize(comm_sizes[i][j]);

      for(eset_pos = adj_eset[i][j].begin();
          eset_pos != adj_eset[i][j].end();
          ++eset_pos){

        vector<int> nodes;
        COM::Element_node_enumerator ene(_panes[i],*eset_pos);
        elems_to_send[i][j].push_back(*eset_pos);
        ene.get_nodes(nodes);

        // store type
        gelem_lists[i][j][index++] = ene.type();

        for(int k=0, nk = ene.size_of_nodes(); k<nk; ++k){
          // store nodes in (P,N) format
          int P = _p_gorder[i][nodes[k]-1];
          int N = n_gorder_ptr[nodes[k]-1];
          gelem_lists[i][j][index++] = P;
          gelem_lists[i][j][index++] = N;

          // Send nodes which aren't shared with this pane
          if(adj_nset[i][j].find(nodes[k]) == adj_nset[i][j].end())
            nodes_to_send[i][j].insert(make_pair(make_pair(P,N),nodes[k]));
        }
      }
    }
  }

  // We are finished with the total ordering at this point; free up some space
  _buf_window->delete_attribute("n_gorder");
  _buf_window->init_done();

  _p_gorder.clear();
}
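/* Hedged example of the message layout built above. Each element is
 * packed as [type, P1,N1, P2,N2, ...], so a 4-node tetrahedron costs
 * 1 + 2*4 = 9 ints and a 20-node hex costs 1 + 2*20 = 41 ints. A list
 * containing one TET4 followed by one HEX20 therefore occupies
 * 9 + 41 = 50 ints of comm_sizes[i][j], and a receiver can walk the
 * stream with:
 *
 *   for(int ind=0; ind<size;
 *       ind += 1 + 2*Connectivity::size_of_nodes_pe(buf[ind]))
 *     ;  // buf[ind] is the element type; (P,N) pairs follow
 *
 * which is exactly the stride used later in finalize_pconn().
 */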

// Determine the number of ghost nodes to receive and map (P,N) to ghost
// node ids. Also determine the number of ghost elements of each type to
// receive.
void Pane_ghost_connectivity::
process_received_data( vector<pane_i_vector> &recv_info,
                       vector<vector<int> > &elem_renumbering,
                       vector<vector<map<pair<int,int>,int> > > &nodes_to_recv){

  map<pair<int,int>,int>::iterator pos1, pos2;

  elem_renumbering.resize(_npanes);
  nodes_to_recv.resize(_npanes);

  for(int i=0; i<_npanes; ++i){

    int n_real_nodes = _panes[i]->size_of_real_nodes();
    int next_node_id = n_real_nodes + 1;
    int comm_npanes = recv_info[i].size();
    elem_renumbering[i].resize((int)Connectivity::TYPE_MAX_CONN+1,0);
    nodes_to_recv[i].resize(comm_npanes);

    for(int j=0; j< comm_npanes; ++j){

      int recv_size = recv_info[i][j].size();
      int index = 0;

      while(index < recv_size){
        int type = recv_info[i][j][index];
        int nnodes = Connectivity::size_of_nodes_pe(type);
        ++elem_renumbering[i][type+1];

        // Examine the element's nodes, labeling those seen for the first time.
        for(int k=1; k<=2*nnodes; k+=2){

          int P = recv_info[i][j][index+k];
          int N = recv_info[i][j][index+k+1];

          pos1 = nodes_to_recv[i][j].find(make_pair(P,N));
          pos2 = _local_nodes[i].find(make_pair(P,N));

          // If we haven't seen this node at all, give it an id; otherwise
          // use the existing id.
          int cur_node_id = next_node_id;
          if(pos2 == _local_nodes[i].end())
            _local_nodes[i].insert(make_pair(make_pair(P,N),next_node_id++));
          else
            cur_node_id = pos2->second;

          // If we haven't seen this node from the current adjacent pane,
          // and we don't have a local real copy of the node, then
          // remember to receive it.
          if(pos1 == nodes_to_recv[i][j].end()
             && cur_node_id > n_real_nodes)
            nodes_to_recv[i][j].insert(make_pair(make_pair(P,N),cur_node_id));
        }
        index += 2*nnodes+1;
      }
    }
  }
}
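/* Hedged note on elem_renumbering: counts are accumulated at [type+1]
 * so that finalize_pconn() can turn the vector into a prefix sum and
 * read [type] as the first ghost-element slot of each type. Assuming
 * TET4 precedes HEX20 in the Connectivity enumeration, 3 ghost TET4s
 * and 2 ghost HEX20s (and none of any other type) give, after the
 * prefix sum:
 *
 *   elem_renumbering[i][Connectivity::TET4]  == 0  // TET4 slots 0,1,2
 *   elem_renumbering[i][Connectivity::HEX20] == 3  // HEX20 slots 3,4
 *
 * (slots shown relative to the first ghost element; real ids are
 * shifted by size_of_real_elements()+1 when written to the pconn).
 */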

// Take the data we've collected and turn it into the pconn.
// Remember that there are 5 blocks in the pconn:
// 1) Shared node info - already exists
// 2) Real Nodes to Send
//      Data in nodes_to_send; should be in the correct order
// 3) Ghost Nodes to Receive
//      Data in nodes_to_recv
// 4) Real Cells to Send
//      In elems_to_send
// 5) Ghost Cells to Receive
//      Combine elem_renumbering with recv_info
//
// Also need to calculate the connectivity tables for the
// new ghost elements. Do this while looking through recv_info
// for GCR.

void Pane_ghost_connectivity::
finalize_pconn(vector<vector<map<pair<int,int>,int> > > &nodes_to_send,
               vector<vector<map<pair<int,int>,int> > > &nodes_to_recv,
               vector<vector<deque<int> > > &elems_to_send,
               vector<vector<int> > &elem_renumbering,
               vector<pane_i_vector> &recv_info){

  map<pair<int,int>,int>::iterator rns_pos,gnr_pos;
  vector<vector<int> > node_pos;

  // Buffer for the number of elements to receive from each incident pane
  vector<vector<int> > n_elem;

  // Get the name of the buffer window
  string buf_name(_buf_window->name());

  // Save pointers to connectivity tables so we don't have to look them up
  // as each element's connectivity is registered
  vector<vector<int*> > conn_ptr(_npanes);

  node_pos.resize(_npanes);
  n_elem.resize(_npanes);

  // Determine buffer space required:
  // 1 (#comm panes) + 2 per adjacent pane (comm pane id and #entities)
  // + total #entries in entity list (i.e. node lists for nodes to receive)
  for(int i=0; i< _npanes; ++i){

    int gcr_size =1, rcs_size =1, gnr_size =1, rns_size = 1;
    int n_comm_panes = _cpanes[i].size();
    int pane_id = _panes[i]->id();

    // Real nodes to send
    for(int j=0, nj = nodes_to_send[i].size(); j<nj; ++j)
      rns_size += 2+nodes_to_send[i][j].size();

    // Ghost nodes to receive
    for(int j=0, nj = nodes_to_recv[i].size(); j<nj; ++j)
      gnr_size += 2+nodes_to_recv[i][j].size();

    // Real cells to send
    for(int j=0, nj = (int)elems_to_send[i].size(); j<nj; ++j)
      rcs_size += 2+elems_to_send[i][j].size();

    // Ghost cells to receive
    n_elem[i].resize(n_comm_panes,0);
    for(int j=0, nj = (int)_cpanes[i].size(); j<nj; ++j){
      gcr_size += 2;
      for(int ind=0, size = (int)recv_info[i][j].size();
          ind < size;
          ind += 1+2*Connectivity::size_of_nodes_pe(recv_info[i][j][ind])){
        gcr_size++;
        n_elem[i][j]++;
      }
    }

    node_pos[i].assign(elem_renumbering[i].begin(),elem_renumbering[i].end());

    // Make room for pointers to all potential connectivity tables
    conn_ptr[i].resize(Connectivity::TYPE_MAX_CONN,NULL);

    // Resize connectivity tables
    for(int j=0; j<Connectivity::TYPE_MAX_CONN; ++j){

      int nelems = elem_renumbering[i][j+1];
      elem_renumbering[i][j+1] += elem_renumbering[i][j];

      if(nelems >0){

        const string conn_name = _etype_str[j]+"virtual";

        int nnodes = Connectivity::size_of_nodes_pe(j);

        // Resize connectivity table and keep a pointer to its buffer
        void* addr;
        _buf_window->set_size(conn_name.c_str(), pane_id, nelems, nelems);
        _buf_window->resize_array(conn_name.c_str(), pane_id, &addr,
                                  nnodes, nelems);

        conn_ptr[i][j] = (int*)addr;
        COM_assertion_msg(addr!= NULL,
                          "Could not allocate space for connectivity table");
      }
    }

    // Resize pconn
    Attribute *pconn = NULL;

    pconn = _panes[i]->attribute(COM::COM_PCONN);
    int rsize = pconn->size_of_real_items();
    int gsize = rns_size + gnr_size + rcs_size + gcr_size;

    void *addr;
    _buf_window->set_size("pconn", pane_id, rsize+gsize, gsize);
    _buf_window->resize_array("pconn", pane_id, &addr);

    pconn = _panes[i]->attribute(COM::COM_PCONN);

    int* pconn_ptr = (int*)addr;

    int rns_ind = rsize;
    int gnr_ind = rns_ind + rns_size;
    int rcs_ind = gnr_ind + gnr_size;
    int gcr_ind = rcs_ind + rcs_size;

    // each block begins with the number of communicating panes
    pconn_ptr[rns_ind++] = n_comm_panes;
    pconn_ptr[gnr_ind++] = n_comm_panes;
    pconn_ptr[rcs_ind++] = n_comm_panes;
    pconn_ptr[gcr_ind++] = n_comm_panes;

    // Offset to start of ghost element ids
    int real_offset = _panes[i]->size_of_real_elements()+1;

    // The current implementation only sends nodes to panes with a ghost
    // copy of a local element, so there are the same number of communicating
    // panes in each pconn block.
    // If the code is generalized in the future, there may need to be
    // separate loops for some pconn blocks.
    for(int j=0; j <n_comm_panes; ++j){

      // Write communicating-pane id to buffer
      int comm_pane_id = _cpanes[i][j];
      pconn_ptr[rns_ind++] = comm_pane_id;
      pconn_ptr[gnr_ind++] = comm_pane_id;
      pconn_ptr[rcs_ind++] = comm_pane_id;
      pconn_ptr[gcr_ind++] = comm_pane_id;

      // Write number of entities to buffer
      pconn_ptr[rns_ind++] = nodes_to_send[i][j].size();
      pconn_ptr[gnr_ind++] = nodes_to_recv[i][j].size();
      pconn_ptr[rcs_ind++] = elems_to_send[i][j].size();
      pconn_ptr[gcr_ind++] = n_elem[i][j];

      // Write entities to ghost pconn buffers
      for(rns_pos = nodes_to_send[i][j].begin();
          rns_pos != nodes_to_send[i][j].end(); ++rns_pos)
        pconn_ptr[rns_ind++] = rns_pos->second;

      for(gnr_pos = nodes_to_recv[i][j].begin();
          gnr_pos != nodes_to_recv[i][j].end(); ++gnr_pos)
        pconn_ptr[gnr_ind++] = gnr_pos->second;

      for(int k=0, nk = (int)elems_to_send[i][j].size(); k<nk; ++k)
        pconn_ptr[rcs_ind++] = elems_to_send[i][j][k];

      // The GCR block is more complicated because we want all ghost elements
      // of a single type to have contiguous element ids, which is required
      // by Roccom if we want to register one connectivity table per type.
      int recv_size = recv_info[i][j].size();
      int index = 0;
      while(index < recv_size){

        int elem_type = recv_info[i][j][index];
        int nnodes = Connectivity::size_of_nodes_pe(elem_type);

        // id offset within the correct connectivity table
        int conn_offset = node_pos[i][elem_type]++;

        pconn_ptr[gcr_ind++] = real_offset + elem_renumbering[i][elem_type]++;

        // Write out the ghost element's nodes
        for(int k=1; k <= 2*nnodes; k+=2){

          map<pair<int,int>,int>::iterator pos;
          pos = _local_nodes[i].find(make_pair(recv_info[i][j][index+k],
                                               recv_info[i][j][index+k+1]));
          COM_assertion(pos != _local_nodes[i].end());
          conn_ptr[i][elem_type][nnodes*conn_offset+(k-1)/2] = pos->second;
        }

        index += 2*nnodes+1;
      }
    }

    int new_size = _local_nodes[i].size();
    int new_gsize = new_size - _panes[i]->size_of_real_nodes();

    // Extend nodal coords to accommodate ghost nodes
    _buf_window->set_size("nc", pane_id, new_size, new_gsize);

    _buf_window->resize_array("nc", pane_id, &addr, 3);
  }

  // Update ghost nodal coordinates using the newly constructed pconn
  MAP::Rocmap::update_ghosts(_buf_window->attribute(COM::COM_NC));
}
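/* Hedged sketch of the resulting pconn layout for one pane. Each of the
 * four ghost blocks written above is
 * [n_comm_panes, then per pane: pane_id, count, entities...]. For a pane
 * communicating with pane 2 only, sending real nodes {5,9}, receiving
 * ghost nodes {11,12,13}, sending real cell 4, and receiving ghost cell
 * 7, the ghost part of the pconn array would read:
 *
 *   1, 2, 2, 5, 9,        // block 2: real nodes to send
 *   1, 2, 3, 11, 12, 13,  // block 3: ghost nodes to receive
 *   1, 2, 1, 4,           // block 4: real cells to send
 *   1, 2, 1, 7            // block 5: ghost cells to receive
 *
 * The concrete ids are invented for illustration; the block structure
 * matches the rns/gnr/rcs/gcr writes above.
 */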

// Determine communicating panes for shared nodes. Look through the pconn
// twice, once to determine the number of communicating panes, and again
// to fill in the properly sized vector.
void Pane_ghost_connectivity::get_cpanes(){

  // Resize per-local-pane data structures
  _cpanes.resize(_npanes);

  for(int i=0; i < (_npanes); ++i){

    // Obtain the pconn Attribute of the local pane.
    const Attribute *pconn = _panes[i]->attribute(COM::COM_PCONN);

    // Use the pconn offset and get a pointer to pconn data
    const int *vs =
      (const int*)pconn->pointer()+MAP::Pane_connectivity::pconn_offset();
    int vs_size =
      pconn->size_of_real_items()-MAP::Pane_connectivity::pconn_offset();

    int n_cpanes = 0;
    for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
      if (_buf_window->owner_rank( vs[j]) >=0)
        ++n_cpanes;
    }
    _cpanes[i].resize(n_cpanes);
    int cpane_ind = 0;
    for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
      if (_buf_window->owner_rank( vs[j]) >=0){
        _cpanes[i][cpane_ind] = vs[j];
        ++cpane_ind;
      }
    }
  }
}
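/* Hedged example of the stride used in both scans. A shared-node block
 * is a flat run of [pane_id, count, node_1..node_count] records, so with
 *
 *   vs = { 2, 3, 4, 5, 6,   7, 1, 9 }
 *
 * the walk j += vs[j+1]+2 first visits the record for pane 2 (3 shared
 * nodes: 4,5,6), then jumps 3+2 = 5 slots to the record for pane 7
 * (1 shared node: 9). Records whose pane_id has owner_rank < 0 belong to
 * panes absent from this window and are skipped. The values are invented
 * for illustration.
 */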

void Pane_ghost_connectivity::
send_gelem_lists(vector<pane_i_vector> &gelem_lists,
                 vector<pane_i_vector> &recv_info,
                 pane_i_vector &comm_sizes){

  pane_i_vector size_buffer;

  size_buffer.resize(_npanes);

  for(int i=0; i < _npanes; ++i)
    size_buffer[i].resize(_cpanes[i].size(),1);

  vector<pane_i_vector> send_buffer;
  send_buffer.resize(_npanes);

  // Transfer comm_sizes into send_buffer
  for(int i=0; i < _npanes; ++i){
    send_buffer[i].resize(_cpanes[i].size());
    for(int j=0, nj=_cpanes[i].size(); j<nj; ++j){
      send_buffer[i][j].resize(1);
      send_buffer[i][j][0] = comm_sizes[i][j];
    }
  }

  // Tell adjacent panes how much information I'm sending them
  send_pane_info(send_buffer,
                 recv_info,
                 size_buffer);

  // Transfer received sizes into size_buffer
  for(int i=0; i < _npanes; ++i){
    for(int j=0, nj=_cpanes[i].size(); j<nj; ++j){
      size_buffer[i][j] = recv_info[i][j][0];
    }
  }

  // Send pane comm data
  send_pane_info(gelem_lists,
                 recv_info,
                 size_buffer);
}
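/* Hedged note: this is a two-phase exchange. Phase 1 sends a single int
 * per pane pair (the payload size) so each receiver can presize its
 * buffers; phase 2 sends the variable-length element lists themselves.
 * A minimal trace for panes A and B, assuming A ghosts one TET4 to B:
 *
 *   phase 1: A -> B : { 9 }   // 1 + 2*4 ints are coming
 *   phase 2: A -> B : { TET4, P1,N1, P2,N2, P3,N3, P4,N4 }
 */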

// Send an arbitrary amount of data to another pane.
// send_info  = data to send
// recv_info  = buffer for receiving data
// comm_sizes = amount of data to receive
// The list of communicating panes comes from the _cpanes member.
void Pane_ghost_connectivity::
send_pane_info(vector<pane_i_vector> &send_info,
               vector<pane_i_vector> &recv_info,
               pane_i_vector &comm_sizes){

  const Window::Proc_map &proc_map = _buf_window->proc_map();
  int total_npanes = proc_map.size();
  int tag_max = total_npanes*total_npanes;
  recv_info.resize(_npanes);

  // Determine a mapping from user-defined pane ids to a set of
  // internal pane ids, which are unique and contiguous across all
  // processes, to be used for defining unique tags for MPI messages.
  map<int,int> lpaneid_map;
  map<int,int>::const_iterator it=proc_map.begin();
  for ( int i=0; i<total_npanes; ++i, ++it)
    lpaneid_map[ it->first] = i;

  vector<MPI_Request> reqs_send, reqs_recv;
  int int_size = sizeof(int);
  MPI_Request req;
  MPI_Comm mycomm = _buf_window->get_communicator();
  int myrank = COMMPI_Initialized() ? COMMPI_Comm_rank(mycomm) : 0;

  // initiate MPI sends and receives
  for(int i=0; i< _npanes; ++i){

    recv_info[i].resize(_cpanes[i].size());
    int lpid = lpaneid_map.find(_panes[i]->id())->second;

    for(int j=0, nj = _cpanes[i].size(); j<nj; ++j){

      recv_info[i][j].resize(comm_sizes[i][j],0);

      const int lqid = lpaneid_map.find(_cpanes[i][j])->second;
      int adjrank = _buf_window->owner_rank(_cpanes[i][j]);

      int stag = 100 + ((lpid > lqid) ?
                        lpid*total_npanes+lqid : lqid*total_npanes+lpid);
      int rtag = stag;
      MPI_Comm comm = mycomm;

      // pane sending to itself: copy the payload straight from the send
      // buffer into the receive buffer
      if(myrank == adjrank && lpid == lqid)
        memcpy(&recv_info[i][j][0], &send_info[i][j][0],
               int_size*send_info[i][j].size());

      // sending to other panes, use COMMPI
      else{
        // process sending to itself
        if (myrank == adjrank){
          // make tags unique
          if(lpid > lqid)
            stag += tag_max;
          else
            rtag += tag_max;
          comm = MPI_COMM_SELF;
          adjrank = 0;
        }

        int ierr =
          COMMPI_Isend(&send_info[i][j][0], int_size*send_info[i][j].size(),
                       MPI_BYTE, adjrank, stag, comm, &req);
        COM_assertion(ierr==0);
        reqs_send.push_back(req);

        ierr = COMMPI_Irecv(&recv_info[i][j][0], int_size*comm_sizes[i][j],
                            MPI_BYTE, adjrank, rtag, comm, &req);
        COM_assertion(ierr==0);
        reqs_recv.push_back(req);
      }
    }
  }
  // wait for MPI communication to finish
  if(mycomm != MPI_COMM_NULL){

    int ierr, index;
    MPI_Status status;

    while(!reqs_recv.empty()){
      ierr = MPI_Waitany(reqs_recv.size(),&reqs_recv[0],
                         &index, &status);
      COM_assertion(ierr == 0);
      reqs_recv.erase(reqs_recv.begin()+index);
    }

    while(!reqs_send.empty()){
      ierr = MPI_Waitany(reqs_send.size(),&reqs_send[0],
                         &index, &status);
      COM_assertion(ierr == 0);
      reqs_send.erase(reqs_send.begin()+index);
    }
  }
}
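/* Hedged worked example of the tag scheme. With total_npanes = 4 and
 * internal ids lpid = 2, lqid = 1, both endpoints compute the same base
 * tag 100 + 2*4 + 1 = 109, so matching send/recv pairs agree. When two
 * different panes on the same process talk to each other (myrank ==
 * adjrank with lpid != lqid), both directions would otherwise use tag
 * 109 on MPI_COMM_SELF; bumping the higher pane's send tag and the lower
 * pane's receive tag by tag_max = 16 keeps the two directions distinct
 * (one direction matches on 109 + 16 = 125, the other on 109).
 */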

void Pane_ghost_connectivity::determine_shared_nodes(){

  _is_shared_node.resize(_npanes);

  // First, get the list of shared nodes.
  for(int i=0; i < (int)(_npanes); ++i){
    // Obtain the pane connectivity of the local pane.
    const COM::Attribute *pconn = _panes[i]->attribute(COM::COM_PCONN);
    // Use the pconn offset
    const int *vs = (const int*)pconn->pointer() +
      MAP::Pane_connectivity::pconn_offset();
    int vs_size = pconn->size_of_real_items() -
      MAP::Pane_connectivity::pconn_offset();

    _is_shared_node[i].resize(_panes[i]->size_of_real_nodes(),0);

    // Determine the number of communicating panes for shared nodes.
    int count=0;
    for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
      if (_buf_window->owner_rank( vs[j]) >=0) ++count;
    }

    int index = 0;
    // Loop through communicating panes for shared nodes.
    for ( int j=0; j<count; ++j, index+=vs[index+1]+2) {
      // We skip the panes that are not in the current window
      while ( _buf_window->owner_rank(vs[index])<0) {
        index+=vs[index+1]+2;
        COM_assertion_msg( index<=vs_size, "Invalid communication map");
      }
      // Add shared nodes to the list
      for(int k=0; k<vs[index+1]; ++k){
        _is_shared_node[i][vs[index+2+k]-1] = 1;
      }
    }
  }
}

void Pane_ghost_connectivity::
mark_elems_from_nodes(std::vector<std::vector<bool> > &marked_nodes,
                      std::vector<std::vector<bool> > &marked_elems){

  marked_elems.clear();
  marked_elems.resize(_npanes);

  // Loop through panes
  for(int i=0; i < (int)(_npanes); ++i){

    marked_elems[i].clear();
    marked_elems[i].resize(_panes[i]->size_of_real_elements(),false);

    // Loop through real elements.
    // Mark for quality check if they contain shared nodes.
    int s_real_elems = _panes[i]->size_of_real_elements();
    std::vector<int> nodes;
    for(int j=1; j<= s_real_elems; ++j){
      COM::Element_node_enumerator ene(_panes[i],j);
      ene.get_nodes(nodes);
      for(int k=0, nk=nodes.size(); k<nk; ++k){
        if (marked_nodes[i][nodes[k]-1])
          marked_elems[i][j-1] = true;
      }
    }
  }
}
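/* Hedged usage sketch: a caller would typically seed marked_nodes from
 * the shared-node flags computed in determine_shared_nodes() and then
 * propagate the marks to elements. Assuming _is_shared_node is the
 * vector<vector<bool> > member populated above, this could look like:
 *
 *   std::vector<std::vector<bool> > melems;
 *   mark_elems_from_nodes(_is_shared_node, melems);
 *   // melems[i][e-1] is now true iff real element e of pane i
 *   // touches at least one shared node
 *
 * This seeding is an illustration, not code from this file.
 */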

MAP_END_NAMESPACE