meshtest1.C
#include <ctime>

#include "Profiler.H"
#include "Global.H"
#include "FEM.H"
#ifdef PMETIS
#include "parmetis.h"
#else
#include "metis.h"
#endif

class MeshTestComLine : public ComLineObject
{
public:
  MeshTestComLine(const char *args[])
    : ComLineObject(args)
  {};
  void Initialize(){
    AddOption('h',"help");
    AddOption('p',"partition",2,"number");
    AddOption('v',"verb",1,"level");
    AddOption('c',"clone",2,"number");
    AddOption('a',"assembly");
    AddOption('m',"mesh");
    AddOption('d',"debug");
    AddOption('k',"checking");
    AddOption('r',"reorient");
    AddOption('g',"generate");
    AddOption('s',"sparse");
    AddOption('t',"metis");
    AddOption('n',"renumber");
    AddArgument("input",1);
    AddHelp("metis","Metis testing stub.");
    AddHelp("sparse","Write out the sparse matrix for visualization.");
    AddHelp("clone","Generate <number> of partitions identical to the input mesh.");
    AddHelp("generate","Generate a uniform mesh with N nodes and quit.");
    AddHelp("checking","Paranoid and insanely verbose dumping of all important arrays to Log.");
    AddHelp("partition","Performs Metis partitioning of input mesh into <number> partitions.");
    AddHelp("assembly","Performs assembly test.");
    AddHelp("mesh","Performs mesh tests.");
    AddHelp("help","Prints this long version of help.");
    AddHelp("verb","Makes the test more verbose. Default level is 1.");
    AddHelp("config","Specifies the name of the configuration file.");
    AddHelp("out","Specifies the name of the output file.");
    AddHelp("renumber","Uses ParMETIS to do optimal graph reordering.");
    AddArgHelp("input","Mode dependent arguments");
    std::ostringstream Ostr;
    // Ostr << "Use fixed problem size in scalability analysis. Only makes"
    //      << "\n\t\tsense when scalability mode is enabled.";
    // Ostr.str("");
    Ostr << "Test tool for exercising the mesh library.";
    _description.assign(Ostr.str());
  };
};

void GetStatistics(std::ostream &Ostr,Mesh::Connectivity &con)
{
  Ostr << "Size = " << con.size() << std::endl
       << "Nelem = " << con.Nelem() << std::endl;
  double mean = 0.0;
  Mesh::IndexType max = 0;
  Mesh::IndexType min = 100000000;
  Mesh::IndexType minvalue = 10000000;
  Mesh::IndexType maxvalue = 0;
  Mesh::IndexType nvalues = 0;
  double meanvalue = 0.0;
  for(Mesh::IndexType iii = 0;iii < con.Nelem();iii++){
    mean += con.Esize(iii+1);
    if(max < con.Esize(iii+1))
      max = con.Esize(iii+1);
    if(min > con.Esize(iii+1))
      min = con.Esize(iii+1);
    for(Mesh::IndexType jjj = 0;jjj < con.Esize(iii+1);jjj++){
      if(minvalue > con[iii][jjj])
        minvalue = con[iii][jjj];
      if(maxvalue < con[iii][jjj])
        maxvalue = con[iii][jjj];
      meanvalue += con[iii][jjj];
      nvalues++;
    }
  }
  Ostr << "Esizes(min/max/mean): ("
       << min << "/" << max << "/"
       << mean/con.Nelem() << ")"
       << std::endl
       << "Flattened Size = " << nvalues
       << std::endl
       << "Values(min/max/mean): ("
       << minvalue << "/"
       << maxvalue << "/"
       << meanvalue/nvalues << ")"
       << std::endl;
}

// *** Warning ***
// This is called *after* the local dof ids have been mapped to the new dof ids and the
// info.doffset has been reset to an appropriate value. The NodalDofs array should hold the
// new global numbers with the doffset applied. We need local dof ids (i.e. 1 - ndof_local)
// in order to assemble the stiffness matrix properly. So we need to determine the new
// "rank ordering" (i.e. determine the doffset).
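// A hedged illustration of the reordering (values assumed, not from the
// original source): suppose this rank has 4 local dofs and the reordering
// returns order = {2,0,3,1} (0-based new positions). Then a nodal dof with
// old local id 3 is renumbered to order[3-1]+1 = 4, which is exactly the
// expression order[*ldi-1]+1 used when packing the send buffers below.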
// (Reconstructed signature: the original first line and the info parameter line
// were lost in extraction; the name comes from the log message below, and
// Mesh::Connectivity &ec / Mesh::PartInfo &info are assumed from their use in the body.)
void GlobalDofReMapExchange(Mesh::Connectivity &ec,
                            Mesh::Connectivity &NodalDofs,
                            Mesh::Connectivity &ElementDofs,
                            Mesh::Connectivity &RemoteNodalDofs,
                            std::vector<Mesh::Border> &borders,
                            Mesh::PartInfo &info,
                            std::vector<Mesh::IndexType> &order,
                            Comm::CommunicatorObject &comm,
                            std::ostream *Out,bool verbose)
{
  // First, build and post the receives for the new remote nodal dof ids
  std::vector<std::vector<Mesh::IndexType> > RcvBuf;
  unsigned int nborders = borders.size();
  RcvBuf.resize(nborders);
  for(unsigned int i = 0;i < nborders;i++){
    RcvBuf[i].resize(borders[i].remote_dofcount);
    // Go ahead and post the receives for the new dof ids
    comm.ARecv<Mesh::IndexType>(RcvBuf[i],borders[i].rpart-1); // async recv
    if(Out && verbose)
      *Out << "Posting receive for " << RcvBuf[i].size() << " dofs from "
           << borders[i].rpart << std::endl;
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs receive posted in GlobalDofReMapExchange." << std::endl;

  // Now form and send the new global dofs for nodes I own
  // *** Warning ***
  // This routine assumes that the dofs passed in the NodalDofs array need
  // to be offset by info.doffset. If this is not the case, make sure that
  // info.doffset is set to 0 upon entry.
  std::vector<std::vector<Mesh::IndexType> > SndBuf;
  SndBuf.resize(nborders);
  for(unsigned int i = 0;i < nborders;i++){
    SndBuf[i].resize(0);
    std::vector<Mesh::IndexType>::iterator rni = borders[i].nrecv.begin();
    while(rni != borders[i].nrecv.end()){
      std::vector<Mesh::IndexType>::iterator ldi = NodalDofs[*rni-1].begin();
      while(ldi != NodalDofs[*rni-1].end()){
        SndBuf[i].push_back(order[*ldi-1]+1); // + info.doffset? nah
        ldi++;
      }
      rni++;
    }
    if(Out && verbose){
      *Out << "Posting send of " << SndBuf[i].size() << " nodal dofs to "
           << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,SndBuf[i]," ");
      *Out << std::endl;
    }
    // borders[i].recvsize = SndBuf[i].size();
    comm.ASend<Mesh::IndexType>(SndBuf[i],borders[i].rpart-1); // asynchronous send
  }
  comm.WaitAll();
  comm.ClearRequests();
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs send posted 2." << std::endl;

  // Unpack the received global dofs into the RemoteNodalDofs array
  for(unsigned int i = 0;i < nborders;i++){
    std::vector<Mesh::IndexType>::iterator rdi = RcvBuf[i].begin();
    std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
    if(Out && verbose){
      *Out << "Unpacking recv'd dofs:" << std::endl;
      DumpContents(*Out,RcvBuf[i]," ");
      *Out << std::endl;
    }
    while(sni != borders[i].nsend.end()){
      Mesh::IndexType rnindex = *sni - info.nlocal - 1;
      std::vector<Mesh::IndexType>::iterator ndi = RemoteNodalDofs[rnindex].begin();
      while(ndi != RemoteNodalDofs[rnindex].end())
        *ndi++ = *rdi++;
      sni++;
    }
  }
  comm.Barrier();
  //
  // All Nodal Dofs have been updated. Now need to update the communication buffers

  // Now calculate the indices of the stiffness entries I have for the remote
  // nodal dofs.
  Mesh::Connectivity dc; // assumed declaration (original line lost); dc is built just below
  Mesh::IndexType number_of_nodes = NodalDofs.size();
  if(Out && verbose)
    *Out << "Forming dual connectivity for " << number_of_nodes << " nodes." << std::endl;
  ec.Inverse(dc,number_of_nodes);
  if(Out && verbose)
    *Out << "dual done." << std::endl;
  // std::vector<std::vector<Mesh::IndexType> > SndBuf(nborders);
  std::vector<Mesh::IndexType> ssizes(nborders,0);
  for(unsigned int i = 0;i < nborders;i++){
    // borders[i].data.SendAp.resize(0);
    borders[i].data.SendAi.resize(0);
    // borders[i].data.SendAp.push_back(0);
    std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
    while(sni != borders[i].nsend.end()){
      // The list is the same for each border *node*
      std::list<Mesh::IndexType> doflist;
      // Count up the dof contributions from every local
      // element that touches the remote node
      std::vector<Mesh::IndexType>::iterator ei = dc[*sni-1].begin();
      while(ei != dc[*sni-1].end()){ // for every element touching the remote node
        std::vector<Mesh::IndexType>::iterator eni = ec[*ei-1].begin();
        while(eni != ec[*ei-1].end()){ // loop thru the element's nodes
          std::vector<Mesh::IndexType>::iterator ndi;
          std::vector<Mesh::IndexType>::iterator ndend;
          if(*eni > info.nlocal){ // then it's a remote node
            ndi = RemoteNodalDofs[*eni - info.nlocal - 1].begin();
            ndend = RemoteNodalDofs[*eni - info.nlocal - 1].end();
            while(ndi != ndend)
              doflist.push_back(*ndi++);
          }
          else{
            // offset = info.doffset;
            ndi = NodalDofs[*eni-1].begin();
            ndend = NodalDofs[*eni-1].end();
            while(ndi != ndend){
              doflist.push_back(order[*ndi-1]+1);
              ndi++;
            }
          }
          eni++;
        }
        eni = ElementDofs[*ei-1].begin();
        while(eni != ElementDofs[*ei-1].end()){
          doflist.push_back(order[*eni-1]+1);
          eni++;
        }
        ei++;
      }
      doflist.sort();
      doflist.unique();
      std::list<Mesh::IndexType>::iterator dli = doflist.begin();
      while(dli != doflist.end())
        borders[i].data.SendAi.push_back(*dli++);
      sni++;
    }
    if(Out && verbose){
      *Out << "SendAp for color " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAp," ");
      *Out << std::endl
           << "SendAi for color " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAi," ");
      *Out << std::endl;
    }
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs have remapped SendAi." << std::endl;
  // By now, the remote processor has sent us an array of sizes for the nz entries
  // relevant to each local *node*. To get the total size of the stiffness block
  // that will be sent, we need to multiply by the number of actual dofs living
  // on the local node. Right now, we are just interested in the dof ids that
  // we will be receiving per node.
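  // Worked example (assumed numbers, not from the original source): if
  // RcvBuf[i] came in as {3,5} for two local border nodes that each carry
  // 2 dofs, the remote side will eventually ship 3*2 + 5*2 = 16 stiffness
  // values for this border; here we only care about the per-node dof id lists.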
  for(unsigned int i = 0;i < nborders;i++){
    // borders[i].data.RecvAp.resize(RcvBuf[i].size()+1);
    // std::vector<Mesh::IndexType>::iterator rbi = RcvBuf[i].begin();
    // std::vector<Mesh::IndexType>::iterator rai = borders[i].data.RecvAp.begin();
    // *rai = 0;
    // while(rbi != RcvBuf[i].end()){
    //   Mesh::IndexType pv = *rai++;
    //   *rai = pv + *rbi++;
    // }
    if(Out && verbose){
      *Out << "RecvAp from partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.RecvAp," ");
      *Out << std::endl;
    }
    // We can go ahead and post receives for the Ai
    std::vector<Mesh::IndexType>::reverse_iterator rsi = borders[i].data.RecvAp.rbegin();
    unsigned int totalsize = *rsi;
    borders[i].data.RecvAi.resize(totalsize);
    if(Out && verbose)
      *Out << "Posting RecvAi (" << borders[i].data.RecvAi.size()
           << ") from partition " << borders[i].rpart << std::endl;
    comm.ARecv<Mesh::IndexType>(borders[i].data.RecvAi,borders[i].rpart-1); // async recv
    // Note how we've already conveniently filled the SendAi, send it out now
    comm.ASend<Mesh::IndexType>(borders[i].data.SendAi,borders[i].rpart-1); // async send
    if(Out && verbose){
      *Out << "Posting SendAi (" << borders[i].data.SendAi.size()
           << ") to partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAi," ");
      *Out << std::endl;
    }
  }
  comm.WaitAll();
  comm.ClearRequests();
  comm.Barrier();
  if(Out && verbose){
    for(unsigned int i = 0;i < nborders;i++){
      *Out << "Received RecvAi (" << borders[i].data.RecvAi.size()
           << ") from partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.RecvAi," ");
      *Out << std::endl;
    }
  }
}

void TestMPI(std::vector<Mesh::Border> &borders,Mesh::Connectivity &NodalDofs,
             Mesh::Connectivity &RemoteNodalDofs,Mesh::PartInfo &info,
             Comm::CommunicatorObject &comm,std::ostream *Out,bool verbose)
{
  unsigned int okcount = 0;
  unsigned int bcount = 0;
  for(unsigned int ntrial = 0;ntrial < 100;ntrial++){
    comm.Barrier();
    if(Out && verbose)
      *Out << "Inside MPI Test(" << ntrial << ")" << std::endl;
    // First just exchange the sizes (in number of dofs) for each node
    //
    // Build the receive buffers and post the receives
    // Mesh::IndexType doffset = info.doffset;
    std::vector<std::vector<Mesh::IndexType> > RcvBuf;
    unsigned int nborders = borders.size();
    RcvBuf.resize(nborders);
    for(unsigned int i = 0;i < nborders;i++){
      // For each remotely owned node, we will receive one integer indicating the
      // number of dofs on that remotely owned node
      RcvBuf[i].resize(borders[i].nsend.size());
      if(Out && verbose)
        *Out << "Posting receive for " << borders[i].nsend.size() << " nodes from "
             << borders[i].rpart << std::endl << std::flush;
      comm.ARecv<Mesh::IndexType>(RcvBuf[i],borders[i].rpart-1); // async recv
    }
    comm.Barrier();
    if(Out && verbose)
      *Out << "All procs receives posted." << std::endl;
    // Build the send buffer for sending the number of dofs on each
    // node on the partition boundary that I own
    std::vector<std::vector<Mesh::IndexType> > SndBuf;
    SndBuf.resize(nborders);
    for(unsigned int i = 0;i < nborders;i++){
      SndBuf[i].resize(borders[i].nrecv.size());
      unsigned int j = 0;
      std::vector<Mesh::IndexType>::iterator rni = borders[i].nrecv.begin();
      while(rni != borders[i].nrecv.end()){
        SndBuf[i][j] = NodalDofs[*rni-1].size();
        rni++;
        j++;
      }
      if(Out && verbose){
        *Out << "Posting send of " << borders[i].nrecv.size() << " node's dof sizes to "
             << borders[i].rpart << std::endl;
        *Out << "Sending: ";
        DumpContents(*Out,SndBuf[i]," ");
        *Out << std::endl << std::flush;
      }
      comm.ASend<Mesh::IndexType>(SndBuf[i],borders[i].rpart-1); // asynchronous send
    }
    comm.WaitAll();
    comm.ClearRequests();
    comm.Barrier();
    for(unsigned int i = 0;i < nborders;i++){
      if(Out){ // guard added: Out can be NULL on ranks without a log stream
        *Out << "Received(" << i+1 << "): ";
        DumpContents(*Out,RcvBuf[i]," ");
        *Out << std::endl << std::flush;
      }
      if(RcvBuf[i][0] == 1)
        okcount++;
      else
        bcount++;
    }
  }
  if(Out)
    *Out << "Bad Receives: " << bcount << std::endl << std::flush;
  comm.Barrier();
}

void GlobalDofExchange(std::vector<Mesh::Border> &borders,Mesh::Connectivity &NodalDofs,
                       Mesh::Connectivity &RemoteNodalDofs,Mesh::PartInfo &info,
                       Comm::CommunicatorObject &comm,std::ostream *Out,bool verbose)
{
  if(Out && verbose)
    *Out << "Inside Global Dof Exchange" << std::endl;
  // First just exchange the sizes (in number of dofs) for each node
  //
  // Build the receive buffers and post the receives
  Mesh::IndexType doffset = info.doffset;
  std::vector<std::vector<Mesh::IndexType> > RcvBuf;
  unsigned int nborders = borders.size();
  RcvBuf.resize(nborders);
  for(unsigned int i = 0;i < nborders;i++){
    // For each remotely owned node, we will receive one integer indicating the
    // number of dofs on that remotely owned node
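    // e.g. (illustrative, assumed values): if borders[i].nsend = {12,13} and
    // the owning rank keeps 3 dofs on each of those nodes, RcvBuf[i] arrives
    // as {3,3}.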
    RcvBuf[i].resize(borders[i].nsend.size());
    if(Out && verbose)
      *Out << "Posting receive for " << borders[i].nsend.size() << " nodes from "
           << borders[i].rpart << std::endl;
    comm.ARecv<Mesh::IndexType>(RcvBuf[i],borders[i].rpart-1); // async recv
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs receives posted." << std::endl;
  // Build the send buffer for sending the number of dofs on each
  // node on the partition boundary that I own
  std::vector<std::vector<Mesh::IndexType> > SndBuf;
  SndBuf.resize(nborders);
  for(unsigned int i = 0;i < nborders;i++){
    SndBuf[i].resize(borders[i].nrecv.size());
    unsigned int j = 0;
    std::vector<Mesh::IndexType>::iterator rni = borders[i].nrecv.begin();
    while(rni != borders[i].nrecv.end()){
      SndBuf[i][j] = NodalDofs[*rni-1].size();
      rni++;
      j++;
    }
    if(Out && verbose)
      *Out << "Posting send of " << borders[i].nrecv.size() << " node's dof sizes to "
           << borders[i].rpart << std::endl;
    comm.ASend<Mesh::IndexType>(SndBuf[i],borders[i].rpart-1); // asynchronous send
  }
  comm.WaitAll();
  comm.ClearRequests();
  comm.Barrier();

  if(Out && verbose)
    *Out << "All procs sends posted." << std::endl;
  // Unpack the receive buffer and appropriately size the NodalDof array for
  // each remote node.
  std::vector<unsigned int> totalsizes(nborders,0);
  for(unsigned int i = 0;i < nborders;i++){
    std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
    unsigned int j = 0;
    if(Out && verbose){
      *Out << "Received node sizes (in dofs) from " << borders[i].rpart
           << ":" << std::endl;
      DumpContents(*Out,RcvBuf[i]);
      *Out << std::endl;
    }
    while(sni != borders[i].nsend.end()){
      totalsizes[i] += RcvBuf[i][j];
      //
      // For now, we make sure that the number of local dofs
      // on a remote node actually matches the number of remote dofs
      // on the remote node. This is a consistency check.
      //
      assert(NodalDofs[*sni-1].size() == RcvBuf[i][j]);
      RemoteNodalDofs[*sni-info.nlocal-1].resize(RcvBuf[i][j++]);
      sni++;
    }
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs node sizes received." << std::endl;

  // That phase is finished; now we can re-use the send and receive buffers
  // in the part where we exchange the *actual* global dof numbers
  for(unsigned int i = 0;i < nborders;i++){
    RcvBuf[i].resize(totalsizes[i]);
    borders[i].remote_dofcount = totalsizes[i];
    // Go ahead and post the receives for the actual dofs
    comm.ARecv<Mesh::IndexType>(RcvBuf[i],borders[i].rpart-1); // async recv
    if(Out && verbose)
      *Out << "Posting receive for " << RcvBuf[i].size() << " dofs from "
           << borders[i].rpart << std::endl;
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs receive posted 2." << std::endl;

  // Now pack and send off the actual global dof numbers for the
  // dofs that I own.
  for(unsigned int i = 0;i < nborders;i++){
    SndBuf[i].resize(0);
    std::vector<Mesh::IndexType>::iterator rni = borders[i].nrecv.begin();
    while(rni != borders[i].nrecv.end()){
      std::vector<Mesh::IndexType>::iterator ldi = NodalDofs[*rni-1].begin();
      while(ldi != NodalDofs[*rni-1].end()){
        SndBuf[i].push_back(*ldi+doffset);
        ldi++;
      }
      rni++;
    }
    if(Out && verbose){
      *Out << "Posting send of " << SndBuf[i].size() << " nodal dofs to "
           << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,SndBuf[i]," ");
      *Out << std::endl;
    }
    borders[i].recvsize = SndBuf[i].size();
    comm.ASend<Mesh::IndexType>(SndBuf[i],borders[i].rpart-1); // asynchronous send
  }
  comm.WaitAll();
  comm.ClearRequests();
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs send posted 2." << std::endl;

  // Unpack the received global dofs into the RemoteNodalDofs array
  for(unsigned int i = 0;i < nborders;i++){
    std::vector<Mesh::IndexType>::iterator rdi = RcvBuf[i].begin();
    std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
    if(Out && verbose){
      *Out << "Unpacking recv'd dofs:" << std::endl;
      DumpContents(*Out,RcvBuf[i]," ");
      *Out << std::endl;
    }
    while(sni != borders[i].nsend.end()){
      Mesh::IndexType rnindex = *sni - info.nlocal - 1;
      std::vector<Mesh::IndexType>::iterator ndi = RemoteNodalDofs[rnindex].begin();
      while(ndi != RemoteNodalDofs[rnindex].end())
        *ndi++ = *rdi++;
      sni++;
    }
  }
  comm.Barrier();
}

void BuildBorderStiffness(Mesh::Connectivity &ec,std::vector<Mesh::Border> &borders,
                          Mesh::Connectivity &NodalDofs,Mesh::Connectivity &ElementDofs,
                          Mesh::Connectivity &RemoteNodalDofs,
                          Mesh::PartInfo &info,Comm::CommunicatorObject &comm,
                          std::ostream *Out,bool verbose)
{
  unsigned int nborders = borders.size();
  // For each *node* that I own on the partition boundary, there will be
  // a number of stiffness entries coming from the remote process, namely
  // some <number> * the number of dofs on the local node. This
  // first communication is to figure out that <number>. The size of the
  // receive buffer here is the number of local nodes on the boundary.
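  // Worked example (assumed values): if a local border node carries 3 dofs
  // and the remote process reports <number> = 7 for it, then 7*3 = 21
  // stiffness values will eventually arrive for that node; this first
  // exchange only transports the 7.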
  std::vector<std::vector<Mesh::IndexType> > RcvBuf(nborders);
  std::vector<Mesh::IndexType> rsizes(nborders,0);
  for(unsigned int i = 0;i < nborders;i++){
    rsizes[i] = borders[i].nrecv.size();
    RcvBuf[i].resize(rsizes[i]);
    if(Out && verbose)
      *Out << "Posting receives for " << rsizes[i] << " sizes of stiffness entries "
           << "from " << borders[i].rpart << "." << std::endl;
    comm.ARecv<Mesh::IndexType>(RcvBuf[i],borders[i].rpart-1); // async recv
  }
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs have posted recvs." << std::endl;

  // Now calculate the *number of* stiffness entries I have for the remote
  // nodal dofs. The number of them will be the size of the unique list of
  // local dofs that talk to the remote node. We'll need the dual connectivity
  // to calculate this, so we build it here on the fly.
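  // A small sketch of Inverse() (assumed 2-element mesh, not from the
  // original source): for ec = {{1,2,3,4},{3,4,5,6}}, ec.Inverse(dc,6)
  // yields the elements touching each node:
  // dc = {{1},{1},{1,2},{1,2},{2},{2}}.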
  Mesh::Connectivity dc; // assumed declaration (original line lost); built just below
  Mesh::IndexType number_of_nodes = NodalDofs.size();
  if(Out && verbose)
    *Out << "Forming dual connectivity for " << number_of_nodes << " nodes." << std::endl;
  ec.Inverse(dc,number_of_nodes);
  if(Out && verbose)
    *Out << "dual done." << std::endl;
  std::vector<std::vector<Mesh::IndexType> > SndBuf(nborders);
  std::vector<Mesh::IndexType> ssizes(nborders,0);
  for(unsigned int i = 0;i < nborders;i++){
    borders[i].data.SendAp.resize(0);
    borders[i].data.SendAi.resize(0);
    borders[i].data.SendAp.push_back(0);
    std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
    while(sni != borders[i].nsend.end()){
      // The list is the same for each border *node*
      std::list<Mesh::IndexType> doflist;
      // Count up the dof contributions from every local
      // element that touches the remote node
      std::vector<Mesh::IndexType>::iterator ei = dc[*sni-1].begin();
      while(ei != dc[*sni-1].end()){ // for every element touching the remote node
        std::vector<Mesh::IndexType>::iterator eni = ec[*ei-1].begin();
        while(eni != ec[*ei-1].end()){ // loop thru the element's nodes
          std::vector<Mesh::IndexType>::iterator ndi;
          std::vector<Mesh::IndexType>::iterator ndend;
          Mesh::IndexType offset = 0; // assumed declaration (original line lost); remote dofs are already global
          if(*eni > info.nlocal){ // then it's a remote node
            ndi = RemoteNodalDofs[*eni - info.nlocal - 1].begin();
            ndend = RemoteNodalDofs[*eni - info.nlocal - 1].end();
          }
          else{
            offset = info.doffset;
            ndi = NodalDofs[*eni-1].begin();
            ndend = NodalDofs[*eni-1].end();
          }
          while(ndi != ndend){ // Loop thru the node's dofs and record them into the doflist
            doflist.push_back(*ndi + offset);
            ndi++;
          }
          eni++;
        }
        eni = ElementDofs[*ei-1].begin();
        while(eni != ElementDofs[*ei-1].end()){
          doflist.push_back(*eni+info.doffset);
          eni++;
        }
        ei++;
      }
      doflist.sort();
      doflist.unique();
      // The row size for one dof is doflist.size(); however, you will be
      // sending that many entries for every DOF on the remote node - so
      // the total size is rowsize * ndof. Let the remote processor deal
      // with that detail.
      SndBuf[i].push_back(doflist.size());
      borders[i].data.SendAp.push_back(doflist.size() + *(borders[i].data.SendAp.rbegin()));
      std::list<Mesh::IndexType>::iterator dli = doflist.begin();
      while(dli != doflist.end())
        borders[i].data.SendAi.push_back(*dli++);
      sni++;
    }
    if(Out && verbose){
      *Out << "SendAp for color " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAp," ");
      *Out << std::endl
           << "SendAi for color " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAi," ");
      *Out << std::endl;
      *Out << "Posting send of " << SndBuf[i].size() << " stiffness row sizes "
           << "to " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,SndBuf[i]," ");
      *Out << std::endl;
    }
    comm.ASend<Mesh::IndexType>(SndBuf[i],borders[i].rpart-1); // async send
  }
  comm.WaitAll();
  if(Out && verbose){ // guard added: Out can be NULL
    *Out << "Receive Buffer: " << std::endl;
    for(unsigned int i = 0;i < nborders;i++){
      *Out << "Partition " << i+1 << ": ";
      DumpContents(*Out,RcvBuf[i]," ");
      *Out << std::endl;
    }
  }
  comm.ClearRequests();
  comm.Barrier();
  if(Out && verbose)
    *Out << "All procs have posted sends." << std::endl;
  // By now, the remote processor has sent us an array of sizes for the nz entries
  // relevant to each local *node*. To get the total size of the stiffness block
  // that will be sent, we need to multiply by the number of actual dofs living
  // on the local node. Right now, we are just interested in the dof ids that
  // we will be receiving per node.
  for(unsigned int i = 0;i < nborders;i++){
    borders[i].data.RecvAp.resize(RcvBuf[i].size()+1);
    std::vector<Mesh::IndexType>::iterator rbi = RcvBuf[i].begin();
    std::vector<Mesh::IndexType>::iterator rai = borders[i].data.RecvAp.begin();
    *rai = 0;
    while(rbi != RcvBuf[i].end()){
      Mesh::IndexType pv = *rai++;
      *rai = pv + *rbi++;
    }
    if(Out && verbose){
      *Out << "RecvAp from partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.RecvAp," ");
      *Out << std::endl;
    }
    // We can go ahead and post receives for the Ai
    // This is not the way to determine total size any more; we've changed our
    // Ap array to the traditional representation [0 ..... nentries_Ai].
    // while(rsi != borders[i].data.RecvAp.end())
    //   totalsize += *rsi++;
    // Now we just need the last value
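    // e.g. (illustrative): RecvAp = {0,3,5,9} in this representation means
    // node 1's incoming dof ids are RecvAi[0..2], node 2's are RecvAi[3..4],
    // node 3's are RecvAi[5..8]; the last value (9) is the total entry count
    // used to size RecvAi just below.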
    std::vector<Mesh::IndexType>::reverse_iterator rsi = borders[i].data.RecvAp.rbegin();
    unsigned int totalsize = *rsi;
    borders[i].data.RecvAi.resize(totalsize);
    if(Out && verbose)
      *Out << "Posting RecvAi (" << borders[i].data.RecvAi.size()
           << ") from partition " << borders[i].rpart << std::endl;
    comm.ARecv<Mesh::IndexType>(borders[i].data.RecvAi,borders[i].rpart-1); // async recv
    // Note how we've already conveniently filled the SendAi, send it out now
    comm.ASend<Mesh::IndexType>(borders[i].data.SendAi,borders[i].rpart-1); // async send
    if(Out && verbose){
      *Out << "Posting SendAi (" << borders[i].data.SendAi.size()
           << ") to partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.SendAi," ");
      *Out << std::endl;
    }
  }
  comm.WaitAll();
  comm.ClearRequests();
  comm.Barrier();
  if(Out && verbose){
    for(unsigned int i = 0;i < nborders;i++){
      *Out << "Received RecvAi (" << borders[i].data.RecvAi.size()
           << ") from partition " << borders[i].rpart << ":" << std::endl;
      DumpContents(*Out,borders[i].data.RecvAi," ");
      *Out << std::endl;
    }
  }
}

unsigned int AllocateCommBuffers(std::vector<Mesh::Border> &borders,
                                 Mesh::Connectivity &NodalDofs,std::ostream *Out,bool verbose)
{
  unsigned int nborders = borders.size();
  unsigned int nalloc = 0;
  for(unsigned int i = 0;i < nborders;i++){
    unsigned int nval_send = 0;
    unsigned int nval_recv = 0;
    std::vector<Mesh::IndexType>::iterator ni = borders[i].nrecv.begin();
    std::vector<Mesh::IndexType>::iterator Api = borders[i].data.RecvAp.begin();
    while(ni != borders[i].nrecv.end()){
      Mesh::IndexType ndof = NodalDofs[*ni - 1].size();
      Mesh::IndexType begindex = *Api++;
      nval_recv += ((*Api-begindex) * ndof);
      // Api++;
      ni++;
    }
    if(Out && verbose)
      *Out << "Allocating buffer(" << nval_recv << ") for receiving stiffness blocks"
           << " from " << borders[i].rpart << "." << std::endl;
    borders[i].data.RecvBuffer.resize(nval_recv,0.0);
    borders[i].data.RecvBuffer.swap(borders[i].data.RecvBuffer); // self-swap is a no-op (shrink-to-fit would need a temporary copy)
    nalloc += nval_recv;
    ni = borders[i].nsend.begin();
    Api = borders[i].data.SendAp.begin();
    // because the first val is 0
    while(ni != borders[i].nsend.end()){
      Mesh::IndexType ndof = NodalDofs[*ni - 1].size();
      Mesh::IndexType begindex = *Api++;
      nval_send += ((*Api-begindex) * ndof);
      // Api++;
      ni++;
    }
    if(Out && verbose)
      *Out << "Allocating buffer(" << nval_send << ") for sending stiffness blocks"
           << " to " << borders[i].rpart << "." << std::endl;
    borders[i].data.SendBuffer.resize(nval_send,1.0);
    borders[i].data.SendBuffer.swap(borders[i].data.SendBuffer); // self-swap is a no-op (shrink-to-fit would need a temporary copy)
    nalloc += nval_send;
  }
  return(nalloc);
}

int RegisterCommBuffers(std::vector<Mesh::Border> &borders,Comm::CommunicatorObject &comm)
{
  unsigned int nborders = borders.size();
  for(unsigned int i = 0;i < nborders;i++)
  {
    if(comm.SetSend(borders[i].data.SendBuffer,borders[i].rpart-1) < 0)
      return 1;
    if(comm.SetRecv(borders[i].data.RecvBuffer,borders[i].rpart-1) < 0)
      return 1;
  }
  return(0);
}

// (Reconstructed signature: the original first line was lost in extraction; the
// name "CountDofs" and the first three parameters are assumed from the body.)
void CountDofs(Mesh::Connectivity &ElementDofs,Mesh::Connectivity &NodalDofs,
               Mesh::PartInfo &info,
               Mesh::IndexType &nglobaldof,Mesh::IndexType &nremotedof)
{
  Mesh::IndexType number_of_elements = ElementDofs.size();
  Mesh::IndexType number_of_nodes = NodalDofs.size();
  nglobaldof = 0;
  nremotedof = 0;
  for(Mesh::IndexType pp = 0;pp < number_of_elements;pp++){
    nglobaldof += ElementDofs[pp].size();
  }
  for(Mesh::IndexType pp = 0;pp < number_of_nodes;pp++){
    if((pp+1) <= info.nlocal)
      nglobaldof += NodalDofs[pp].size();
    else
      nremotedof += NodalDofs[pp].size();
  }
}

int
main(int argc,char *argv[])
{
  std::ostream* StdOut = NULL; // program output
  std::ostream* ErrOut = NULL; // program errors
  std::ostream* LogOut = NULL; // log for each proc (if debugging on)
  std::ofstream LogFile;
  Global::GlobalObj<std::string,Mesh::IndexType,Profiler::ProfilerObj> global;
  Comm::CommunicatorObject comm(&argc,&argv);
  unsigned int rank = comm.Rank();
  unsigned int nproc = comm.Size();
  global.Init("MeshTest",rank);

  if(rank == 0){
    StdOut = &std::cout;
    ErrOut = &std::cerr;
  }
  global.SetDebugLevel(0);
  global.FunctionEntry("main");
  MeshTestComLine comline((const char **)argv);
  comline.Initialize();
  int clerr = comline.ProcessOptions();
  std::string sverb = comline.GetOption("verb");
  std::string spart = comline.GetOption("partition");
  std::string sclone = comline.GetOption("clone");
  bool do_partitioning = !comline.GetOption("partition").empty();
  bool do_mesh_testing = !comline.GetOption("mesh").empty();
  bool do_assembly = !comline.GetOption("assembly").empty();
  bool do_reorient = !comline.GetOption("reorient").empty();
  bool debug = !comline.GetOption("debug").empty();
  bool array_checking = !comline.GetOption("checking").empty();
  bool do_generate = !comline.GetOption("generate").empty();
  bool do_clone = !comline.GetOption("clone").empty();
  bool do_dump_sparse = !comline.GetOption("sparse").empty();
  // bool do_metis_test = !comline.GetOption("metis").empty();
  bool do_renumbering = !comline.GetOption("renumber").empty();
  Comm::DataTypes IndexDT = (sizeof(Mesh::IndexType) == sizeof(size_t) ?
                             Comm::DTSIZET : Comm::DTUINT);
  if(!comline.GetOption("help").empty()){
    if(StdOut)
      *StdOut << comline.LongUsage() << std::endl;
    comm.SetExit(1);
  }
  if(comm.Check())
    return(0);

  if(clerr){
    if(ErrOut)
      *ErrOut << comline.ErrorReport() << std::endl
              << std::endl << comline.ShortUsage() << std::endl;
    comm.SetExit(1);
  }
  if(comm.Check())
    return(1);
  int verblevel = 0;
  if(!sverb.empty()){
    verblevel = 1;
    if(sverb != ".true."){
      std::istringstream Istr(sverb);
      Istr >> verblevel;
    }
  }
  Mesh::IndexType npartitions = 0;
  if(!spart.empty() && spart != ".true."){
    std::istringstream Istr(spart);
    Istr >> npartitions;
  }
  if(!sclone.empty() && sclone != ".true."){
    std::istringstream Istr(sclone);
    Istr >> npartitions;
  }
  if(npartitions <= 0)
    npartitions = 2;
  if(nproc > 1){
    std::ostringstream debugfilename;
    debugfilename << comline.ProgramName() << ".output." << rank;
    LogFile.open(debugfilename.str().c_str());
    global.SetDebugStream(LogFile);
    LogOut = &LogFile;
    ErrOut = &LogFile;
    if(verblevel > 1 && rank > 0)
      StdOut = &LogFile;
  }
  else{
    global.SetDebugStream(std::cerr);
    LogOut = &std::cout;
    ErrOut = &std::cout;
  }
  if(verblevel > 1 || debug){
    global.SetDebugLevel(1);
  }
  if(debug && rank > 0)
    StdOut = LogOut;
  std::vector<std::string> infiles = comline.GetArgs();
  if(infiles.size()==0) {
    if(ErrOut)
      *ErrOut << "MeshTest::Error: No input specified." << std::endl
              << std::endl << comline.ShortUsage() << std::endl;
    comm.SetExit(1);
  }
  if(comm.Check())
    return(1);

  if(do_generate){
    std::string MeshName;
    if(!infiles.empty())
      MeshName = infiles[0];
    unsigned int N = 10;
    if(infiles.size() > 1){
      std::istringstream Instr(infiles[1]);
      Instr >> N;
    }
    if(N < 2)
      N = 2;
    Mesh::IndexType number_of_nodes = N*N*N;
    Mesh::IndexType number_of_elements = (N-1)*(N-1)*(N-1);
    Mesh::Connectivity ec; // assumed declaration (original line lost); resized just below
    ec.resize(number_of_elements);
    if(StdOut && verblevel)
      *StdOut << "Creating cube mesh with " << number_of_nodes << " nodes, "
              << number_of_elements << " elements." << std::endl;
    Mesh::NodalCoordinates nc(number_of_nodes);
    if(LogOut && debug)
      *LogOut << "NC.size() = " << nc.Size() << std::endl;
    Mesh::IndexType node_id = 1;
    Mesh::IndexType element_id = 1;
    for(unsigned int k = 0;k < N;k++){
      double z = k;
      for(unsigned int j = 0;j < N;j++){
        double y = j;
        for(unsigned int i = 0;i < N;i++){
          double x = i;
          nc.x(node_id) = x;
          nc.y(node_id) = y;
          nc.z(node_id++) = z;
          if(k < (N-1) && j < (N-1) && i < (N-1)){
            // corner node of the hex at (i,j,k); note i%N == i here since i < N
            Mesh::IndexType ulnode = k*N*N + j*N + i%N + 1;
            ec[element_id-1].push_back(ulnode);
            ec[element_id-1].push_back(ulnode+N);
            ec[element_id-1].push_back(ulnode+N+1);
            ec[element_id-1].push_back(ulnode+1);
            ec[element_id-1].push_back(ulnode+N*N);
            ec[element_id-1].push_back(ulnode+N*N+N);
            ec[element_id-1].push_back(ulnode+N*N+N+1);
            ec[element_id-1].push_back(ulnode+N*N+1);
            element_id++;
          }
        }
      }
    }
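    // Numbering check (illustrative, N=3): node ids run 1..27 with
    // id = k*N*N + j*N + i + 1, so the hex at (i,j,k) = (0,0,0) gets the
    // corner nodes {1,4,5,2,10,13,14,11} in the push order above.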
    std::ofstream Ouf;
    Ouf.open(MeshName.c_str());
    Ouf << nc << std::endl << ec << std::endl;
    Ouf.close();
  }


  if(do_mesh_testing){
    // Mesh::MeshObject mesh;
    std::ifstream Inf;
    std::string MeshName;
    if(!infiles.empty()){
      MeshName = infiles[0];
    }
    std::ostringstream Ostr;
    Ostr << MeshName;
    if(nproc > 1)
      Ostr << "." << rank+1 << ".pmesh";
    Inf.open(Ostr.str().c_str());
    if(!Inf){
      if(ErrOut)
        *ErrOut << "Failed to open " << Ostr.str() << std::endl;
      comm.SetExit(1);
    }
    if(comm.Check())
      return(1);

    Mesh::IndexType number_of_nodes = 0;
    Mesh::NodalCoordinates nc; // assumed declaration (original line lost); read just below
    global.FunctionEntry("ReadCoord");
    Inf >> nc;
    global.FunctionExit("ReadCoord");
    number_of_nodes = nc.Size();
    if(StdOut && verblevel)
      *StdOut << "Number of nodes: " << number_of_nodes << std::endl;
    std::vector<double> bounds(6,0.0);
    Mesh::GetCoordinateBounds(nc,bounds);
    if(StdOut && verblevel)
      *StdOut << "Mesh Bounds: (" << bounds[0] << "," << bounds[1]
              << ") (" << bounds[2] << "," << bounds[3]
              << ") (" << bounds[4] << "," << bounds[5] << ")" << std::endl;
    Mesh::Connectivity econ;
    global.FunctionEntry("ReadCon");
    Inf >> econ;
    global.FunctionExit("ReadCon");
    Inf.close();
    Mesh::IndexType number_of_elements = econ.Nelem();
    if(StdOut && verblevel)
      *StdOut << "Number of elements: " << number_of_elements
              << std::endl;
    Mesh::IndexType N = 0;
    Mesh::IndexType N2 = 0;
    if(infiles.size() > 1){
      std::istringstream Istr(infiles[1]);
      Istr >> N;
    }
    if(infiles.size() > 2){
      std::istringstream Istr(infiles[2]);
      Istr >> N2;
    }
    if(false){ // element connectivity dump, disabled
      Mesh::Connectivity::iterator ci = econ.begin();
      Mesh::IndexType el = 1;
      while(ci != econ.end()){
        if(StdOut && verblevel)
          *StdOut << "Element " << el++ << ": (";
        std::vector<Mesh::IndexType>::iterator ni = ci->begin();
        while(ni != ci->end()){
          if(StdOut && verblevel)
            *StdOut << *ni++;
          if(ni != ci->end())
            if(StdOut && verblevel)
              *StdOut << ",";
        }
        if(StdOut && verblevel)
          *StdOut << ")\n";
        ci++;
      }
    }
    Mesh::Connectivity dc; // assumed declaration (original line lost); filled just below
    global.FunctionEntry("DualCon");
    econ.Inverse(dc,number_of_nodes);
    global.FunctionExit("DualCon");
    // Mesh::NeighborHood nl;
    // global.FunctionEntry("Neighborhood");
    // con.GetNeighborhood(nl,dc);
    // global.FunctionExit("Neighborhood");
    bool do_neighborhood = false;
    if(do_neighborhood){
      Mesh::Connectivity nl2;
      // Neighborhood populates an array of neighbors
      // for each element. A neighbor is any element
      // that shares a node with a given element.
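      // A hedged sketch (assumed 2-element connectivity, not from the
      // original source): for {{1,2,3},{3,4,5}}, elements 1 and 2 share
      // node 3, so GetNeighborhood would yield nl2 = {{2},{1}}.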
      global.FunctionEntry("Neighborhood2");
      econ.GetNeighborhood(nl2,dc);
      global.FunctionExit("Neighborhood2");


      // std::vector<std::list<Mesh::IndexType> > element_neighbors;
      // global.FunctionEntry("CreateAdjEl Template");
      // Mesh::AdjEList<std::vector<std::vector<Mesh::IndexType> >,
      //                std::vector<Mesh::IndexType> >
      //   (element_neighbors,dc,number_of_elements);
      // global.FunctionExit("CreateAdjEl Template");
      // Mesh::IndexType nedges =
      //   Mesh::NumberOfEdges< std::vector<std::list<Mesh::IndexType> >,
      //                        std::list<Mesh::IndexType> >(anodelist);

      if(false){ // neighbor list dumps, disabled
        // Mesh::NeighborHood::iterator ei = nl.begin();
        // Mesh::IndexType el = 1;
        // while(ei != nl.end()){
        //   if(StdOut && verblevel) *StdOut << "Element " << el++ << " neighbors: ";
        //   std::set<Mesh::IndexType>::iterator ni = ei->begin();
        //   while(ni != ei->end()){
        //     if(StdOut && verblevel) *StdOut << *ni++;
        //     if(ni != ei->end())
        //       if(StdOut && verblevel) *StdOut << ",";
        //   }
        //   if(StdOut && verblevel) *StdOut << std::endl;
        //   ei++;
        // }
        // std::vector<std::list<Mesh::IndexType> >::iterator eni =
        //   element_neighbors.begin();
        // Mesh::IndexType el = 1;
        // while(eni != element_neighbors.end()){
        //   if(StdOut && verblevel) *StdOut << "Element " << el++ << " neighbors: ";
        //   std::list<Mesh::IndexType>::iterator ni = eni->begin();
        //   while(ni != eni->end()){
        //     if(StdOut && verblevel) *StdOut << *ni++;
        //     if(ni != eni->end())
        //       if(StdOut && verblevel) *StdOut << ",";
        //   }
        //   if(StdOut && verblevel) *StdOut << std::endl;
        //   eni++;
        // }
        Mesh::Connectivity::iterator ei2 = nl2.begin();
        Mesh::IndexType el = 1;
        while(ei2 != nl2.end()){
          if(StdOut && verblevel)
            *StdOut << "Element " << el++ << " neighbors: ";
          std::vector<Mesh::IndexType>::iterator ni = ei2->begin();
          while(ni != ei2->end()){
            if(StdOut && verblevel)
              *StdOut << *ni++;
            if(ni != ei2->end())
              if(StdOut && verblevel)
                *StdOut << ",";
          }
          if(StdOut && verblevel)
            *StdOut << std::endl;
          ei2++;
        }
      }
    }
    // Container Orientation Test
    if(false) {
      global.FunctionEntry("List Creation");
      std::vector<Mesh::IndexType> a;
      std::vector<Mesh::IndexType> b;
      a.resize(N);
      b.resize(N);
      for(Mesh::IndexType j = 1;j <= N;j++){
        a[j-1] = j;
        b[N-j] = (j+100)%N + 1;
      }
      global.FunctionExit("List Creation");
      global.FunctionEntry("Same Orientation");
      bool same_orientation = HaveSameOrientation(a,b);
      global.FunctionExit("Same Orientation");
      global.FunctionEntry("Opp Orientation");
      bool opp_orientation = HaveOppositeOrientation(a,b);
      global.FunctionExit("Opp Orientation");
      if(StdOut && verblevel)
        *StdOut << "Lists did " << (same_orientation ? "" : "not ")
                << "have same orientation.\n"
                << "Lists did " << (opp_orientation ? "" : "not ")
                << "have opposite orientation.\n";
    }
    global.FunctionEntry("Orient elements");
    Mesh::IndexType invertedcount = 0;
    for(Mesh::IndexType element_being_processed = 1;
        element_being_processed <= number_of_elements;
        element_being_processed++)
    {
      Mesh::IndexType index = element_being_processed - 1;
      Mesh::IndexType size_of_element = econ[index].size();
      Mesh::GenericElement ge(size_of_element);
      if(ge.Inverted(econ[index],nc)){
        invertedcount++;
        ge.ReOrient(econ[index]);
      }
      if(!ge.ShapeOK(econ[index],nc) && StdOut) // StdOut guarded: may be NULL
        *StdOut << "Element " << element_being_processed
                << " has bad shape!" << std::endl;
    }
    global.FunctionExit("Orient elements");
    nc.destroy();
    if(StdOut && verblevel)
      *StdOut << "Number of elements reoriented: " << invertedcount
              << std::endl;
    global.FunctionEntry("All Face Processing");
    Mesh::IndexType nface_estimate = static_cast<Mesh::IndexType>(2.2 * number_of_elements);
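    // The 2.2 factor is an empirical guess (presumably sized for tet meshes,
    // where each element has 4 faces and interior faces are shared by two
    // elements, giving roughly 2 faces per element plus boundary faces).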
    // This next loop actually populates the data
    Mesh::IndexType number_of_faces = 0;
    Mesh::Connectivity F_N;
    Mesh::Connectivity E_F;
    Mesh::Connectivity F_E;
    std::vector<Mesh::SymbolicFace> sf;
    econ.SyncSizes();
    global.FunctionEntry("GetFaceConnectivities");
    econ.BuildFaceConnectivity(F_N,E_F,sf,dc);
    global.FunctionExit("GetFaceConnectivities");
    global.FunctionEntry("Face dual");
    E_F.Inverse(F_E,F_N.Nelem());
    global.FunctionExit("Face dual");
    global.FunctionEntry("Canned shrink");
    F_N.ShrinkWrap();
    E_F.ShrinkWrap();
    F_E.ShrinkWrap();
    std::vector<Mesh::SymbolicFace>(sf).swap(sf);
    global.FunctionExit("Canned shrink");
    global.FunctionEntry("Face syncsizes");
    F_E.SyncSizes();
    F_N.SyncSizes();
    global.FunctionExit("Face syncsizes");

    global.FunctionEntry("Face Stats");
    Mesh::IndexType canned_vol_tri = 0;
    Mesh::IndexType canned_vol_quad = 0;
    Mesh::IndexType canned_bndry_tri = 0;
    Mesh::IndexType canned_bndry_quad = 0;
    Mesh::Connectivity::iterator fei = F_E.begin();
    while(fei != F_E.end()){
      Mesh::IndexType faceid = (fei++ - F_E.begin()) + 1;
      if(F_E.Esize(faceid) == 1){ // a face touching only one element is on the boundary
        if(F_N.Esize(faceid) == 3)
          canned_bndry_tri++;
        else
          canned_bndry_quad++;
      }
      else{
        if(F_N.Esize(faceid) == 3)
          canned_vol_tri++;
        else
          canned_vol_quad++;
      }
    }
    global.FunctionExit("Face Stats");
    if(StdOut && verblevel)
      *StdOut << "Estimated number of faces: " << nface_estimate << std::endl
              << "Actual Number of faces: " << F_N.Nelem() << "\n"
              << "Boundary faces: " << canned_bndry_tri + canned_bndry_quad << "\n"
              << "Volume faces: " << canned_vol_tri + canned_vol_quad << "\n"
              << canned_vol_quad + canned_bndry_quad << " Quads.\n"
              << canned_vol_tri + canned_bndry_tri << " Tris.\n"
              << canned_bndry_quad + canned_bndry_tri << " Boundary faces ("
              << canned_bndry_quad << "," << canned_bndry_tri << ")" << std::endl;
    global.FunctionExit("All Face Processing");
    // Mesh::Connectivity facecon_dual;
    // global.FunctionEntry("Face dual");
    // F_N.Inverse(facecon_dual,number_of_nodes);
    // global.FunctionExit("Face dual");
    global.FunctionEntry("Edges processing");
    // For each node, determine which nodes are connected by
    // traversing the faces containing the node.
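    // e.g. (illustrative): a single triangular face {1,2,3} connects the
    // node pairs 1-2, 2-3, and 1-3, so NumberOfEdges would report 3 for it.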
    std::vector<std::list<Mesh::IndexType> > anodelist;
    global.FunctionEntry("CreateAdjNodes Template");
    CreateAdjacentNodeList<std::vector<std::vector<Mesh::IndexType> >,
                           std::vector<Mesh::IndexType> >
      (anodelist,F_N,number_of_nodes);
    global.FunctionExit("CreateAdjNodes Template");
    Mesh::IndexType nedges =
      NumberOfEdges< std::vector<std::list<Mesh::IndexType> >,
                     std::list<Mesh::IndexType> >(anodelist);
    global.FunctionExit("Edges processing");
    if(StdOut && verblevel)
      *StdOut << "Number of edges: " << nedges << std::endl;
    if(false){ // report the edges
      std::vector< std::list<Mesh::IndexType> >::iterator anli = anodelist.begin();
      Mesh::IndexType ii = 0;
      while(anli != anodelist.end())
      {
        if(StdOut && array_checking)
          *StdOut << "Node " << ++ii << " neighbors:";
        std::list<Mesh::IndexType>::iterator ni = anli->begin();
        while(ni != anli->end())
        {
          if(StdOut && array_checking)
            *StdOut << *ni++;
          if(ni != anli->end())
            if(StdOut && array_checking)
              *StdOut << ",";
        }
        if(StdOut && array_checking)
          *StdOut << std::endl;
        anli++;
      }
    }
  }

  comm.Barrier();

  if(do_assembly){
    global.FunctionEntry("Assembly Function");
    // Initial stab at fake assembly
    global.FunctionEntry("Initialization");
    // Mesh::NodalCoordinates nc;
    Mesh::Connectivity econ;
    Mesh::PartInfo info; // assumed declaration (original line lost); populated from the .info file below
    Mesh::IndexType datasize = sizeof(Mesh::IndexType);
    Mesh::IndexType number_of_nodes = 0;
    Mesh::IndexType number_of_elements = 0;
    std::ifstream Inf;
    if(LogOut && debug)
      *LogOut << "DataSize = " << datasize << std::endl;
    if(true) {
      global.FunctionEntry("ReadCoord");
      std::string MeshName;
      if(!infiles.empty())
        MeshName = infiles[0];
      std::ostringstream FNOstr;
      if(nproc > 1){
        FNOstr << MeshName << "." << rank+1 << ".info";
        Inf.open(FNOstr.str().c_str());
        if(!Inf){
          if(ErrOut)
            *ErrOut << "Failed to open " << FNOstr.str() << std::endl;
          comm.SetExit(1);
        }
        if(comm.Check())
          return(1);
        Inf >> info.npart >> info.part >> info.nelem >> info.nnodes >> info.nborder >> info.nshared >> info.nown;
        info.nlocal = info.nnodes - info.nshared + info.nown;
        Inf.close();
        FNOstr.str("");
        FNOstr << MeshName << "." << rank+1 << ".pmesh";
      }
      else{
        info.nborder = 0;
        info.nshared = 0;
        FNOstr << MeshName;
      }
      Inf.open(FNOstr.str().c_str());
      if(!Inf){
        if(ErrOut)
          *ErrOut << "Failed to open " << FNOstr.str() << std::endl;
        comm.SetExit(1);
      }
      if(comm.Check())
        return(1);
      if(true){
        Mesh::NodalCoordinates nc; // assumed declaration (original line lost); only the size is kept
        Inf >> nc;
        number_of_nodes = nc.Size();
      }
      global.FunctionExit("ReadCoord");
      global.FunctionEntry("ReadCon");
      Inf >> econ;
      econ.ShrinkWrap();
      global.FunctionExit("ReadCon");
      comm.Barrier();
    }
    number_of_elements = econ.Nelem();
    if(LogOut && verblevel > 1){
      Mesh::IndexType consize = GetTotalSize(econ);
      *LogOut << "Connectivity size(bytes): " << (consize+(2*number_of_elements))*datasize << std::endl;
    }
    if(nproc == 1){
      info.nnodes = number_of_nodes;
      info.nown = 0;
      info.nlocal = number_of_nodes;
      info.nelem = number_of_elements;
    }
    if(StdOut && verblevel)
      *StdOut << "Number of elements: " << number_of_elements
              << std::endl
              << "Number of nodes: " << number_of_nodes << std::endl
              << "Number of local nodes: " << info.nlocal << std::endl;
    std::vector<Mesh::Border> borders;
    std::list<Mesh::IndexType> remotecolorlist;
    Mesh::Connectivity bordernodes;
    Mesh::Connectivity gbordernodes;
    if(nproc > 1){
      Inf >> info.nborder;
      if(StdOut && verblevel)
        *StdOut << "Number of neighboring partitions: " << info.nborder << std::endl;
      borders.resize(info.nborder);
      bordernodes.resize(info.nborder);
      gbordernodes.resize(info.nborder);
      Mesh::IndexType totalrecv = 0;
      Mesh::IndexType totalsend = 0;
      for(Mesh::IndexType nn = 0;nn < info.nborder;nn++){
        Mesh::IndexType nrecv = 0;
        Mesh::IndexType nsend = 0;
        Inf >> borders[nn].rpart >> nrecv >> nsend;
        totalrecv += nrecv;
        totalsend += nsend;
        remotecolorlist.push_back(borders[nn].rpart);
        for(Mesh::IndexType ii = 0;ii < nrecv;ii++){
          Mesh::IndexType nodeid = 0;
          Inf >> nodeid;
          borders[nn].nrecv.push_back(nodeid);
          bordernodes[nn].push_back(nodeid);
          Inf >> nodeid;
          gbordernodes[nn].push_back(nodeid);
        }
        for(Mesh::IndexType ii = 0;ii < nsend;ii++){
          Mesh::IndexType nodeid = 0;
          Inf >> nodeid;
          borders[nn].nsend.push_back(nodeid);
          bordernodes[nn].push_back(nodeid);
          Inf >> nodeid;
          gbordernodes[nn].push_back(nodeid);
        }
      }
      if(LogOut && verblevel > 1){
        *LogOut << "Border description storage size(bytes): "
                << (totalrecv+totalsend)*datasize
                << std::endl;
      }
    }
    Inf.close();
    if(LogOut && debug && array_checking)
      *LogOut << "BorderNodes:" << std::endl
              << bordernodes << std::endl
              << "GlobalBorderNodes:" << std::endl
              << gbordernodes << std::endl;
    comm.Barrier();
    Mesh::IndexType N = 0;
    Mesh::IndexType N2 = 0;
    if(infiles.size() > 1){
      std::istringstream Istr(infiles[1]);
      Istr >> N;
      if(N < 0)
        N = 0;
    }
    if(infiles.size() > 2){
      std::istringstream Istr(infiles[2]);
      Istr >> N2;
      if(N2 < 0)
        N2 = 0;
    }
    comm.Barrier();
    if(LogOut && verblevel > 1)
      *LogOut << "All command line args processed." << std::endl;
    if(StdOut && verblevel){
      *StdOut << "Number of nodal dofs: " << N << std::endl
              << "Number of element dofs: " << N2 << std::endl;
    }
    if(LogOut && verblevel > 1)
      *LogOut << "Building dual connectivity..." << std::endl;
    Mesh::Connectivity dc;
    global.FunctionEntry("DualCon");
    econ.Inverse(dc,number_of_nodes);
    global.FunctionExit("DualCon");
    dc.ShrinkWrap();
    Mesh::Connectivity nl2;
    comm.Barrier();
    if(LogOut && debug){
      *LogOut << "Dual Con size(bytes):"
              << (GetTotalSize(dc)+2*dc.size())*datasize
              << std::endl;
      if(array_checking)
        *LogOut << "dual connectivity:" << std::endl
                << dc << std::endl;
    }
    // Neighborhood populates an array of neighbors
    // for each element. A neighbor is any element
    // that shares a node with a given element.
    if(LogOut && verblevel > 1)
      *LogOut << "Building element neighborhood." << std::endl;
    global.FunctionEntry("Neighborhood");
    econ.GetNeighborhood(nl2,dc);
    global.FunctionExit("Neighborhood");
    nl2.ShrinkWrap();
    if(LogOut && array_checking)
      *LogOut << "Element Neighborhood:" << std::endl
              << nl2 << std::endl;
    comm.Barrier();
    if(LogOut && debug)
      *LogOut << "Neighborhood size(bytes): "
              << (GetTotalSize(nl2)+(2*nl2.size()))*datasize
              << std::endl;
    Mesh::Connectivity NodalDofs;
    NodalDofs.resize(number_of_nodes);
    NodalDofs.Sync();
    Mesh::Connectivity ElementDofs;
    ElementDofs.resize(number_of_elements);
    ElementDofs.Sync();
    std::vector<FEM::FieldData> fields;
    Mesh::IndexType nfields = 0;
    if(N > 0){
      FEM::FieldData nfield;
      nfield.Order(1);
      nfield.Components(N);
      fields.push_back(nfield);
      nfields++;
    }
    if(N2 > 0){
      FEM::FieldData efield;
      efield.Order(0);
      efield.Components(N2);
      fields.push_back(efield);
      nfields++;
    }

    // Assigns dof numbers to each node and element into the provided
    // arrays.
    // global.FunctionEntry("Uniform DOF Assignment");
    // Mesh::IndexType ndoftotal = Mesh::AssignUniformDofs(econ,dc,ElementFields,
    //                                                     NodalFields,NodalDofs,
    //                                                     ElementDofs,fields);
    // global.FunctionExit("Uniform DOF Assignment");
    comm.Barrier();
    if(LogOut && verblevel > 1)
      *LogOut << "All processors finished prepping."
              << std::endl
              << "Now assigning DOFs." << std::endl;
    global.FunctionExit("Initialization");

    Mesh::IndexType ndoftotal = 0;
    Mesh::IndexType nlocal_dofs = 0;
    std::list<Mesh::IndexType> border_elements;
    // border_elements.resize(info.nborder);
    Mesh::IndexType nvolume_nodes = number_of_nodes - info.nshared;
    // Mesh::IndexType nlocal_nodes = nvolume_nodes + info.nown;
    std::vector<bool> is_border_element(number_of_elements,false);
    std::vector<bool> element_processed(number_of_elements,false);
    if(LogOut && verblevel)
      *LogOut << "Creating border element list for " << info.nshared
              << " border nodes and " << nvolume_nodes << " volume nodes."
              << std::endl;
    global.FunctionEntry("Assembly Prep");
    global.FunctionEntry("Find Border Elements");
    // At this time, we want border elements to be all of those which
    // have entities on the partition boundary. - try something else
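    // Worked example (assumed values): with number_of_nodes = 10 and
    // info.nshared = 3, nodes 8, 9, and 10 sit on the partition boundary,
    // and any element appearing in dc[7], dc[8], or dc[9] below is marked
    // as a border element.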
    for(Mesh::IndexType nn = nvolume_nodes;nn < number_of_nodes;nn++){
      // for(Mesh::IndexType nn = nlocal_nodes;nn < number_of_nodes;nn++){
      std::vector<Mesh::IndexType>::iterator ei = dc[nn].begin();
      while(ei != dc[nn].end()){
        if(!element_processed[*ei-1]){
          element_processed[*ei-1] = true;
          border_elements.push_back(*ei);
          is_border_element[*ei-1] = true;
        }
        ei++;
      }
    }
    border_elements.sort();
    global.FunctionExit("Find Border Elements");
    if(LogOut && verblevel){
      *LogOut << "Found " << border_elements.size() << " border elements."
              << std::endl;
      if(array_checking){
        DumpContents(*LogOut,border_elements," ");
        *LogOut << std::endl;
      }
    }
    std::list<Mesh::IndexType> queue;
    if(true){
      Mesh::IndexType nprocessed = 0;
      std::vector<bool> neighbors_processed(number_of_elements,false);
      element_processed.clear();
      element_processed.resize(number_of_elements,false);
      queue.clear();
      std::list<Mesh::IndexType>::iterator qi = queue.begin();
      global.FunctionEntry("Element order queue");
      while(nprocessed < number_of_elements){
        bool from_queue = false;
        Mesh::IndexType elnum = 0;
        if(queue.empty()){
          queue.push_back(1);
          qi = queue.begin();
        }
        if(qi != queue.end()){
          elnum = *qi-1;
          from_queue = true;
        }
        else { // strangely not done, get next unprocessed element
          // counts the processed elements; valid as long as they form a prefix
          for(Mesh::IndexType i = 0;i < number_of_elements;i++)
            if(element_processed[i])
              elnum++;
        }
        if(!element_processed[elnum]){
          element_processed[elnum] = true;
          if(!from_queue){
            queue.push_back(elnum+1);
          }
          nprocessed++;
        }
        if(!neighbors_processed[elnum]){
          neighbors_processed[elnum] = true;
          std::vector<Mesh::IndexType>::iterator ni = nl2[elnum].begin();
          while(ni != nl2[elnum].end()){
            if(!element_processed[*ni-1]){
              element_processed[*ni-1] = true;
              queue.push_back(*ni);
              nprocessed++;
            }
            ni++;
          }
        }
        qi++;
      }
      global.FunctionExit("Element order queue");
      if(LogOut && debug && array_checking){
        *LogOut << "Element order queue:" << std::endl;
        DumpContents(*LogOut,queue," ");
        *LogOut << std::endl;
      }
    }
1527  nl2.destroy();
1528  Mesh::IndexType nvoldof = 1;
1529  Mesh::IndexType nborderdof = 1;
1530  Mesh::IndexType nshareddof = 0;
1531  Mesh::IndexType nremotedof = 0;
1532  Mesh::IndexType ndoftot = 0;
1533  global.FunctionEntry("Dof Assignment");
1534  if(true){
1535  if(nproc == 1 && false){
1536  std::vector<bool> node_processed(number_of_nodes,false);
1537  std::list<Mesh::IndexType>::iterator ei = queue.begin();
1538  while(ei != queue.end()){
1539  std::vector<Mesh::IndexType>::iterator ni = econ[*ei-1].begin();
1540  while(ni != econ[*ei-1].end()){
1541  if(!node_processed[*ni-1]){
1542  node_processed[*ni-1] = true;
1543  NodalDofs[*ni-1].resize(N);
1544  for(unsigned int i = 0;i < N;i++)
1545  NodalDofs[*ni-1][i] = nvoldof++;
1546  }
1547  ni++;
1548  }
1549  ElementDofs[*ei-1].resize(N2);
1550  for(unsigned int i = 0;i < N2;i++)
1551  ElementDofs[*ei-1][i] = nvoldof++;
1552  ei++;
1553  }
1554  }
1555  else { // nproc > 1 - this loop works for nproc == 1 now too
1556  std::vector<bool> node_processed(number_of_nodes,false);
1557  std::list<Mesh::IndexType>::iterator ei = queue.begin();
1558  while(ei != queue.end()){
1559  if(!is_border_element[*ei-1]){
1560  if(array_checking)
1561  *LogOut << "Assigning dofs for nonborder element " << *ei << std::endl;
1562  std::vector<Mesh::IndexType>::iterator ni = econ[*ei-1].begin();
1563  while(ni != econ[*ei-1].end()){
1564  if(!node_processed[*ni-1]){
1565  node_processed[*ni-1] = true;
1566  NodalDofs[*ni-1].resize(N);
1567  for(unsigned int i = 0;i < N;i++){
1568  if(array_checking)
1569  *LogOut << "node " << *ni << " getting dof " << nvoldof << std::endl;
1570  NodalDofs[*ni-1][i] = nvoldof++;
1571  }
1572  }
1573  ni++;
1574  }
1575  ElementDofs[*ei-1].resize(N2);
1576  for(unsigned int i = 0;i < N2;i++){
1577  if(array_checking)
1578  *LogOut << "element " << *ei << " getting dof " << nvoldof << std::endl;
1579  ElementDofs[*ei-1][i] = nvoldof++;
1580  }
1581  }
1582  ei++;
1583  }
1584  // 1st of three passes through border elements first picks up the
1585  // elements
1586  ei = border_elements.begin();
1587  while(ei != border_elements.end()){
1588  ElementDofs[*ei-1].resize(N2);
1589  for(unsigned int i = 0;i < N2;i++){
1590  if(array_checking)
1591  *LogOut << "border element " << *ei << " getting dof " << nvoldof << std::endl;
1592  ElementDofs[*ei-1][i] = nvoldof++;
1593  }
1594  ei++;
1595  }
1596  nvoldof--;
1597  // 2nd loop picks up all local nodes
1598  ei = border_elements.begin();
1599  while(ei != border_elements.end()){
1600  std::vector<Mesh::IndexType>::iterator ni = econ[*ei-1].begin();
1601  while(ni != econ[*ei-1].end()){
1602  if(!node_processed[*ni-1] && *ni <= info.nlocal){
1603  node_processed[*ni-1] = true;
1604  NodalDofs[*ni-1].resize(N);
1605  for(unsigned int i = 0;i < N;i++){
1606  if(array_checking)
1607  *LogOut << "owned border node " << *ni << " getting dof "
1608  << nvoldof+nborderdof << std::endl;
1609  NodalDofs[*ni-1][i] = (nvoldof + nborderdof++);
1610  }
1611  }
1612  ni++;
1613  }
1614  ei++;
1615  }
1616  nshareddof = nborderdof - 1;
1617  // 3rd loop puts tentative numbers on remote nodes
1618  // Note that the local dof numbering of remotely owned
1619  // entities may not match across the
1620  // partition boundary. We will sync them up later
1621  // when we do remote dof discovery.
1622  ei = border_elements.begin();
1623  while(ei != border_elements.end()){
1624  std::vector<Mesh::IndexType>::iterator ni = econ[*ei-1].begin();
1625  while(ni != econ[*ei-1].end()){
1626  if(!node_processed[*ni-1]){
1627  node_processed[*ni-1] = true;
1628  NodalDofs[*ni-1].resize(N);
1629  for(unsigned int i = 0;i < N;i++){
1630  if(array_checking)
1631  *LogOut << "remote border node " << *ni << " getting local dof "
1632  << nvoldof+nborderdof << std::endl;
1633  NodalDofs[*ni-1][i] = (nvoldof + nborderdof++);
1634  }
1635  }
1636  ni++;
1637  }
1638  ei++;
1639  }
1640  nborderdof--;
1641  ndoftot = nvoldof + nborderdof;
1642  nremotedof = nborderdof - nshareddof;
1643  nlocal_dofs = nvoldof + nshareddof;
1644  if(StdOut && verblevel)
1645  *StdOut << "NDof TOTAL (local + adjacent remote): " << ndoftot << std::endl
1646  << "NDof Local (local total) DOFS: " << nlocal_dofs << std::endl
1647  << "NDof Remote (initial count of dofs owned remotely) DOFS: "
1648  << nremotedof << std::endl
1649  << "NDof Border (total of dofs on partition boundaries) DOFS: "
1650  << nborderdof << std::endl
1651  << "NDof Shared (dofs on the partition boundary that I own) DOFS: "
1652  << nshareddof << std::endl;
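 // Bookkeeping sketch with hypothetical counts: nvoldof = 100 interior
 // dofs, nshareddof = 20 owned border dofs, and nremotedof = 15 remotely
 // owned border dofs give nborderdof = 35, nlocal_dofs = 100 + 20 = 120,
 // and ndoftot = 100 + 35 = 135 (local dofs plus locally numbered remote dofs).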
1653  if(LogOut && debug){
1654  *LogOut << "nborderelems = " << border_elements.size() << std::endl;
1655  border_elements.sort();
1656  border_elements.unique();
1657  *LogOut << "after sort, size = " << border_elements.size() << std::endl;
1658  }
1659  }
1660  }
1661  global.FunctionExit("Dof Assignment");
1662  if(StdOut && verblevel)
1663  *StdOut << "Number of local DOFs: " << nlocal_dofs << std::endl
1664  << "Number of Elements on partition boundaries: " << border_elements.size()
1665  << std::endl;
1666  if(LogOut && debug){
1667  *LogOut << "Total NodalDof storage(bytes): "
1668  << (GetTotalSize(NodalDofs)+(number_of_nodes*2))*datasize
1669  << std::endl
1670  << "Total ElementDof storage(bytes): "
1671  << (GetTotalSize(ElementDofs)+(number_of_elements*2))*datasize
1672  << std::endl;
1673  }
1674  comm.Barrier();
1675 
1676  NodalDofs.Sync();
1677  NodalDofs.SyncSizes();
1678  ElementDofs.Sync();
1679  ElementDofs.SyncSizes();
1680  if(LogOut && debug && array_checking)
1681  *LogOut << "NodalDofs: " << std::endl
1682  << NodalDofs << std::endl
1683  << "ElementDofs: " << std::endl
1684  << ElementDofs << std::endl;
1685 
1686 
1687  // Initial global dof counting
1688  // We need to shuffle things around here when we have
1689  // disparity in dofs on shared mesh entities. Right now
1690  // we know there is no disparity - so we just do the
1691  // straightforward thing.
1692  comm.Barrier();
1693  if(LogOut && debug)
1694  *LogOut << "All processors entering global dof counting."
1695  << std::endl;
1696  global.FunctionEntry("Global DOF counting");
1697  Mesh::IndexType nglobal_dofs = nlocal_dofs;
1698  std::vector<Mesh::IndexType> ngdofs_p(nproc,0);
1699  std::vector<Mesh::IndexType> nnbr_p(nproc,0);
1700  ngdofs_p[rank] = nglobal_dofs;
1701  ndoftotal = ndoftot;
1702  Mesh::IndexType nremote_dofs = ndoftotal - nglobal_dofs;
1703  comm.Barrier();
1704  if(LogOut && debug)
1705  *LogOut << "All processors done counting local dofs."
1706  << std::endl
1707  << "Now gathering ..." << std::endl;
1708  int commstat = comm.AllGather(nglobal_dofs,ngdofs_p);
1709  comm.Barrier();
1710  if(LogOut && debug){
1711  *LogOut << "Global Dofcount verification = " << nglobal_dofs << std::endl
1712  << "commstat = " << commstat << std::endl;
1713  if(array_checking){
1714  *LogOut << "NGDofs:" << std::endl;
1715  DumpContents(*LogOut,ngdofs_p);
1716  *LogOut << std::endl;
1717  }
1718  }
1719 
1720  info.doffset = 0;
1721  Mesh::IndexType ndof_global_total = 0;
1722  for(unsigned int nn1 = 0;nn1 < rank;nn1++)
1723  info.doffset += ngdofs_p[nn1];
1724  for(unsigned int nn2 = 0;nn2 < nproc;nn2++)
1725  ndof_global_total += ngdofs_p[nn2];
1726  if(StdOut && verblevel)
1727  *StdOut << "Number of global dofs on this proc: " << nglobal_dofs << std::endl
1728  << "Number of remote dofs on this proc: " << nremote_dofs << std::endl
1729  << "Number of global dofs over all procs: " << ndof_global_total << std::endl;
1730  if(StdOut && verblevel)
1731  *StdOut << "My global offset: " << info.doffset << std::endl;
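 // Worked example of the offset arithmetic above (hypothetical counts):
 // with ngdofs_p = {10,12,11} over 3 ranks, rank 1 gets info.doffset = 10,
 // ndof_global_total = 33, and its dofs occupy global ids 11..22 (1-based).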
1732  // We have enough info to form a message to all neighbors to let
1733  // them know the global dof numbers of border nodes that I own.
1734  // Loop thru all the borders and do it
1735  comm.Barrier();
1736  if(LogOut && debug)
1737  *LogOut << "All procs entering global dof exchange."
1738  << std::endl;
1739 
1740  // This section of code does remote dof discovery. That is,
1741  // it lets all of our neighbors know the global dof id's of
1742  // the mesh entities that we own on the partition boundary.
1743  // Likewise, we will receive the equivalent dof information from
1744  // the remote partitions.
1745  Mesh::IndexType nremote_nodes = info.nnodes - info.nlocal;
1746  Mesh::Connectivity RemoteNodalDofs;
1747  if(LogOut && verblevel)
1748  *LogOut << "Number of remote nodes: " << nremote_nodes << std::endl;
1749  RemoteNodalDofs.resize(nremote_nodes);
1750  RemoteNodalDofs.Sync();
1751  // TestMPI(borders,NodalDofs,RemoteNodalDofs,info,comm,LogOut,array_checking);
1752  // LogFile.close();
1753  // comm.Barrier();
1754  // sleep(10);
1755  // comm.Barrier();
1756  // comm.SetExit(1);
1757  // if(comm.Check())
1758  // return(1);
1759  // else{
1760  // *StdOut << "SLEEPING" << std::endl;
1761  // sleep(1000000);
1762  // }
1763  global.FunctionEntry("Nodal DOF Exchange");
1764  GlobalDofExchange(borders,NodalDofs,RemoteNodalDofs,info,comm,LogOut,array_checking);
1765  global.FunctionExit("Nodal DOF Exchange");
1766  RemoteNodalDofs.ShrinkWrap();
1767  RemoteNodalDofs.SyncSizes();
1768  comm.Barrier();
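 // A minimal sketch of the exchange pattern implemented by GlobalDofExchange
 // (hypothetical flat buffers and neighbor rank nbr; the real routine works
 // on the border data structures): for each border, pack the global ids of
 // the dofs on nodes this partition owns and swap them with the neighbor.
 //
 //   std::vector<unsigned int> sendbuf, recvbuf; // packed global dof ids
 //   // ... fill sendbuf from NodalDofs for owned border nodes ...
 //   MPI_Status status;
 //   MPI_Sendrecv(&sendbuf[0],sendbuf.size(),MPI_UNSIGNED,nbr,0,
 //                &recvbuf[0],recvbuf.size(),MPI_UNSIGNED,nbr,0,
 //                MPI_COMM_WORLD,&status);
 //   // ... unpack recvbuf into RemoteNodalDofs for remotely owned nodes ...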
1769 
1770  global.FunctionEntry("Count Dofs");
1771  Mesh::IndexType nglobdof = 0;
1772  Mesh::IndexType nremotedof2 = 0;
1773  CountDofs(ElementDofs,NodalDofs,info,nglobdof,nremotedof2);
1774  global.FunctionExit("Count Dofs");
1775  if(StdOut && verblevel)
1776  *StdOut << "After global exchange and recount: " << std::endl
1777  << "Number of global dofs on this proc: " << nglobdof << std::endl
1778  << "Number of remote dofs on this proc: " << nremotedof2 << std::endl;
1779 
1780  assert(nglobdof == nglobal_dofs);
1781  // nremote_dofs = nremotedof;
1782  comm.Barrier();
1783  // Now we need to build the send and receive stiffnesses
1784  // The new way is to assume that ElementDofs holds ONLY elemental dofs, rather than
1785  // having the nodal dofs stacked on as previously. (this only costs some performance at this point)
1786  global.FunctionEntry("Build Communicated Stiffness");
1787  BuildBorderStiffness(econ,borders,NodalDofs,ElementDofs,RemoteNodalDofs,info,comm,LogOut,array_checking);
1788  global.FunctionExit("Build Communicated Stiffness");
1789  // Allocate the communication buffers
1790  unsigned int nalloc = AllocateCommBuffers(borders,NodalDofs,LogOut,debug);
1791  if(LogOut && debug)
1792  *LogOut << "Allocated " << nalloc*sizeof(double) << " bytes of communication buffers."
1793  << std::endl;
1794  comm.Barrier();
1795  if(LogOut && verblevel)
1796  *LogOut << "Registering Comm Buffers." << std::endl;
1797  // Register the communication buffers with the communication object
1798  if(RegisterCommBuffers(borders,comm)){
1799  if(ErrOut)
1800  *ErrOut << "Error registering the communication buffers."
1801  << std::endl;
1802  comm.SetExit(1);
1803  }
1804  if(comm.Check())
1805  return(1);
1806  global.FunctionExit("Global DOF counting");
1807  comm.Barrier();
1808  if(LogOut && debug)
1809  *LogOut << "All processors have exchanged interpartition DOF information."
1810  << std::endl
1811  << "Proceeding with dof sorting." << std::endl;
1812 
1813 
1814  global.FunctionEntry("Build Sparsity");
1815  // Now to actually build the sparsity pattern, LD[GD]
1816  // We have E[LED], N[LND], Nr[GD], LD:GD
1817  // We'll start by building a local version of K, LD[LD],
1818  // and using a straightforward mapping LD:GD to obtain LD[GD].
1819  // Then we'll add any needed global dofs coming from remote
1820  // processors.
1821  //
1822  // 1. Build local K, LD[GD]
1823  // - Need to somehow merge E[LED] and Nl[LND], since
1824  // we always assemble element-by-element, we should
1825  // rack up the LD on the elements in the order of
1826  // all nodal dofs first, then element dofs.
1827  //
1828  global.FunctionEntry("Create E[LD]");
1829  Mesh::Connectivity E_LD;
1830  E_LD.resize(number_of_elements);
1831  for(Mesh::IndexType en = 0;en < number_of_elements;en++){
1832  std::vector<Mesh::IndexType>::iterator eni = econ[en].begin();
1833  while(eni != econ[en].end()){ // for every node in this element
1834  Mesh::IndexType node_id = *eni++;
1835  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[node_id-1].begin();
1836  while(ndi != NodalDofs[node_id-1].end())
1837  E_LD[en].push_back(*ndi++); // add its dofs to the list of dofs for this element
1838 
1839  }
1840  std::vector<Mesh::IndexType>::iterator edi = ElementDofs[en].begin();
1841  while(edi != ElementDofs[en].end())
1842  E_LD[en].push_back(*edi++); // add the element's dofs as well
1843  }
1844  global.FunctionEntry("E[LD] post");
1845  E_LD.ShrinkWrap();
1846  E_LD.Sync();
1847  E_LD.SyncSizes();
1848  // We need the sizes later
1849  std::vector<Mesh::IndexType> element_ndofs(number_of_elements,0);
1850  for(Mesh::IndexType ei = 0;ei < number_of_elements;ei++)
1851  element_ndofs[ei] = E_LD.Esize(ei+1);
1852  global.FunctionExit("E[LD] post");
1853  global.FunctionExit("Create E[LD]");
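 // Example row of E_LD (hypothetical ids): with N = 1 dof per node, a
 // 4-node element e with nodes {3,7,9,2} gets the row
 //   [ NodalDofs[2][0], NodalDofs[6][0], NodalDofs[8][0], NodalDofs[1][0],
 //     ElementDofs[e-1][0..N2-1] ]
 // -- all nodal dofs first, then the element dofs, which is the order the
 // assembly loops below expect.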
1854  //
1855  // Now E_LD is a list of all dofs for every element. It is an important point
1856  // that the LD are the local dofs, because they run from 1 to nlocal_dofs.
1857  // Since the indices of E (1 to number_of_elements) and the dofs are both sequential
1858  // lists, we can do some nifty array manipulations to obtain the information we need.
1859  // First, we invert E[LD] to obtain LD[E]:
1860  Mesh::Connectivity LD_E;
1861  global.FunctionEntry("Invert E[LD]");
1862  E_LD.Inverse(LD_E,ndoftot);
1863  global.FunctionExit("Invert E[LD]");
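 // Inverse() transposes the element-to-dof incidence. For a tiny
 // (hypothetical) E_LD = { e1:[1,2], e2:[2,3] }, the result is
 // LD_E = { d1:[1], d2:[1,2], d3:[2] }: for each dof, the elements that
 // reference it, sized here to ndoftot rows.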
1864  //
1865  // Now, there is one slight problem with our version of LD[E], in that it includes
1866  // LD which are actually just local numbers for remote dofs. We do not need the
1867  // stiffness rows for these. In fact, we already have created the stiffness blocks
1868  // for these rows when we created the communication buffers. We know those dofs are
1869  // all dofs with id's > nglobdof. If we don't take care of the problem now, we pay
1870  // a hefty memory and processing fee in the following steps.
1871  LD_E.Sync();
1872  assert(LD_E.Nelem() == ndoftot);
1873  for(Mesh::IndexType di = nglobdof;di < ndoftot;di++)
1874  LD_E[di].resize(0);
1875  LD_E.ShrinkWrap();
1876  LD_E.SyncSizes();
1877 
1878  // LD[E]%E[LD] = LD[LD] := Stiffness
1879  Mesh::Connectivity symbstiff;
1880  global.FunctionEntry("Sparsity Pattern");
1881  LD_E.GetNeighborhood(symbstiff,E_LD,false,false);
1882  global.FunctionExit("Sparsity Pattern");
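 // GetNeighborhood composes the two incidences: row ld of symbstiff is the
 // union of E_LD[e] over all elements e in LD_E[ld], i.e. every dof sharing
 // an element with ld. Continuing the tiny example above, dof 2 touches
 // elements {1,2}, so its stiffness row is [1,2] U [2,3] = {1,2,3}.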
1883 
1884  comm.Barrier();
1885  LD_E.destroy();
1886  E_LD.destroy();
1887 
1888  if(LogOut && debug && array_checking)
1889  *LogOut << "All Local K:" << std::endl << symbstiff
1890  << std::endl;
1891 
1892 
1893  // Now we need to do two things, update our "K" with the dofs which
1894  // are coming from remote procs and also map all the dof id's to
1895  // global. Then we're done building the parallel version of K.
1896  //
1897  // First, we need a map from local dof id's for the remote dofs
1898  // on the partition boundary to the global id's that have been
1899  // told to us by the remote processor.
1900  //
1901  // *Note: This code updates the local dof rows which already
1902  // contain local dof numbers for remotely owned dofs. These
1903  // remote dofs are just the dofs of the remotely owned border
1904  // nodes. This is just a mapping operation.
1905  //
1906  global.FunctionEntry("K update");
1907  if(true){
1908  std::vector<Mesh::IndexType> local_dof_to_global;
1909  local_dof_to_global.resize(ndoftot-nglobdof);
1910  for(Mesh::IndexType ni = info.nlocal;ni < number_of_nodes;ni++){
1911  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[ni].begin();
1912  std::vector<Mesh::IndexType>::iterator rdi = RemoteNodalDofs[ni-info.nlocal].begin();
1913  assert(NodalDofs[ni].size() == RemoteNodalDofs[ni-info.nlocal].size());
1914  while(ndi != NodalDofs[ni].end()){
1915  local_dof_to_global[*ndi-nglobdof-1] = *rdi;
1916  ndi++;
1917  rdi++;
1918  }
1919  }
1920  Mesh::Connectivity::iterator ConIt = symbstiff.begin();
1921  while(ConIt != symbstiff.end()){
1922  std::vector<Mesh::IndexType>::iterator dofIt = ConIt->begin();
1923  Mesh::IndexType ind = 0;
1924  while(dofIt != ConIt->end()){
1925  Mesh::IndexType dof_id = *dofIt++;
1926  if(dof_id <= nglobdof)
1927  (*ConIt)[ind++] = dof_id+info.doffset;
1928  else
1929  (*ConIt)[ind++] = local_dof_to_global[dof_id - nglobdof - 1];
1930  }
1931  ConIt++;
1932  }
1933  }
1934  //
1935  // Now update with the remote dof info.
1936  // *Note: This part takes the list of remote dofs (some of which are not even on
1937  // the border) which talk to our local dofs and updates the appropriate
1938  // local rows to contain the proper global ids of the remote dofs.
1939  // These values were not present before this section of code.
1940  // This is *not* a mapping operation.
1941  //
1942  for(unsigned int nn = 0;nn < info.nborder;nn++){
1943  std::vector<Mesh::IndexType>::iterator bni = borders[nn].nrecv.begin();
1944  std::vector<Mesh::IndexType>::iterator Api = borders[nn].data.RecvAp.begin();
1945  // (1) For every node that I own on this border...
1946  while(bni != borders[nn].nrecv.end()){
1947  Mesh::IndexType begindex = *Api++;
1948  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[*bni-1].begin();
1949  // (2) Go through its local dof id's and...
1950  Mesh::IndexType nvals = *Api - begindex;
1951  while(ndi != NodalDofs[*bni-1].end()){
1952  // (3) add the remote dof id's to the corresponding local row
1953  for(unsigned int i = 0;i < nvals;i++)
1954  symbstiff[*ndi-1].push_back(borders[nn].data.RecvAi[begindex+i]);
1955  ndi++;
1956  }
1957  bni++;
1958  }
1959  }
1960  global.FunctionExit("K update");
1961  if(LogOut && debug && array_checking)
1962  *LogOut << "K after update:" << std::endl << symbstiff
1963  << std::endl;
1964  // Finally, sort and unique each row. This will be a
1965  // bottleneck; it is slow.
1966  global.FunctionEntry("K sort");
1967  Mesh::Connectivity::iterator ssIt = symbstiff.begin();
1968  while(ssIt != symbstiff.end()){
1969  std::sort(ssIt->begin(),ssIt->end());
1970  std::vector<Mesh::IndexType> tmp(ssIt->begin(),std::unique(ssIt->begin(),ssIt->end()));
1971  ssIt->swap(tmp);
1972  ssIt++;
1973  }
1974  global.FunctionExit("K sort");
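 // The copy-and-swap above both deduplicates and shrinks each row's
 // capacity; when the shrink is not needed, the usual idiom would be:
 //   ssIt->erase(std::unique(ssIt->begin(),ssIt->end()),ssIt->end());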
1975  if(LogOut && debug && array_checking)
1976  *LogOut << "Sorted K:" << std::endl << symbstiff
1977  << std::endl;
1978 
1979  comm.Barrier();
1980  global.FunctionEntry("Create Stiffness");
1981  symbstiff.Truncate();
1982  symbstiff.ShrinkWrap();
1983  FEM::DummyStiffness<double,Mesh::IndexType,Mesh::Connectivity,std::vector<Mesh::IndexType> > k;
1984  k._dofs = &symbstiff;
1985  k.rank = rank;
1986  // Only dummy-sized data is needed for the symbolic assembly
1987  k._data.resize(1000,0.0);
1988  k._sizes.resize(nglobdof+1,0);
1989  Mesh::Connectivity::iterator dci = symbstiff.begin();
1990  Mesh::IndexType indcnt = 1;
1991  while(indcnt <= nglobdof){
1992  k._sizes[indcnt] = dci->size()+k._sizes[indcnt-1];
1993  dci++;
1994  indcnt++;
1995  }
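 // k._sizes is a CSR-style row pointer: k._sizes[i] is the number of
 // nonzeros in rows 1..i. E.g. hypothetical row sizes {3,5,2} give
 // k._sizes = {0,3,8,10}, so row i occupies [k._sizes[i-1],k._sizes[i])
 // in the flattened matrix and k._sizes[nglobdof] is the local NZ count.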
1996  // ngdofs_p = # global dofs on each processor
1997 
1998  Mesh::IndexType nnz = GetTotalSize(symbstiff);
1999 
2000  k._ndof = nglobdof;
2001  std::vector<Mesh::IndexType> ApLoc(1,k._sizes[nglobdof]);
2002  std::vector<Mesh::IndexType> ApTot(1,0);
2003 
2004  comm.Reduce(ApLoc,ApTot,IndexDT,Comm::SUMOP,0);
2005  if(LogOut && verblevel){
2006  std::vector<Mesh::IndexType>::iterator si = k._sizes.begin();
2007  std::vector<Mesh::IndexType>::iterator si2 = k._sizes.begin();
2008  Mesh::IndexType myav = 0;
2009  // Mesh::IndexType theav = 0;
2010  si++;
2011  while(si != k._sizes.end())
2012  myav += (*si++ - *si2++);
2013  myav /= (k._sizes.size()-1);
2014  *LogOut << "The average NZ per stiffness row is: " << myav << std::endl;
2015  }
2016 
2017  if(LogOut && debug)
2018  *LogOut << "Stiffness storage(bytes): "
2019  << nnz*datasize << std::endl;
2020  Mesh::IndexType totsize = 0;
2021  MPI_Reduce(&k._sizes[nglobdof],&totsize,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD);
2022  if(StdOut && verblevel)
2023  *StdOut << "Total number of local Stiffness entries: " << nnz
2024  << ":" << k._sizes[nglobdof] << ":" << totsize << std::endl
2025  << "Total NZ over all procs: " << ApTot[0] << std::endl;
2026 
2027  if(do_dump_sparse){
2028  std::ostringstream Ostr;
2029  std::ostringstream ZeroStr;
2030  unsigned int pwr10 = 10;
2031  while(nproc/pwr10 && !(rank/pwr10)){
2032  ZeroStr << "0";
2033  pwr10 *= 10;
2034  }
2035  Ostr << infiles[0] << "_sparse_"
2036  << ZeroStr.str() << rank+1;
2037  std::ofstream Ouf;
2038  Ouf.open(Ostr.str().c_str());
2039  Ouf << ndof_global_total << std::endl;
2040  Ouf << k._ndof << std::endl;
2041  k.Dump(Ouf);
2042  Ouf.close();
2043  }
2044 
2045  global.FunctionExit("Create Stiffness");
2046  global.FunctionExit("Build Sparsity");
2047 
2048 
2049 
2050  std::vector<Mesh::IndexType> Order;
2051 
2052  // bool do_renumbering = true;
2053  if(do_renumbering){
2054  global.FunctionEntry("Renumbering");
2055  std::vector<idxtype> adj;
2056  std::vector<idxtype> xadj;
2057  std::vector<idxtype> vtxdist;
2058  vtxdist.resize(ngdofs_p.size()+1);
2059  std::vector<Mesh::IndexType>::iterator ngdi = ngdofs_p.begin();
2060  std::vector<idxtype>::iterator vtdi = vtxdist.begin();
2061  *vtdi = 0;
2062  while(ngdi != ngdofs_p.end()){
2063  idxtype nn = static_cast<idxtype>(*vtdi++ + *ngdi++);
2064  *vtdi = nn;
2065  }
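 // vtxdist is the standard ParMETIS vertex distribution: entry p holds the
 // global id of the first dof on rank p and the last entry the total, i.e.
 // a running sum of ngdofs_p. E.g. ngdofs_p = {10,12,11} yields
 // vtxdist = {0,10,22,33}.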
2066  symbstiff.Truncate();
2067  symbstiff.graph_mode(info.doffset);
2068  if(array_checking && LogOut){
2069  *LogOut << "Graph K:" << std::endl << symbstiff
2070  << std::endl;
2071  }
2072  // this function does the 1-based to 0-based mapping
2073  global.FunctionEntry("Container2CSR");
2074  MultiContainer2CSR< std::vector< std::vector<Mesh::IndexType> >,
2075  std::vector<Mesh::IndexType>,
2076  std::vector<idxtype>,idxtype>
2077  (xadj,adj,symbstiff);
2078  symbstiff.matrix_mode(info.doffset);
2079  global.FunctionExit("Container2CSR");
2080  if(array_checking && LogOut){
2081  *LogOut << "Matrix K:" << std::endl << symbstiff
2082  << std::endl;
2083  }
2084  std::vector<idxtype> order(ngdofs_p[rank],0);
2085  std::vector<idxtype> sizes(ngdofs_p.size()*2,0);
2086  int communicator = MPI_COMM_WORLD;
2087  int numflag = 0;
2088  int options[5];
2089  options[0] = 1;
2090  options[1] = 3;
2091  options[2] = 15;
2092  if(array_checking){
2093  *LogOut << "Vtxdist:" << std::endl;
2094  DumpContents(*LogOut,vtxdist," ");
2095  *LogOut << std::endl << "Xadj:" << std::endl;
2096  DumpContents(*LogOut,xadj," ");
2097  *LogOut << std::endl << "Adj:" << std::endl;
2098  DumpContents(*LogOut,adj," ");
2099  *LogOut << std::endl << "Ngdofs_p:" << std::endl;
2100  DumpContents(*LogOut,ngdofs_p," ");
2101  *LogOut << std::endl;
2102  }
2103  global.FunctionEntry("Metis-NodeID");
2104 #ifdef PMETIS
2105  ParMETIS_V3_NodeND(&vtxdist[0],&xadj[0],&adj[0],&numflag,
2106  options,&order[0],&sizes[0],&communicator);
2107 #endif
2108  global.FunctionExit("Metis-NodeID");
2109  if(array_checking){
2110  *LogOut << "Order:" << std::endl;
2111  DumpContents(*LogOut,order," ");
2112  *LogOut << std::endl << "Sizes:" << std::endl;
2113  DumpContents(*LogOut,sizes," ");
2114  *LogOut << std::endl;
2115  }
2116  global.FunctionEntry("OrderSort");
2117  std::vector<idxtype> sorder(order);
2118  Order.resize(order.size());
2119  std::vector<idxtype>::iterator oi = order.begin();
2120  std::vector<Mesh::IndexType>::iterator Oi = Order.begin();
2121  while(oi != order.end())
2122  *Oi++ = *oi++;
2123  order.resize(0);
2124  std::sort(sorder.begin(),sorder.end());
2125  global.FunctionExit("OrderSort");
2126  std::vector<idxtype>::iterator soi = sorder.begin();
2127  *LogOut << "Reordered Min: " << sorder.front() << std::endl
2128  << "Reordered Max: " << sorder.back() << std::endl;
2129  unsigned int ttcount = 0;
2130  idxtype max_contig = 0;
2131  idxtype min_contig = 0;
2132  // idxtype contig_median = 0;
2133  idxtype contig_min = 0;
2134  idxtype contig_max = 0;
2135  int ncontig = 0;
2136  struct {
2137  int nbin;
2138  int rank;
2139  } binrank_p[nproc], binrank[nproc]; // note: runtime-sized arrays rely on a compiler extension
2140  for(unsigned int iii = 0;iii < nproc;iii++){
2141  binrank[iii].nbin = 0;
2142  binrank[iii].rank = rank;
2143  binrank_p[iii].rank = 0;
2144  binrank_p[iii].nbin = 0;
2145  }
2146  std::vector<unsigned int> nbin(nproc,0);
2147  int binsize = ndof_global_total/nproc;
2148  while(soi != sorder.end()){
2149  idxtype first = *soi++;
2150  idxtype second = first;
2151  bool unbinned = true;
2152  unsigned int aaa = 0;
2153  int binmin = 0;
2154  while(aaa < nproc && unbinned){
2155  if(first < (binmin + binsize)){
2156  unbinned = false;
2157  binrank[aaa].nbin++;
2158  }
2159  binmin += binsize;
2160  aaa++;
2161  }
2162  if(soi != sorder.end()){
2163  second = *soi;
2164  if(second != first+1){
2165  if(ncontig > 1000 && debug)
2166  *LogOut << "Large contiguous block(" << ncontig << "): ("
2167  << min_contig << "," << first << ")" << std::endl;
2168  if(ncontig > max_contig){
2169  max_contig = ncontig;
2170  contig_max = first;
2171  contig_min = min_contig;
2172  }
2173  min_contig = second;
2174  ncontig = 0;
2175  if(array_checking){
2176  *LogOut << "NonSequential(" << ttcount << "): " << first << ":"
2177  << second << std::endl;
2178  }
2179  }
2180  else
2181  ncontig++;
2182  }
2183  ttcount++;
2184  }
2185  if(debug){
2186  *LogOut << "Maximum contiguous block(" << max_contig << "): (" << contig_min
2187  << "," << contig_max << ")" << std::endl;
2188  }
2189  int binmin = 0;
2190  int maxbin = 0;
2191  int solve_rank = 0;
2192  for(unsigned int iii = 0;iii < nproc;iii++){
2193  if(debug)
2194  *LogOut << "Bin(" << binmin << "," << binmin+binsize << "): " << binrank[iii].nbin
2195  << std::endl;
2196  if(binrank[iii].nbin > maxbin){
2197  maxbin = binrank[iii].nbin;
2198  solve_rank = iii;
2199  }
2200  binmin+=binsize;
2201  }
2202  *LogOut << "My Preferred Solve Rank = " << solve_rank << std::endl;
2203  MPI_Allreduce(binrank,binrank_p,nproc,MPI_2INT,MPI_MAXLOC,MPI_COMM_WORLD);
2204  std::vector<unsigned int> solve_ranks(nproc,0);
2205  std::vector<unsigned int> srank_to_rank(nproc,0);
2206  std::vector<bool> solve_rank_covered(nproc,false);
2207  std::vector<unsigned int> rank_conflict;
2208  comm.Barrier();
2209  for(unsigned int iii = 0;iii < nproc;iii++){
2210  solve_ranks[binrank_p[iii].rank] = iii;
2211  srank_to_rank[iii] = binrank_p[iii].rank;
2212  }
2213  // MPI_Comm solvecomm = MPI_COMM_NULL;
2214  Comm::CommunicatorObject solvercomm;
2215  commstat = comm.Split(0,solve_ranks[rank],solvercomm);
2216  // int mpistat = MPI_Comm_split(MPI_COMM_WORLD,0,binrank_p[rank].rank,&solvecomm);
2217  if(commstat){
2218  *LogOut << "MPI Error: " << commstat << std::endl;
2219  return(1);
2220  }
2221  // MPI_Comm_rank(solvecomm,&solve_rank);
2222  // *LogOut << "My Solve Rank(1) = " << solve_rank << std::endl;
2223  solve_rank = solvercomm.Rank();
2224  *LogOut << "My Solve Rank = " << solve_rank << std::endl;
2225 
2226 
2227  unsigned int ncovered = 0;
2228  for(unsigned int iii = 0;iii < nproc;iii++){
2229  *LogOut << "SolveRanks[" << iii << "] == " << solve_ranks[iii]
2230  << std::endl
2231  << "SRank[" << iii << "] == " << srank_to_rank[iii] << std::endl;
2232  if(rank != iii){
2233  if(solve_ranks[iii] == solve_ranks[rank])
2234  rank_conflict.push_back(iii);
2235  else{
2236  solve_rank_covered[solve_ranks[iii]] = true;
2237  ncovered++;
2238  }
2239  }
2240  else{
2241  solve_rank_covered[solve_ranks[iii]] = true;
2242  ncovered++;
2243  }
2244  }
2245  if(!rank_conflict.empty()){
2246  comm.SetExit(1);
2247  *LogOut << "Conflicting for solve_rank(" << solve_rank
2248  << "): ";
2249  DumpContents(*LogOut,rank_conflict," ");
2250  *LogOut << std::endl;
2251  }
2252  if(ncovered != nproc){
2253  comm.SetExit(1);
2254  *LogOut << "Uncovered bins: ";
2255  unsigned int bincount = 0;
2256  std::vector<bool>::iterator rci = solve_rank_covered.begin();
2257  while(rci != solve_rank_covered.end()){
2258  if(*rci++ == false)
2259  *LogOut << bincount << " ";
2260  bincount++;
2261  }
2262  *LogOut << std::endl;
2263  }
2264  if(comm.Check())
2265  return(1);
2266  int binxtra = ndof_global_total%nproc;
2267  unsigned int mynrows = binsize + (solve_rank < binxtra ? 1 : 0);
2268  std::vector<unsigned int> nrows_p(nproc,0);
2269  nrows_p[solve_rank] = mynrows;
2270  commstat = solvercomm.AllGather(mynrows,nrows_p);
2271  solvercomm.Barrier();
2272  int solve_doffset = 0;
2273  for(int iii = 0;iii < solve_rank;iii++)
2274  solve_doffset += nrows_p[iii];
2275  int my_first_row = solve_doffset;
2276  int my_last_row = my_first_row + mynrows - 1;
2277  if(debug)
2278  *LogOut << "My Rows (" << mynrows << ") = (" << my_first_row
2279  << "," << my_last_row << ")" << std::endl;
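 // Row-range sketch (hypothetical counts): with nrows_p = {11,11,11} and
 // solve_rank = 1, solve_doffset = 11, so this rank solves the 0-based
 // rows 11..21 (my_first_row..my_last_row).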
2280  Mesh::Connectivity RemoteRows;
2281  RemoteRows.resize(nproc);
2282  for(unsigned int iii = 0;iii < nglobdof;iii++){
2283  unsigned int row_index = Order[iii];
2284  unsigned int solve_procid = 0;
2285  unsigned int findrow = nrows_p[0];
2286  while(findrow <= row_index) // advance until row_index falls below this rank's upper bound
2287  findrow += nrows_p[++solve_procid];
2288  RemoteRows[solve_procid].push_back(row_index);
2289  }
2290  if(array_checking)
2291  *LogOut << "RemoteRows:" << std::endl
2292  << RemoteRows << std::endl;
2293  if(debug){
2294  *LogOut << "Row distribution: " << std::endl;
2295  for(unsigned int iii = 0;iii < nproc;iii++)
2296  *LogOut << "Rows on Solve Rank " << iii << ": "
2297  << RemoteRows[iii].size() << std::endl;
2298  }
2299  unsigned int nlocal_rows = RemoteRows[solve_rank].size();
2300  if(debug)
2301  *LogOut << "Number of solved Rows I own: " << nlocal_rows << std::endl
2302  << "Number of Rows I assemble: " << nglobdof << std::endl
2303  << "Number of Rows I solve: " << mynrows << std::endl;
2304  int nremote_rows = 0;
2305  for(int iii = 0;iii < (int)nproc;iii++)
2306  if(iii != solve_rank)
2307  nremote_rows += RemoteRows[iii].size();
2308  int nrecvd_rows = mynrows - nlocal_rows;
2309  if(debug)
2310  *LogOut << "Number of Remote Rows I assemble (Sent Rows): " << nremote_rows << std::endl
2311  << "Number of my Rows assembled remotely (Recvd Rows): " << nrecvd_rows << std::endl;
2312  // Now need to communicate and discover the mapping of old remote dof numbers
2313  // to new. There are two sets of dofs that we need:
2314  // 1. The new numbers for the dofs on remotely owned nodes
2315  // 2. The new numbers for the remote dofs which we receive
2316  //
2317  // Once we have this information, we can update our version of K so
2318  // that its sparsity pattern is consistent with the renumbering.
2319  GlobalDofReMapExchange(econ,NodalDofs,ElementDofs,RemoteNodalDofs,
2320  borders,info,Order,comm,LogOut,array_checking);
2321 
2322  if(true){
2323  global.FunctionEntry("Build Sparsity");
2324  // Now to actually build the sparsity pattern, LD[GD]
2325  // We have E[LED], N[LND], Nr[GD], LD:GD
2326  // We'll start by building a local version of K, LD[LD],
2327  // and using a straightforward mapping LD:GD to obtain LD[GD].
2328  // Then we'll add any needed global dofs coming from remote
2329  // processors.
2330  //
2331  // 1. Build local K, LD[GD]
2332  // - Need to somehow merge E[LED] and Nl[LND], since
2333  // we always assemble element-by-element, we should
2334  // rack up the LD on the elements in the order of
2335  // all nodal dofs first, then element dofs.
2336  //
2337  global.FunctionEntry("Create E[LD]");
2338  Mesh::Connectivity E_LD;
2339  E_LD.resize(number_of_elements);
2340  for(Mesh::IndexType en = 0;en < number_of_elements;en++){
2341  std::vector<Mesh::IndexType>::iterator eni = econ[en].begin();
2342  while(eni != econ[en].end()){ // for every node in this element
2343  Mesh::IndexType node_id = *eni++;
2344  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[node_id-1].begin();
2345  while(ndi != NodalDofs[node_id-1].end())
2346  E_LD[en].push_back(*ndi++); // add its dofs to the list of dofs for this element
2347 
2348  }
2349  std::vector<Mesh::IndexType>::iterator edi = ElementDofs[en].begin();
2350  while(edi != ElementDofs[en].end())
2351  E_LD[en].push_back(*edi++); // add the element's dofs as well
2352  }
2353  global.FunctionEntry("E[LD] post");
2354  E_LD.ShrinkWrap();
2355  E_LD.Sync();
2356  E_LD.SyncSizes();
2357  // We need the sizes later
2358  std::vector<Mesh::IndexType> element_ndofs(number_of_elements,0);
2359  for(Mesh::IndexType ei = 0;ei < number_of_elements;ei++)
2360  element_ndofs[ei] = E_LD.Esize(ei+1);
2361  global.FunctionExit("E[LD] post");
2362  global.FunctionExit("Create E[LD]");
2363  //
2364  // Now E_LD is a list of all dofs for every element. It is an important point
2365  // that the LD are the local dofs, because they run from 1 to nlocal_dofs.
2366  // Since the indices of E (1 to number_of_elements) and the dofs are both sequential
2367  // lists, we can do some nifty array manipulations to obtain the information we need.
2368  // First, we invert E[LD] to obtain LD[E]:
2369  Mesh::Connectivity LD_E;
2370  global.FunctionEntry("Invert E[LD]");
2371  E_LD.Inverse(LD_E,ndoftot);
2372  global.FunctionExit("Invert E[LD]");
2373  //
2374  // Now, there is one slight problem with our version of LD[E], in that it includes
2375  // LD which are actually just local numbers for remote dofs. We do not need the
2376  // stiffness rows for these. In fact, we already have created the stiffness blocks
2377  // for these rows when we created the communication buffers. We know those dofs are
2378  // all dofs with id's > nglobdof. If we don't take care of the problem now, we pay
2379  // a hefty memory and processing fee in the following steps.
2380  LD_E.Sync();
2381  assert(LD_E.Nelem() == ndoftot);
2382  for(Mesh::IndexType di = nglobdof;di < ndoftot;di++)
2383  LD_E[di].resize(0);
2384  LD_E.ShrinkWrap();
2385  LD_E.SyncSizes();
2386 
2387  // LD[E]%E[LD] = LD[LD] := Stiffness
2388  symbstiff.destroy();
2389  global.FunctionEntry("Sparsity Pattern");
2390  LD_E.GetNeighborhood(symbstiff,E_LD,false,false);
2391  global.FunctionExit("Sparsity Pattern");
2392 
2393  comm.Barrier();
2394  LD_E.destroy();
2395  E_LD.destroy();
2396 
2397  if(LogOut && debug && array_checking)
2398  *LogOut << "All Local K:" << std::endl << symbstiff
2399  << std::endl;
2400 
2401 
2402  // Now we need to do two things, update our "K" with the dofs which
2403  // are coming from remote procs and also map all the dof id's to
2404  // global. Then we're done building the parallel version of K.
2405  //
2406  // First, we need a map from local dof id's for the remote dofs
2407  // on the partition boundary to the global id's that have been
2408  // told to us by the remote processor.
2409  //
2410  // *Note: This code updates the local dof rows which already
2411  // contain local dof numbers for remotely owned dofs. These
2412  // remote dofs are just the dofs of the remotely owned border
2413  // nodes. This is just a mapping operation.
2414  //
2415  global.FunctionEntry("K update");
2416  if(true){
2417  std::vector<Mesh::IndexType> local_dof_to_global;
2418  local_dof_to_global.resize(ndoftot-nglobdof);
2419  for(Mesh::IndexType ni = info.nlocal;ni < number_of_nodes;ni++){
2420  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[ni].begin();
2421  std::vector<Mesh::IndexType>::iterator rdi = RemoteNodalDofs[ni-info.nlocal].begin();
2422  assert(NodalDofs[ni].size() == RemoteNodalDofs[ni-info.nlocal].size());
2423  while(ndi != NodalDofs[ni].end()){
2424  local_dof_to_global[*ndi-nglobdof-1] = *rdi;
2425  ndi++;
2426  rdi++;
2427  }
2428  }
2429  Mesh::Connectivity::iterator ConIt = symbstiff.begin();
2430  while(ConIt != symbstiff.end()){
2431  std::vector<Mesh::IndexType>::iterator dofIt = ConIt->begin();
2432  Mesh::IndexType ind = 0;
2433  while(dofIt != ConIt->end()){
2434  Mesh::IndexType dof_id = *dofIt++;
2435  if(dof_id <= nglobdof)
2436  (*ConIt)[ind++] = Order[dof_id-1]+1;
2437  else
2438  (*ConIt)[ind++] = local_dof_to_global[dof_id - nglobdof - 1];
2439  }
2440  ConIt++;
2441  }
2442  }
2443  //
2444  // Now update with the remote dof info.
2445  // *Note: This part takes the list of remote dofs (some of which are not even on
2446  // the border) which talk to our local dofs and updates the appropriate
2447  // local rows to contain the proper global ids of the remote dofs.
2448  // These values were not present before this section of code.
2449  // This is *not* a mapping operation.
2450  //
2451  for(unsigned int nn = 0;nn < info.nborder;nn++){
2452  std::vector<Mesh::IndexType>::iterator bni = borders[nn].nrecv.begin();
2453  std::vector<Mesh::IndexType>::iterator Api = borders[nn].data.RecvAp.begin();
2454  // (1) For every node that I own on this border...
2455  while(bni != borders[nn].nrecv.end()){
2456  Mesh::IndexType begindex = *Api++;
2457  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[*bni-1].begin();
2458  // (2) Go through its local dof id's and...
2459  Mesh::IndexType nvals = *Api - begindex;
2460  while(ndi != NodalDofs[*bni-1].end()){
2461  // (3) add the remote dof id's to the corresponding local row
2462  for(unsigned int i = 0;i < nvals;i++)
2463  symbstiff[*ndi-1].push_back(borders[nn].data.RecvAi[begindex+i]);
2464  ndi++;
2465  }
2466  bni++;
2467  }
2468  }
2469  global.FunctionExit("K update");
2470  if(LogOut && debug && array_checking)
2471  *LogOut << "K after update:" << std::endl << symbstiff
2472  << std::endl;
2473  // Finally, sort and unique each row. This will be a
2474  // bottleneck; it is slow.
2475  global.FunctionEntry("K sort");
2476  Mesh::Connectivity::iterator ssIt = symbstiff.begin();
2477  while(ssIt != symbstiff.end()){
2478  std::sort(ssIt->begin(),ssIt->end());
2479  std::vector<Mesh::IndexType> tmp(ssIt->begin(),std::unique(ssIt->begin(),ssIt->end()));
2480  ssIt->swap(tmp);
2481  ssIt++;
2482  }
2483  global.FunctionExit("K sort");
2484  if(LogOut && debug && array_checking)
2485  *LogOut << "Sorted K:" << std::endl << symbstiff
2486  << std::endl;
2487 
2488  comm.Barrier();
2489  global.FunctionEntry("Create Stiffness");
2490  symbstiff.Truncate();
2491  symbstiff.ShrinkWrap();
2492  // FEM::DummyStiffness<double,Mesh::IndexType,Mesh::Connectivity,std::vector<Mesh::IndexType> > k;
2493  k._dofs = &symbstiff;
2494  k.rank = rank;
2495  // Only dummy-sized data is needed for the symbolic assembly
2496  k._data.resize(1000,0.0);
2497  k._sizes.resize(nglobdof+1,0);
2498  Mesh::Connectivity::iterator dci = symbstiff.begin();
2499  Mesh::IndexType indcnt = 1;
2500  while(indcnt <= nglobdof){
2501  k._sizes[indcnt] = dci->size()+k._sizes[indcnt-1];
2502  dci++;
2503  indcnt++;
2504  }
2505  }
2506  Mesh::IndexType nnz = GetTotalSize(symbstiff);
2507 
2508  k._ndof = nglobdof;
2509  std::vector<Mesh::IndexType> ApLoc(1,k._sizes[nglobdof]);
2510  std::vector<Mesh::IndexType> ApTot(1,0);
2511  // comm.Reduce does not work here since we convert datatypes
2512  // comm.Reduce(ApLoc,ApTot,MPI_SUM,0);
2513  if(LogOut && verblevel){
2514  std::vector<Mesh::IndexType>::iterator si = k._sizes.begin();
2515  std::vector<Mesh::IndexType>::iterator si2 = k._sizes.begin();
2516  Mesh::IndexType myav = 0;
2517  // Mesh::IndexType theav = 0;
2518  si++;
2519  while(si != k._sizes.end())
2520  myav += (*si++ - *si2++);
2521  myav /= (k._sizes.size()-1);
2522  *LogOut << "The average NZ per stiffness row is: " << myav << std::endl;
2523  }
2524 
2525  if(LogOut && debug)
2526  *LogOut << "Stiffness storage(bytes): "
2527  << nnz*datasize << std::endl;
2528  Mesh::IndexType totsize = 0;
2529  MPI_Reduce(&k._sizes[nglobdof],&totsize,1,MPI_INTEGER,MPI_SUM,0,MPI_COMM_WORLD);
2530  if(debug)
2531  *LogOut << "Total number of local Stiffness entries: "
2532  << k._sizes[nglobdof] << std::endl
2533  << "Total NZ over all procs: " << totsize << std::endl;
2534  if(do_dump_sparse){
2535  std::ostringstream Ostr;
2536  std::ostringstream ZeroStr;
2537  unsigned int pwr10 = 10;
2538  while(nproc/pwr10 && !(rank/pwr10)){
2539  ZeroStr << "0";
2540  pwr10 *= 10;
2541  }
2542  Ostr << infiles[0] << "_sparse_rnm_"
2543  << ZeroStr.str() << rank+1;
2544  std::ofstream Ouf;
2545  Ouf.open(Ostr.str().c_str());
2546  Ouf << ndof_global_total << std::endl;
2547  Ouf << k._ndof << std::endl;
2548  DumpContents(Ouf,Order," ");
2549  Ouf << std::endl;
2550  k.Dump(Ouf);
2551  Ouf.close();
2552  }
2553  global.FunctionExit("Create Stiffness");
2554  global.FunctionExit("Build Sparsity");
2555  std::vector<bool> element_processed(number_of_elements,false);
2556  std::vector<bool> is_border_element(number_of_elements,false);
2557  for(unsigned int iii = 0;iii < info.nlocal;iii++){
2558  std::vector<Mesh::IndexType>::iterator ndi = NodalDofs[iii].begin();
2559  bool node_processed = false;
2560  while(ndi != NodalDofs[iii].end() && !node_processed){
2561  if(Order[*ndi] <= (unsigned int)my_first_row ||
2562  Order[*ndi] >= (unsigned int)my_last_row){
2563  std::vector<Mesh::IndexType>::iterator bei = dc[iii].begin();
2564  while(bei != dc[iii].end())
2565  is_border_element[*bei++] = true;
2566  node_processed = true;
2567  }
2568  ndi++;
2569  }
2570  }
2571  for(unsigned int iii = info.nlocal;iii < number_of_nodes;iii++){
2572  std::vector<Mesh::IndexType>::iterator bei = dc[iii].begin();
2573  while(bei != dc[iii].end())
2574  is_border_element[*bei++] = true;
2575  }
2576  for(unsigned int iii = 0;iii < number_of_elements;iii++){
2577  std::vector<Mesh::IndexType>::iterator edi = ElementDofs[iii].begin();
2578  bool element_done = false; // renamed to avoid shadowing the element_processed vector above
2579  while(edi != ElementDofs[iii].end() && !element_done){
2580  if(Order[*edi] <= (unsigned int)my_first_row ||
2581  Order[*edi] >= (unsigned int)my_last_row){
2582  is_border_element[iii] = true;
2583  element_done = true;
2584  }
2585  edi++;
2586  }
2587  }
2588  std::vector<bool>::iterator bei = is_border_element.begin();
2589  unsigned int nborder_elements = 0;
2590  while(bei != is_border_element.end()){
2591  if(*bei++)
2592  nborder_elements++;
2593  }
2594  if(debug)
2595  *LogOut << "Border Elements: " << nborder_elements
2596  << "/" << number_of_elements << std::endl;
2597  global.FunctionExit("Renumbering");
2598  }
2599 
2600  // Border Node Mapping
2601  // In order to do border assemblies, I need to have this map:
2602  // BN:<C,id> (i.e. for each Boundary Node, its color and id,
2603  // where id is the index of its entry in the nsend array).
2604  global.FunctionEntry("Border Node Mapping");
2605  std::vector<std::pair<unsigned int,unsigned int> > boundary_node_map(info.nnodes-info.nlocal);
2606  for(unsigned int i = 0;i < info.nborder;i++){
2607  std::vector<Mesh::IndexType>::iterator sni = borders[i].nsend.begin();
2608  unsigned int index = 0;
2609  while(sni != borders[i].nsend.end()){
2610  boundary_node_map[*sni - info.nlocal - 1] = std::make_pair(i+1,index+1);
2611  sni++;
2612  index++;
2613  }
2614  }
2615  global.FunctionExit("Border Node Mapping");
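 // Example (hypothetical ids): if info.nlocal = 100 and remote node 105 is
 // the 3rd entry of borders[1].nsend, then
 //   boundary_node_map[105 - 100 - 1] == std::make_pair(2u,3u)
 // i.e. border color 2 and send-array index 3, both 1-based.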
2616  std::list<Mesh::IndexType> nonborder_queue;
2617  if(true){
2618  // Re-define border elements to be only those touching remote nodes
2619  global.FunctionEntry("Find Border Elements");
2620  is_border_element.clear();
2621  is_border_element.resize(0);
2622  std::vector<bool>().swap(is_border_element); // actually release capacity (the original self-swap was a no-op)
2623  element_processed.clear();
2624  element_processed.resize(0);
2625  element_processed.resize(number_of_elements,false);
2626  border_elements.clear();
2627  border_elements.resize(0);
2628  for(Mesh::IndexType nn = info.nlocal;nn < number_of_nodes;nn++){
2629  std::vector<Mesh::IndexType>::iterator ei = dc[nn].begin();
2630  while(ei != dc[nn].end()){
2631  if(!element_processed[*ei-1]){
2632  element_processed[*ei-1] = true;
2633  border_elements.push_back(*ei);
2634  }
2635  ei++;
2636  }
2637  }
2638  border_elements.sort();
2639  std::list<Mesh::IndexType>::iterator ei = queue.begin();
2640  while(ei != queue.end()){
2641  if(!element_processed[*ei-1])
2642  nonborder_queue.push_back(*ei);
2643  ei++;
2644  }
2645  if(LogOut && verblevel){
2646  *LogOut << "Found " << border_elements.size() << " border elements." << std::endl
2647  << "Found " << nonborder_queue.size() << " non border elements."
2648  << std::endl;
2649  if(array_checking){
2650  *LogOut << "Border Elements:" << std::endl;
2651  DumpContents(*LogOut,border_elements," ");
2652  *LogOut << std::endl << "NonBorder Elements:" << std::endl;
2653  DumpContents(*LogOut,nonborder_queue," ");
2654  *LogOut << std::endl;
2655  }
2656  }
2657  global.FunctionExit("Find Border Elements");
2658  }
2659  element_processed.clear();
2660  element_processed.resize(0);
2661  element_processed.resize(number_of_elements,false);
2662  global.FunctionExit("Assembly Prep");
2663  if(false){
2664  global.FunctionEntry("Search Test");
2665  Mesh::Connectivity::iterator ci = symbstiff.begin();
2666  while(ci != symbstiff.end()){
2667  std::vector<Mesh::IndexType>::iterator ssrbegin = ci->begin();
2668  std::vector<Mesh::IndexType>::iterator ssrend = ci->end();
2669  std::vector<Mesh::IndexType>::iterator ssi = ci->begin();
2670  std::vector<Mesh::IndexType>::iterator curpos = ci->begin();
2671  std::vector<Mesh::IndexType>::iterator last_one = ci->begin();
2672  while(ssi != ci->end()){
2673  last_one = curpos;
2674  curpos = std::lower_bound(ssrbegin,ssrend,*ssi++);
2675  }
2676  ci++;
2677  }
2678  global.FunctionExit("Search Test");
2679  }
2680  // return(0);
2681  comm.Barrier();
2682  // Now go for a true assembly
2683  // First, work up some fake data, how about all 1.0's.
2684  std::vector<double> dofdat(100000,1.0);
2685 
2686  global.FunctionEntry("Assembly Processing");
2687  Mesh::IndexType nsearches = 0;
2689 
2690  // Post receives
2691  global.FunctionEntry("Posting receives");
2692  comm.RecvAll();
2693  global.FunctionExit("Posting receives");
2694  comm.Barrier();
2695 
2696  Mesh::IndexType bordersearch = 0;
2697  if(true){
2698  global.FunctionEntry("Send Buffer Assembly");
2699  std::list<Mesh::IndexType>::iterator bei = border_elements.begin();
2700  while(bei != border_elements.end()){
2701  Mesh::IndexType eindex = *bei++ - 1;
2702  element_processed[eindex]=true;
2703  Mesh::IndexType endof = element_ndofs[eindex];
2704  if(!((eindex+1) > 0 && (eindex+1) <= ElementDofs.Nelem())){
2705  *ErrOut << "ERROR: ElementDofs[" << eindex << "].size() = " << ElementDofs[eindex].size()
2706  << " and ElementDofs.size() = " << ElementDofs.Nelem() << std::endl;
2707  return(1);
2708  }
2709  Mesh::IndexType nedof = ElementDofs.Esize(eindex+1);
2710  Mesh::IndexType search = FEM::AssembleBorderElementII(econ[eindex],ElementDofs[eindex],endof,nedof,
2711  NodalDofs,RemoteNodalDofs,k,borders,
2712  boundary_node_map,dofdat,info,LogOut,debug);
2713  bordersearch+=search;
2714 
2715  }
2716  global.FunctionExit("Send Buffer Assembly");
2717  }
2718  comm.Barrier();
2719  global.FunctionEntry("Posting Sends");
2720  comm.SendAll();
2721  global.FunctionExit("Posting Sends");
2722  comm.Barrier();
2723  if(false){
2724  global.FunctionEntry("Assembly Loop");
2725  // loop over elements
2726  // for(Mesh::IndexType nnn = 0;nnn < number_of_elements;nnn++){
2727  std::list<Mesh::IndexType>::iterator ei = nonborder_queue.begin();
2728  while(ei != nonborder_queue.end()){
2729  // std::list<Mesh::IndexType>::iterator ei = queue.begin();
2730  // while(ei != queue.end()){
2731  Mesh::IndexType nnn = *ei++ -1;
2732  Mesh::IndexType search = 0;
2733  // if(!element_processed[nnn]){
2734  Mesh::IndexType endof = element_ndofs[nnn];
2735  Mesh::IndexType nedof = ElementDofs.Esize(nnn+1);
2736  search = FEM::AssembleLocalElementII(econ[nnn],ElementDofs[nnn],endof,nedof,
2737  NodalDofs,k,dofdat,info);
2738  nsearches+=search;
2739  // }
2740  }
2741  global.FunctionExit("Assembly Loop");
2742  }
2743  Mesh::IndexType fast_searches = 0;
2744  if(true){
2745  global.FunctionEntry("Fast Assembly");
2746  fast_searches = FEM::FastAssembleLocalElements(nonborder_queue,econ,k,
2747  NodalDofs,ElementDofs,element_ndofs,info);
2748  global.FunctionExit("Fast Assembly");
2749  }
2750  comm.Barrier();
2751  global.FunctionEntry("Wait for messages");
2752  comm.WaitAll();
2753  global.FunctionExit("Wait for messages");
2754  comm.Barrier();
2755  // Need to add code to assemble the recv'd dofs to K
2756  global.FunctionEntry("Recv Buffer Assembly");
2757  Mesh::IndexType postsearch = 0;
2758  if(true){
2759  for(unsigned int i = 0;i < info.nborder;i++){
2760  int search = RecvBufAssembly(borders[i],NodalDofs,k,dofdat);
2761  postsearch+=search;
2762  }
2763  }
2764  global.FunctionExit("Recv Buffer Assembly");
2765  global.FunctionExit("Assembly Processing");
2766  if(StdOut && verblevel)
2767  *StdOut << "Slow Assembly searches = " << nsearches << std::endl
2768  << "Border searches = " << bordersearch << std::endl
2769  << "Recv Buf searches = " << postsearch << std::endl
2770  << "Fast Assembly searches = " << fast_searches << std::endl;
2771  comm.ClearRequests();
2772  comm.Barrier();
2773  global.FunctionExit("Assembly Function");
2774  }
2775  comm.Barrier();
2776 
2777 
2778 
2779 
2780  // bool do_partitioning = true;
2781  if(do_partitioning){
2782  if(nproc > 1){
2783  if(ErrOut)
2784  *ErrOut << "Error: partitioning is for serial runs." << std::endl;
2785  comm.SetExit(1);
2786  }
2787  if(comm.Check())
2788  return(1);
2789  global.FunctionEntry("Partitioning");
2790  std::ifstream Inf;
2791  std::string MeshName(infiles[0]);
2792  std::string MeshBaseName(MeshName.substr(0,MeshName.find_last_of(".")));
2793  Inf.open(MeshName.c_str());
2794  if(!Inf){
2795  if(ErrOut)
2796  *ErrOut << "Failed to open " << MeshName << std::endl;
2797  return(1);
2798  }
2799  global.FunctionEntry("Read Mesh");
2800  Mesh::IndexType number_of_nodes = 0;
2801  if(true){
2802  Mesh::NodalCoordinates nc;
2803  Inf >> nc;
2804  number_of_nodes = nc.Size();
2805  nc.destroy();
2806  }
2807  if(StdOut && verblevel)
2808  *StdOut << "Number of nodes: " << number_of_nodes << std::endl;
2809  Mesh::Connectivity econ;
2810  Inf >> econ;
2811  global.FunctionExit("Read Mesh");
2812  Inf.close();
2813  econ.ShrinkWrap();
2814  Mesh::IndexType number_of_elements = econ.Nelem();
2815  if(StdOut && verblevel)
2816  *StdOut << "Number of elements: " << number_of_elements
2817  << std::endl;
2818  Mesh::IndexType N = npartitions;
2819  econ.SyncSizes();
2820  // re-orient the elements if needed
2821  if(do_reorient){ // don't do this for 2d geometry (yet)
2822  Mesh::NodalCoordinates nc;
2823  Inf.open(MeshName.c_str());
2824  if(!Inf){
2825  if(ErrOut)
2826  *ErrOut << "Failed to open " << MeshName << std::endl;
2827  return(1);
2828  }
2829  global.FunctionEntry("ReadCoords");
2830  Inf >> nc;
2831  Inf.close();
2832  global.FunctionExit("ReadCoords");
2833  global.FunctionEntry("Element Reorientation");
2834  for(Mesh::IndexType element_being_processed = 1;
2835  element_being_processed <= number_of_elements;
2836  element_being_processed++)
2837  {
2838  Mesh::IndexType index = element_being_processed - 1;
2839  Mesh::IndexType size_of_element = econ.Esize(element_being_processed);
2840  Mesh::GenericElement ge(size_of_element);
2841  if(ge.Inverted(econ[index],nc))
2842  ge.ReOrient(econ[index]);
2843  }
2844  global.FunctionExit("Element Reorientation");
2845  }
2846  // Mesh::IndexType N2 = 0;
2847  // if(infiles.size() > 1){
2848  // std::istringstream Istr(infiles[1]);
2849  // Istr >> N;
2850  // }
2851  // if(infiles.size() > 2){
2852  // std::istringstream Istr(infiles[2]);
2853  // Istr >> N2;
2854  // }
2855  Mesh::Connectivity dc;
2856  global.FunctionEntry("DualCon");
2857  econ.Inverse(dc,number_of_nodes);
2858  global.FunctionExit("DualCon");
2859  dc.ShrinkWrap();
2860  Mesh::IndexType number_of_faces = 0;
2861  Mesh::Connectivity F_N;
2862  Mesh::Connectivity E_F;
2863  std::vector<Mesh::SymbolicFace> sf;
2864  econ.SyncSizes();
2865  global.FunctionEntry("GetFaceConnectivites");
2866  econ.BuildFaceConnectivity(F_N,E_F,sf,dc);
2867  global.FunctionExit("GetFaceConnectivites");
2868  global.FunctionEntry("Shrinking");
2869  std::vector<Mesh::SymbolicFace>(sf).swap(sf);
2870  global.FunctionExit("Shrinking");
2871  global.FunctionEntry("Creating Graph");
2872  Mesh::Connectivity nl2(number_of_elements);
2873  std::vector<Mesh::SymbolicFace>::iterator sfi = sf.begin();
2874  while(sfi != sf.end()){
2875  if(sfi->second.first > 0){
2876  nl2[sfi->first.first-1].push_back(sfi->second.first);
2877  nl2[sfi->second.first-1].push_back(sfi->first.first);
2878  }
2879  sfi++;
2880  }
2881  global.FunctionExit("Creating Graph");
2882  // -------------- Partitioning --------------------
2883  std::vector<idxtype> adj;
2884  std::vector<idxtype> xadj;
2885  global.FunctionEntry("Container2CSR");
2886  MultiContainer2CSR< std::vector< std::vector<Mesh::IndexType> >,
2887  std::vector<Mesh::IndexType>,
2888  std::vector<idxtype>,idxtype>
2889  (xadj,adj,nl2);
2890  global.FunctionExit("Container2CSR");
2891  nl2.destroy();
2892  std::vector<idxtype> order(number_of_elements,0);
2893  std::vector<idxtype> sizes(2,0);
2894  idxtype wgtflag = 0;
2895  idxtype numflag = 0;
2896  idxtype nparts = N;
2897  idxtype options[5];
2898  idxtype edgecuts = 0;
2899  options[0] = 1;
2900  options[1] = 3;
2901  options[2] = 15;
2902  std::vector<idxtype> vtxdist(2,0);
2903  vtxdist[1] = number_of_elements;
2904  std::vector<idxtype> part(number_of_elements,0);
2905  global.FunctionEntry("Metis");
2906  int mpicomm = MPI_COMM_WORLD; // renamed from "comm" to avoid shadowing the communicator object
2907  // int ncon = 0;
2908  // float stupid = 0;
2909  // float ubvecstupid = 0;
2910  if(array_checking){
2911  *LogOut << "Vtxdist:" << std::endl;
2912  DumpContents(*LogOut,vtxdist," ");
2913  *LogOut << std::endl << "Xadj:" << std::endl;
2914  DumpContents(*LogOut,xadj," ");
2915  *LogOut << std::endl << "Adj:" << std::endl;
2916  DumpContents(*LogOut,adj," ");
2917  *LogOut << std::endl;
2918  }
2919  if(false){
2920  global.FunctionEntry("Renumbering");
2921 #ifdef PMETIS
2922  ParMETIS_V3_NodeND(&vtxdist[0],&xadj[0],&adj[0],&numflag,
2923  options,&order[0],&sizes[0],&mpicomm);
2924 #endif
2925  global.FunctionExit("Renumbering");
2926  if(array_checking){
2927  *LogOut << "Order:" << std::endl;
2928  DumpContents(*LogOut,order," ");
2929  *LogOut << std::endl << "Sizes:" << std::endl;
2930  DumpContents(*LogOut,sizes," ");
2931  *LogOut << std::endl;
2932  }
2933  }
2934 #ifdef PMETIS
2935  ParMETIS_V3_PartKway(&vtxdist[0],&xadj[0],&adj[0],NULL,NULL,&wgtflag,
2936  &numflag,NULL,&nparts,NULL,NULL,
2937  options,&edgecuts,&part[0],&mpicomm);
2938 #else
2939  METIS_PartGraphKway(&vtxdist[1],&xadj[0],&adj[0],NULL,NULL,&wgtflag,
2940  &numflag,&nparts,options,&edgecuts,&part[0]);
2941 #endif
2942  global.FunctionExit("Metis");
2943  // ------------------------------------------------
2944  // Take part and create the 1-shifted version into
2945  // a multicontainer
2946  Mesh::Connectivity pcon;
2947  pcon.resize(number_of_elements);
2948  for(Mesh::IndexType i = 0;i < number_of_elements;i++)
2949  pcon[i].push_back(part[i]+1);
2950  pcon.Sync();
2951  pcon.SyncSizes();
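 // pcon is just the METIS part array recast as a 1-based multicontainer:
 // a (hypothetical) part = {0,2,1} becomes pcon = { {1},{3},{2} }, which
 // lets it be inverted below into C[E], the element list of each color.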
2952  // std::ofstream Ouf;
2953  // Ouf.open("part");
2954  // Ouf << pcon << std::endl;
2955  // Ouf.close();
2956  if(StdOut && verblevel)
2957  *StdOut << "Number of edgecuts: " << edgecuts << std::endl;
2958  if(false){
2959  if(StdOut && verblevel)
2960  *StdOut << "Partition Table: ";
2961  DumpContents(*StdOut,part," ");
2962  if(StdOut && verblevel)
2963  *StdOut << std::endl;
2964  }
2965  part.resize(0);
2966  std::vector<idxtype>().swap(part); // actually release capacity (the original self-swap was a no-op)
2967  Mesh::IndexType Nparts = nparts;
2968 
2969  // Invert the partition table
2970  global.FunctionEntry("Dual Pcon");
2971  Mesh::Connectivity dual_pcon; // E[C] -> C[E]
2972  pcon.Inverse(dual_pcon,nparts);
2973  global.FunctionExit("Dual Pcon");
2974  dual_pcon.SyncSizes();
2975 
2976  global.FunctionEntry("GetAdj nodecolors");
2977  Mesh::Connectivity nodecolors; // N[E]%E[C] = N[C]
2978  dc.GetAdjacent(nodecolors,pcon,nparts,true);
2979  nodecolors.SyncSizes();
2980  global.FunctionExit("GetAdj nodecolors");
2981 
2982  global.FunctionEntry("Inverse colornodes");
2983  Mesh::Connectivity colorsofnodes; // N[C] -> C[N]
2984  nodecolors.Inverse(colorsofnodes,nparts);
2985  global.FunctionExit("Inverse colornodes");
2986  colorsofnodes.SyncSizes();
2987 
2988  global.FunctionEntry("ColorNeighbors");
2989  Mesh::Connectivity colorneighbors; // C[N]%N[C] = C[C]
2990  colorsofnodes.GetNeighborhood(colorneighbors,nodecolors,true,true);
2991  global.FunctionExit("ColorNeighbors");
2992  colorneighbors.SyncSizes();
2993 
2994  // This is enough information to fully describe the partitioning
2995  // to each partition and form the communication lists. We can
2996  // write this to file, or just pass it along in messages to
2997  // each partition.
2998 
2999  // For each color, collect its border nodes
3000  // produces C[BN] (border nodes for each color)
3001  Mesh::Connectivity bordercon;
3002  nodecolors.InverseDegenerate(bordercon,nparts);
3003  // Let's determine and report the communication imbalance
3004  bool report_each = false;
3005  if(report_each)
3006  if(StdOut && verblevel)
3007  *StdOut << "Nshared nodes/partition:" << std::endl;
3008  double mean_nbnodes = 0;
3009  Mesh::IndexType max_nbnodes = 0;
3010  Mesh::IndexType min_nbnodes = number_of_nodes;
3011  for(Mesh::IndexType iii = 0;iii < Nparts;iii++){
3012  Mesh::IndexType nbsize = bordercon[iii].size();
3013  if(report_each && StdOut)
3014  *StdOut << "Partition " << iii+1 << ": "
3015  << nbsize << std::endl;
3016  mean_nbnodes += static_cast<double>(nbsize);
3017  if(nbsize > max_nbnodes) max_nbnodes = nbsize;
3018  if(nbsize < min_nbnodes) min_nbnodes = nbsize;
3019  }
3020  mean_nbnodes /= static_cast<double>(nparts);
3021  if(StdOut && verblevel)
3022  *StdOut << "(Max/Min/Mean) shared nodes/partition: (" << max_nbnodes << "/"
3023  << min_nbnodes << "/" << mean_nbnodes << ")" << std::endl;
3024 
3025  Mesh::IndexVec bn_index_map;
3026  std::map<Mesh::IndexType,Mesh::IndexType> bnodemap;
3027  std::list<Mesh::IndexType> bnodes;
3028  global.FunctionEntry("Flatten");
3029  Flatten<Mesh::Connectivity,std::vector<Mesh::IndexType>,std::list<Mesh::IndexType> >
3030  (bordercon,bnodes);
3031  global.FunctionExit("Flatten");
3032  global.FunctionEntry("SortUnique");
3033  bnodes.sort();
3034  bnodes.unique();
3035  global.FunctionExit("SortUnique");
3036  Mesh::IndexType number_of_border_nodes = bnodes.size();
3037  bn_index_map.resize(number_of_border_nodes);
3038  Mesh::IndexType i = 0;
3039  std::list<Mesh::IndexType>::iterator eli = bnodes.begin();
3040  while(eli != bnodes.end()){
3041  bn_index_map[i] = *eli;
3042  bnodemap.insert(std::make_pair(*eli,i+1));
3043  eli++;
3044  i++;
3045  }
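      // bn_index_map: border-node index -> global node id
      // bnodemap:     global node id -> 1-based border-node index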
3046 
3047  global.FunctionEntry("MapBorderNodes");
3048  // C[BN] -> BN[C], but need BN to be mapped in 1-NBN range
3049  Mesh::Connectivity bordercon_mapped;
3050  MapElements<Mesh::Connectivity,Mesh::Connectivity,
3051  std::vector<Mesh::IndexType>,std::map<Mesh::IndexType,Mesh::IndexType> >
3052  (bordercon,bordercon_mapped,bnodemap);
3053  global.FunctionExit("MapBorderNodes");
3054  Mesh::Connectivity bcon_dual;
3055  bordercon_mapped.Sync();
3056  bordercon_mapped.Inverse(bcon_dual,number_of_border_nodes);
 3057  // Now we have BN[C], the list of colors touching each border
 3058  // node. We step through and assign each border node a single
 3059  // owning color, producing bn[C], where bn is the subset of BN
 3060  // that color C owns. Owners are chosen so as to equalize the
 3061  // number of communicated nodes on each processor.
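      // e.g. a border node touching colors {2,5} is assigned to whichever
      // of the two currently owns fewer border nodes; the random initial
      // pick only matters when the candidates are equally loaded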
3064  std::srand((unsigned)std::time(0));
3065  Mesh::Connectivity owner;
3066  std::vector<Mesh::IndexType> ncomnodes(nparts,0);
3067  owner.resize(number_of_border_nodes);
3068  Mesh::Connectivity::iterator bni = bcon_dual.begin();
3069  Mesh::IndexType ownind = 0;
3070  while(bni != bcon_dual.end()){
3071  Mesh::IndexType ncolors = bni->size();
3072  Mesh::IndexType color = (std::rand()%ncolors)+1;
3073  for(Mesh::IndexType ci = 0;ci < ncolors;ci++)
3074  if(ncomnodes[(*bni)[color-1] - 1] > ncomnodes[(*bni)[ci]-1])
3075  color = ci+1;
3076  std::vector<Mesh::IndexType>::iterator bnici = bni->begin();
3077  for(Mesh::IndexType ci = 1;ci < color;ci++)
3078  bnici++;
3079  // if(bordercon[*bnici-1].size() > static_cast<Mesh::IndexType>(mean_nbnodes))
3080  // color = (std::rand()%ncolors)+1;
3081  owner[ownind++].push_back(*bnici);
3082  ncomnodes[*bnici-1]++;
3083  bni++;
3084  }
3085  // bn[C] -> C[bn]
3086  Mesh::Connectivity dual_owner;
3087  owner.Sync();
3088  owner.Inverse(dual_owner,nparts);
3089  mean_nbnodes = 0;
3090  min_nbnodes = number_of_nodes;
3091  max_nbnodes = 0;
3092  for(Mesh::IndexType iii = 0;iii < Nparts;iii++){
3093  Mesh::IndexType nbsize = dual_owner[iii].size();
3094  if(report_each)
3095  if(StdOut && verblevel)
3096  *StdOut << "Partition " << iii+1 << ": "
3097  << nbsize << std::endl;
3098  mean_nbnodes += static_cast<double>(nbsize);
3099  if(nbsize > max_nbnodes) max_nbnodes = nbsize;
3100  if(nbsize < min_nbnodes) min_nbnodes = nbsize;
3101  }
3102  mean_nbnodes /= static_cast<double>(nparts);
3103  if(StdOut && verblevel)
3104  *StdOut << "(Max/Min/Mean) recvd nodes/partition: (" << max_nbnodes << "/"
3105  << min_nbnodes << "/" << mean_nbnodes << ")" << std::endl;
3106 
3107 
3108  // Populate some information about each color
3109  std::vector<Mesh::PartInfo> ColorInfo(nparts);
3110  for(Mesh::IndexType colind = 0;colind < Nparts;colind++){
3111  ColorInfo[colind].part = colind+1; // partition id
3112  ColorInfo[colind].nelem = dual_pcon[colind].size(); // number of elems
3113  ColorInfo[colind].nborder = colorneighbors[colind].size(); // number of borders
3114  ColorInfo[colind].nnodes = colorsofnodes[colind].size(); // total num nodes
3115  ColorInfo[colind].nshared = bordercon[colind].size(); // num shared nodes
3116  ColorInfo[colind].nown = dual_owner[colind].size(); // num shared nodes owned
3117  }
3118 
3119  global.FunctionEntry("Colorization");
3120  global.FunctionEntry("Colorization 1");
 3121  // First, let's loop through the nodes and populate each color's
 3122  // non-shared node list.
3123  Mesh::Connectivity PartitionedNodes;
3124  PartitionedNodes.resize(Nparts);
3125  Mesh::IndexVec psize;
3126  Primitive::MultiContainer<Mesh::IndexType,Mesh::IndexType>::VecMap pglob2loc;
3127  pglob2loc.resize(Nparts);
3128  psize.resize(Nparts,0);
3129  for(Mesh::IndexType jjj = 0;jjj < number_of_nodes;jjj++){
3130  Mesh::IndexType node_id = jjj+1;
3131  if(nodecolors.Esize(node_id) == 1){ // then it's not shared
3132  Mesh::IndexType thecolor = nodecolors[node_id-1][0];
3133  PartitionedNodes[thecolor-1].push_back(node_id);
3134  psize[thecolor-1]++;
3135  pglob2loc[thecolor-1].insert(std::make_pair(node_id,psize[thecolor-1]));
3136  }
3137  }
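      // local ids are assigned in insertion order, so every color numbers
      // its interior (non-shared) nodes first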
 3138  // Now let's add each color's shared nodes that it actually owns
 3139  // to its node list
3140  for(Mesh::IndexType jjj = 0;jjj < Nparts;jjj++){
3141  std::vector<Mesh::IndexType>::iterator doi = dual_owner[jjj].begin();
3142  while(doi != dual_owner[jjj].end()){
3143  Mesh::IndexType border_node_id = *doi++;
3144  Mesh::IndexType global_node_id = bn_index_map[border_node_id-1];
3145  PartitionedNodes[jjj].push_back(global_node_id);
3146  psize[jjj]++;
3147  pglob2loc[jjj].insert(std::make_pair(global_node_id,psize[jjj]));
3148  }
3149  }
 3150  //
 3151  // That is all we can do for now. The remaining nodes on each
 3152  // color -- those owned by another color -- can only be added
 3153  // after the border structures below have been built.
 3154  //
3156  global.FunctionExit("Colorization 1");
3157 
3158 
3159  global.FunctionEntry("Colorization 2");
3160  // Initial prep of border description for each color
3161  Primitive::MultiContainer<Mesh::Border>::VecVec ColorBorders;
3162  Mesh::VecMap colormap;
3163  ColorBorders.resize(nparts);
3164  colormap.resize(nparts);
3165  for(Mesh::IndexType iii = 0;iii<Nparts;iii++){
3166  ColorBorders[iii].resize(colorneighbors[iii].size());
3167  Mesh::IndexVec::iterator cni = colorneighbors[iii].begin();
3168  Mesh::IndexType colorind = 0;
3169  while(cni != colorneighbors[iii].end()){
3170  colormap[iii].insert(std::make_pair(*cni,colorind+1));
3171  ColorBorders[iii][colorind].rpart = *cni;
3172  colorind++;
3173  cni++;
3174  }
3175  }
3176  global.FunctionExit("Colorization 2");
3177 
3178  global.FunctionEntry("Colorization 3");
3179  // Populate each color's border structure
3180  // Loop through all colors
3181  for(Mesh::IndexType jjj = 0;jjj < Nparts;jjj++){
3182  Mesh::IndexType this_color = jjj+1;
3183  // Loop thru all border nodes this color owns
3184  Mesh::IndexVec::iterator nrcvIt = dual_owner[jjj].begin();
3185  while(nrcvIt != dual_owner[jjj].end()){
3186  Mesh::IndexType border_node_id = *nrcvIt++;
3187  Mesh::IndexType global_bn_id = bn_index_map[border_node_id-1];
3188  // Loop through the colors that touch this border node
3189  Mesh::IndexVec::iterator ncIt = nodecolors[global_bn_id-1].begin();
3190  while(ncIt != nodecolors[global_bn_id-1].end()){
3191  Mesh::IndexType remote_color_id = *ncIt++;
3192  if(remote_color_id != this_color){
3193  // Add this node to the border data structure for the local color
3194  Mesh::IndexType local_border_id = colormap[jjj][remote_color_id];
3195  assert(ColorBorders[jjj][local_border_id-1].rpart == remote_color_id);
3196  ColorBorders[jjj][local_border_id-1].nrecv.push_back(global_bn_id);
3197  // Add this node to the border data structure for remote procs
3198  Mesh::IndexType remote_border_id = colormap[remote_color_id-1][this_color];
3199  assert(ColorBorders[remote_color_id-1][remote_border_id-1].rpart == this_color);
3200  ColorBorders[remote_color_id-1][remote_border_id-1].nsend.push_back(global_bn_id);
3201  }
3202  }
3203 
3204  }
3205  }
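      // at this point, for each shared node, the owning color has recorded
      // it in its border's nrecv list and every other touching color has
      // recorded it in the matching border's nsend list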
3206  global.FunctionExit("Colorization 3");
3207 
 3208  // Continue populating the nodes for each color: the remotely
 3209  // owned nodes on each color have now been identified, so they
 3210  // can be appended to each color's node list.
3211  // Loop over colors
3212  global.FunctionEntry("Colorization 4");
3213  for(Mesh::IndexType jjj = 0;jjj < Nparts;jjj++){
3214  std::list<Mesh::IndexType> sendnodes;
3215  // Loop over each color's border
3216  for(Mesh::IndexType iii = 0;iii < ColorInfo[jjj].nborder;iii++){
3217  std::vector<Mesh::IndexType>::iterator bni = ColorBorders[jjj][iii].nsend.begin();
3218  while(bni != ColorBorders[jjj][iii].nsend.end())
3219  sendnodes.push_back(*bni++);
3220  }
3221  sendnodes.sort();
3222  sendnodes.unique();
3223  std::list<Mesh::IndexType>::iterator sni = sendnodes.begin();
3224  while(sni != sendnodes.end()){
3225  Mesh::IndexType send_node_id = *sni++;
3226  PartitionedNodes[jjj].push_back(send_node_id);
3227  psize[jjj]++;
3228  pglob2loc[jjj].insert(std::make_pair(send_node_id,psize[jjj]));
3229  }
3230  }
3231  global.FunctionExit("Colorization 4");
3232  //
3233  // Now do some color validation
3234  global.FunctionEntry("Color validation");
3235  for(Mesh::IndexType jjj = 0;jjj < Nparts;jjj++){
3236  assert(psize[jjj] == ColorInfo[jjj].nnodes);
3237  }
3238  global.FunctionExit("Color validation");
3239  global.FunctionExit("Colorization");
3240  global.FunctionEntry("Writing Partitions");
3241  // Loop through all colors
3242  global.FunctionEntry("Read Coords");
 3243  Mesh::NodalCoordinates nc;
 3244  Inf.open(MeshName.c_str());
3245  if(!Inf){
3246  if(ErrOut)
3247  *ErrOut << "Failed to open " << MeshName << std::endl;
3248  return(1);
3249  }
3250  Inf >> nc;
3251  Inf.close();
3252  global.FunctionExit("Read Coords");
3253  std::vector<std::string> nodal_props(number_of_nodes);
3254  std::vector<std::string> elemental_props(number_of_elements);
3255  bool mesh_has_properties = false;
3256  std::string propname = MeshBaseName + ".prop";
3257  Inf.open(propname.c_str());
3258  if(Inf){
3259  mesh_has_properties = true;
3260  global.FunctionEntry("Read Properties");
3261  for(unsigned int propcount = 0;propcount < number_of_nodes;propcount++)
3262  std::getline(Inf,nodal_props[propcount]);
 3263  std::getline(Inf,elemental_props[0]); // consume one separator line (value is overwritten by the loop below)
3264  for(unsigned int propcount = 0;propcount < number_of_elements;propcount++)
3265  std::getline(Inf,elemental_props[propcount]);
3266  Inf.close();
3267  global.FunctionExit("Read Properties");
3268  }
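      // for each partition p: <base>.p.info records the partition counts;
      // <base>.p.pmesh holds the node count and coordinates, the element
      // count and localized connectivity, then the border count and, per
      // border, the remote partition id, the recv/send counts, and the
      // local/global id pairs of the border nodes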
3269  for(Mesh::IndexType jjj = 0;jjj < Nparts;jjj++){
3270  std::ostringstream Ostr;
3271  Ostr << MeshBaseName << "." << jjj+1 << ".info";
3272  std::ofstream OutFile;
3273  std::ofstream OutProp;
3274  OutFile.open(Ostr.str().c_str());
3275  if(mesh_has_properties){
3276  Ostr.str("");
3277  Ostr << MeshBaseName << "." << jjj+1 << ".prop";
3278  OutProp.open(Ostr.str().c_str());
3279  }
3280  OutFile << Nparts << std::endl << jjj+1 << std::endl
3281  << ColorInfo[jjj].nelem << std::endl
3282  << ColorInfo[jjj].nnodes << std::endl
3283  << ColorInfo[jjj].nborder << std::endl
3284  << ColorInfo[jjj].nshared << std::endl
3285  << ColorInfo[jjj].nown << std::endl;
3286  OutFile.close();
3287  Ostr.str("");
3288  Ostr << MeshBaseName << "." << jjj+1 << ".pmesh";
3289  OutFile.open(Ostr.str().c_str());
3290  // Write the nodal coordinates
3291  OutFile << ColorInfo[jjj].nnodes << std::endl;
3292  for(Mesh::IndexType iii = 0;iii < ColorInfo[jjj].nnodes;iii++){
3293  Mesh::IndexType node_id = PartitionedNodes[jjj][iii];
3294  OutFile << nc.x(node_id) << " " << nc.y(node_id) << " "
3295  << nc.z(node_id) << std::endl;
 3296  if(OutProp.is_open()) // a never-opened ofstream still converts to true, so test is_open()
 3297  OutProp << nodal_props[node_id-1] << std::endl;
3298  }
 3299  if(OutProp.is_open())
 3300  OutProp << std::endl;
3301  // Write the element connectivity
3302  OutFile << ColorInfo[jjj].nelem << std::endl;
3303  for(Mesh::IndexType iii = 0;iii < ColorInfo[jjj].nelem;iii++){
3304  Mesh::IndexType eid = dual_pcon[jjj][iii];
3305  std::vector<Mesh::IndexType>::iterator eni = econ[eid-1].begin();
 3306  while(eni != econ[eid-1].end()){
 3307  OutFile << pglob2loc[jjj][*eni];
 3308  eni++;
 3309  if(eni != econ[eid-1].end()) // space between entries, none trailing
 3310  OutFile << " ";
 3311  }
3312  OutFile << std::endl;
 3313  if(OutProp.is_open())
 3314  OutProp << elemental_props[eid-1] << std::endl;
3315  }
 3316  if(OutProp.is_open())
 3317  OutProp.close();
3318  // Write the borders
3319  OutFile << ColorInfo[jjj].nborder << std::endl;
3320  for(Mesh::IndexType iii = 0;iii < ColorInfo[jjj].nborder;iii++){
3321  OutFile << ColorBorders[jjj][iii].rpart << " "
3322  << ColorBorders[jjj][iii].nrecv.size() << " "
3323  << ColorBorders[jjj][iii].nsend.size() << std::endl;
3324  std::vector<Mesh::IndexType>::iterator rni = ColorBorders[jjj][iii].nrecv.begin();
3325  while(rni != ColorBorders[jjj][iii].nrecv.end()){
3326  OutFile << pglob2loc[jjj][*rni] << " " << *rni << std::endl;
3327  rni++;
3328  }
3329  rni = ColorBorders[jjj][iii].nsend.begin();
3330  while(rni != ColorBorders[jjj][iii].nsend.end()){
3331  OutFile << pglob2loc[jjj][*rni] << " " << *rni << std::endl;
3332  rni++;
3333  }
3334  }
3335  OutFile.close();
3336  }
3337  global.FunctionExit("Writing Partitions");
3338  global.FunctionExit("Partitioning");
3339  }
3340 
3860  if(do_clone){
3861  if(nproc > 1){
3862  if(ErrOut)
3863  *ErrOut << "Error: Cloning is a serial operation." << std::endl;
3864  comm.SetExit(1);
3865  }
3866  if(infiles.empty()){
3867  if(ErrOut)
3868  *ErrOut << "Error: Nothing to clone." << std::endl;
3869  comm.SetExit(1);
3870  }
3871  if(comm.Check())
3872  return(1);
3873  global.FunctionEntry("Cloning");
3874  std::ifstream Inf;
3875  std::string MeshName(infiles[0]);
3876  unsigned int N = 0;
3877  if(infiles.size() > 1){
3878  std::istringstream Instr(infiles[1]);
3879  Instr >> N;
3880  }
3881  if(N > 0)
3882  npartitions = N;
3883  if(StdOut && verblevel)
3884  *StdOut << "Cloning " << MeshName << " into " << npartitions << " partitions."
3885  << std::endl;
3886  Inf.open(MeshName.c_str());
3887  if(!Inf){
3888  if(ErrOut)
3889  *ErrOut << "Failed to open " << MeshName << std::endl;
3890  return(1);
3891  }
3892  global.FunctionEntry("Read Mesh");
3893  Mesh::IndexType number_of_nodes = 0;
 3894  Mesh::NodalCoordinates nc;
 3895  Inf >> nc;
3896  number_of_nodes = nc.Size();
3897  std::vector<double> bounds(6,0.0);
3898  Mesh::GetCoordinateBounds(nc,bounds);
3899  if(StdOut && verblevel)
3900  *StdOut << "Number of nodes: " << number_of_nodes << std::endl
3901  << "Mesh Bounds: (" << bounds[0] << "," << bounds[1]
3902  << ") (" << bounds[2] << "," << bounds[3]
3903  << ") (" << bounds[4] << "," << bounds[5] << ")" << std::endl;
3904  double Xmin = bounds[0];
3905  double Xmax = bounds[1];
3906  Mesh::Connectivity econ;
3907  Inf >> econ;
3908  Inf.close();
3909  econ.ShrinkWrap();
3910  econ.SyncSizes();
3911  global.FunctionExit("Read Mesh");
3912  Mesh::IndexType number_of_elements = econ.Nelem();
3913  if(StdOut && verblevel)
3914  *StdOut << "Number of elements: " << number_of_elements
3915  << std::endl;
3916  global.FunctionEntry("Forming Clones");
3917  // Identify the boundary nodes
3918  std::vector<Mesh::IndexType> left_owned;
3919  std::vector<Mesh::IndexType> left_remote;
3920  std::vector<Mesh::IndexType> right_owned;
3921  std::vector<Mesh::IndexType> right_remote;
3922  std::vector<Mesh::IndexType> non_boundary;
3923  bool left_polarity = true;
3924  bool right_polarity = false;
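      // alternate ownership along each boundary so that roughly half of
      // each side's shared nodes are owned locally and half remotely; the
      // exact floating-point comparison below is intentional, since
      // Xmin/Xmax were taken from these same stored coordinates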
3925  for(Mesh::IndexType nn = 1;nn <= number_of_nodes;nn++){
3926  if(nc.x(nn) == Xmin){
3927  if(left_polarity){
3928  left_owned.push_back(nn);
3929  left_polarity = false;
3930  }
3931  else{
3932  left_remote.push_back(nn);
3933  left_polarity = true;
3934  }
3935  }
3936  else if(nc.x(nn) == Xmax){
3937  if(right_polarity){
3938  right_owned.push_back(nn);
3939  right_polarity = false;
3940  }
3941  else{
3942  right_remote.push_back(nn);
3943  right_polarity = true;
3944  }
3945  }
3946  else
3947  non_boundary.push_back(nn);
3948  }
3949  Mesh::IndexType nnbn = non_boundary.size();
3950  Mesh::IndexType nleft = left_owned.size() + left_remote.size();
3951  Mesh::IndexType nright = right_owned.size() + right_remote.size();
3952  Mesh::IndexType nshared_nodes = nleft + nright;
3953  N = npartitions;
3954  Mesh::IndexType nown_left = left_owned.size();
3955  Mesh::IndexType nown_right = right_owned.size();
3956  if(StdOut && verblevel)
3957  *StdOut << "Number of internal nodes/partition: " << nnbn << std::endl
3958  << "Number of shared nodes/partition: " << nshared_nodes << std::endl;
 3959  // Map the nodes to new ids based on the partitioning
3960  global.FunctionEntry("Mapping Clone Genes");
3961  std::vector<Mesh::IndexType> NodeMap(number_of_nodes,0);
3962  std::vector<Mesh::IndexType>::iterator ni = non_boundary.begin();
3963  Mesh::IndexType node_id = 1;
3964  while(ni != non_boundary.end())
3965  NodeMap[*ni++ - 1] = node_id++;
3966  ni = left_owned.begin();
3967  while(ni != left_owned.end())
3968  NodeMap[*ni++ - 1] = node_id++;
3969  ni = right_owned.begin();
3970  while(ni != right_owned.end())
3971  NodeMap[*ni++ - 1] = node_id++;
3972  ni = left_remote.begin();
3973  while(ni != left_remote.end())
3974  NodeMap[*ni++ - 1] = node_id++;
3975  ni = right_remote.begin();
3976  while(ni != right_remote.end())
3977  NodeMap[*ni++ - 1] = node_id++;
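      // new node ids: interior nodes first, then owned-left, owned-right,
      // remote-left, and remote-right boundary nodes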
3978  node_id--;
3979  assert(node_id == number_of_nodes);
3980  global.FunctionExit("Mapping Clone Genes");
3981  global.FunctionExit("Forming Clones");
3982  global.FunctionEntry("Writing Clones");
3983  for(Mesh::IndexType jjj = 0;jjj < N;jjj++){
3984  std::ostringstream Ostr;
3985  Ostr << MeshName << "." << jjj+1 << ".info";
3986  std::ofstream OutFile;
3987  OutFile.open(Ostr.str().c_str());
3988  OutFile << N << std::endl << jjj+1 << std::endl
3989  << number_of_elements << std::endl
3990  << number_of_nodes << std::endl
3991  << 2 << std::endl
3992  << nshared_nodes << std::endl
3993  << nown_left+nown_right << std::endl;
3994  OutFile.close();
3995  Ostr.str("");
3996  Ostr << MeshName << "." << jjj+1 << ".pmesh";
3997  OutFile.open(Ostr.str().c_str());
3998  // Write the nodal coordinates
3999  OutFile << number_of_nodes << std::endl;
4000  std::vector<Mesh::IndexType>::iterator nbni = non_boundary.begin();
4001  while(nbni != non_boundary.end()){
4002  OutFile << nc.x(*nbni) << " " << nc.y(*nbni) << " "
4003  << nc.z(*nbni) << std::endl;
4004  nbni++;
4005  }
4006  nbni = left_owned.begin();
4007  while(nbni != left_owned.end()){
4008  OutFile << nc.x(*nbni) << " " << nc.y(*nbni) << " "
4009  << nc.z(*nbni) << std::endl;
4010  nbni++;
4011  }
4012  nbni = right_owned.begin();
4013  while(nbni != right_owned.end()){
4014  OutFile << nc.x(*nbni) << " " << nc.y(*nbni) << " "
4015  << nc.z(*nbni) << std::endl;
4016  nbni++;
4017  }
4018  nbni = left_remote.begin();
4019  while(nbni != left_remote.end()){
4020  OutFile << nc.x(*nbni) << " " << nc.y(*nbni) << " "
4021  << nc.z(*nbni) << std::endl;
4022  nbni++;
4023  }
4024  nbni = right_remote.begin();
4025  while(nbni != right_remote.end()){
4026  OutFile << nc.x(*nbni) << " " << nc.y(*nbni) << " "
4027  << nc.z(*nbni) << std::endl;
4028  nbni++;
4029  }
4030  // Write the element connectivity
4031  OutFile << number_of_elements << std::endl;
4032  for(Mesh::IndexType iii = 0;iii < number_of_elements;iii++){
4033  std::vector<Mesh::IndexType>::iterator eni = econ[iii].begin();
4034  while(eni != econ[iii].end()){
 4035  OutFile << NodeMap[*eni-1];
 4036  eni++;
 4037  if(eni != econ[iii].end()) // space between entries, none trailing
 4038  OutFile << " ";
4039  }
4040  OutFile << std::endl;
4041  }
4042  // Write the borders
4043  OutFile << 2 << std::endl;
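      // the clones form a periodic chain: partition 1's left neighbor
      // wraps to partition N, and partition N's right neighbor wraps to 1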
4044  OutFile << (jjj == 0 ? N : jjj) << " "
4045  << nown_left << " "
4046  << nleft - nown_left << std::endl;
4047  std::vector<Mesh::IndexType>::iterator rni = left_owned.begin();
4048  while(rni != left_owned.end()){
4049  OutFile << NodeMap[*rni-1] << " " << *rni << std::endl;
4050  rni++;
4051  }
4052  rni = left_remote.begin();
4053  while(rni != left_remote.end()){
4054  OutFile << NodeMap[*rni-1] << " " << *rni << std::endl;
4055  rni++;
4056  }
4057  OutFile << (jjj == (N-1) ? 1 : jjj+2) << " "
4058  << nown_right << " "
4059  << nright - nown_right << std::endl;
4060  rni = right_owned.begin();
4061  while(rni != right_owned.end()){
4062  OutFile << NodeMap[*rni-1] << " " << *rni << std::endl;
4063  rni++;
4064  }
4065  rni = right_remote.begin();
4066  while(rni != right_remote.end()){
4067  OutFile << NodeMap[*rni-1] << " " << *rni << std::endl;
4068  rni++;
4069  }
4070  OutFile.close();
4071  }
4072  global.FunctionExit("Writing Clones");
4073  global.FunctionExit("Cloning");
4074  }
4075  comm.Barrier();
4076  if(LogOut && debug)
4077  *LogOut << "All processors made it to the end." << std::endl;
4078  global.FunctionExit("main");
4079  comm.Barrier();
4080  if(LogOut && debug)
4081  *LogOut << "All processors made it to the end." << std::endl;
4082  global.Finalize();
4083  // global.Profiler.Finalize();
4084  if(StdOut && verblevel)
4085  // global.Report(Profiler.SummarizeSerialExecution(*StdOut);
4086  global.Report(*StdOut);
4087  return(0);
4088 }
4089 
4090 // AssembleOrderedRow(DOFType &row,DOFContType &j,DOFDataContType &dofdat,KType &k) {
4091 // typename DOFContType::iterator jIt = j.begin();
4092 // typename DOFDataContType::iterator ddIt = dofdat.begin();
4093 // if(*jIt == 0)
4094 // jIt++;
4095 // Mesh::IndexType kindex = k.get_index(row,j);
4096 // Mesh::IndexType kend = k.get_index(row+1);
4097 // while(kindex < kend){
4098 // Mesh::IndexType jind = *jIt;
4099 // if(k.get_dof(kindex) == jind){
4100 // k[kindex++] = *ddIt++;
4101 // jIt++;
4102 // }
4103 // kindex++;
4104 // }
4105 // }
4106 
4107  class TestBase {
 4108  private:
 4109  int private_data;
 4110  protected:
 4111  int protected_data;
 4112  public:
4113  virtual ~TestBase(){};
4114  virtual int f1(){ return 0; };
4115  virtual int f2(){ return 0; };
4116  };
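      // minimal hierarchy for exercising virtual dispatch through base
      // pointers in TestFunction below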
4117 
4118 
4119  template <typename DataType>
4120  class TestDerived1 : public TestBase
4121  {
4122  protected:
4123  std::vector<DataType> DerivedNative;
4124  };
4125  template <typename DataType>
4126  class TestDerived2 : public TestBase
4127  {
4128  protected:
4129  std::vector<DataType> DerivedNative;
4130  public:
4131  int f3() { return 0; };
 4132  };
 4133  class TestDerived3 : public TestBase { // reconstructed: original declaration lost in extraction
 4134  private:
 4135  int private_data3; // placeholder member; original not recoverable
 4136  public:
 4137  virtual int f1(){ return 1; }; // placeholder override; original not recoverable
 4138  };
 4139  class TestDerived4 : public TestBase { // reconstructed: original declaration lost in extraction
 4140  private:
 4141  int private_data4; // placeholder member; original not recoverable
 4142  public:
 4143  virtual int f2(){ return 2; }; // placeholder override; original not recoverable
 4144  };
 4145 int TestFunction()
 4146 {
4147  typedef TestBase* TestBasePtr;
4148  std::vector<TestBasePtr> TestVector;
 4149  TestDerived1<int> t11; // reconstructed: the original declarations of t11,
 4150  TestDerived1<double> t12; // t12, and t21 were lost; these types are a
 4151  TestDerived2<int> t21; // plausible guess consistent with the names
 4152  TestVector.push_back(&t11);
4153  TestVector.push_back(&t12);
4154  TestVector.push_back(&t21);
4155  return(0);
4156 }