 59   _panes[i]->set_ignore_ghost(false);
108   vector< pane_i_vector > gelem_lists;
110   vector<vector<map<pair<int,int>,int> > > nodes_to_send;
111   vector<vector<deque<int> > > elems_to_send;
119   vector<pane_i_vector> recv_info;
124   vector<vector<int> > elem_renumbering;
125   vector<vector<map<pair<int,int>,int> > > nodes_to_recv;
150   int nrnodes = _panes[i]->size_of_real_nodes();
162   = (const int*)pconn->pointer()+MAP::Pane_connectivity::pconn_offset();
166   j<nj; ++j, index+=vs[index+1]+2) {
171   index+=vs[index+1]+2;
174   for(int k=0; k<vs[index+1]; ++k){
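// [Editor's sketch, not part of the original listing] The loops above walk the
// shared-node section of the "pconn" attribute.  After skipping the
// MAP::Pane_connectivity::pconn_offset() leading entries, that section is a
// sequence of per-pane blocks of the form [adjacent pane id, item count,
// item ids...], which is why every loop advances by vs[index+1]+2.  A minimal
// self-contained walker over such a buffer (function name and parameters are
// illustrative only):
void walk_pconn_blocks(const int* vs, int n_blocks) {
  for (int j = 0, index = 0; j < n_blocks; ++j, index += vs[index+1] + 2) {
    int adj_pane_id = vs[index];      // id of the communicating pane
    int count       = vs[index+1];    // number of items shared with that pane
    for (int k = 0; k < count; ++k) {
      int item = vs[index+2+k];       // local id of the k-th shared item
      (void)adj_pane_id; (void)item;  // process the (pane, item) pair here
    }
  }
}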
186   int nrnodes = _panes[i]->size_of_real_nodes();
194   reinterpret_cast<int*>(p_n_gorder->pointer());
196   for(int j=0; j< nrnodes; ++j){
198   n_gorder_ptr[j] = j+1;
207   pc.begin_update_shared_nodes();
208   pc.reduce_on_shared_nodes(MPI_MAX);
209   pc.end_update_shared_nodes();
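// [Editor's sketch, not part of the original listing] Lines 196-209 give every
// real node the value j+1 and then reduce with MPI_MAX over shared copies, so
// all panes end up agreeing on a single value for each shared node.  The usual
// MAP::Pane_communicator usage around the three calls shown above looks
// roughly like the following; the constructor and the attribute-registration
// call are assumptions, not copied from this file:
//
//   MAP::Pane_communicator pc(_buf_window, comm);  // assumed constructor
//   pc.init(p_n_gorder);                           // assumed attribute registration
//   pc.begin_update_shared_nodes();
//   pc.reduce_on_shared_nodes(MPI_MAX);
//   pc.end_update_shared_nodes();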
215   int nrnodes = _panes[i]->size_of_real_nodes();
221   reinterpret_cast<int*>(p_n_gorder->pointer());
223   for(int j=0; j< nrnodes; ++j)
234   vector<vector<map<pair<int,int>,int> > > &nodes_to_send,
235   vector<vector<deque<int> > > &elems_to_send,
239   vector<vector<set<int> > > adj_eset;
240   vector<vector<set<int> > > adj_nset;
241   set<int>::iterator eset_pos;
253   int n_comm_panes = _cpanes[i].size();
254   adj_eset[i].resize(n_comm_panes);
255   adj_nset[i].resize(n_comm_panes);
256   comm_sizes[i].resize(n_comm_panes,0);
257   nodes_to_send[i].resize(n_comm_panes);
258   elems_to_send[i].resize(n_comm_panes);
265   MAP::Pane_dual_connectivity dc(_panes[i],0);
268   = (const int*)pconn->pointer() +
269   MAP::Pane_connectivity::pconn_offset();
273   MAP::Pane_connectivity::pconn_offset();
276   for (int j=0, index=0; j<n_comm_panes; ++j, index+=vs[index+1]+2) {
280   index+=vs[index+1]+2;
284   for(int k=0; k<vs[index+1]; ++k){
288   adj_nset[i][j].insert(vs[index+2+k]);
289   dc.incident_elements(vs[index+2+k],elist);
292   for(unsigned int ii=0; ii<elist.size(); ++ii)
293   cur_eset.insert(elist[ii]);
296   adj_eset[i][j] = cur_eset;
300   for(eset_pos = cur_eset.begin(); eset_pos != cur_eset.end(); ++eset_pos){
301   comm_sizes[i][j] += 1 + 2*((_panes[i]->connectivity(*eset_pos))
302   ->size_of_nodes_pe());
316   reinterpret_cast<int*>(p_n_gorder->pointer());
318   gelem_lists[i].resize(_cpanes[i].size());
321   for(int j=0, nj = comm_sizes[i].size(); j<nj; ++j){
325   gelem_lists[i][j].resize(comm_sizes[i][j]);
327   for(eset_pos = adj_eset[i][j].begin();
328   eset_pos != adj_eset[i][j].end();
332   COM::Element_node_enumerator ene(_panes[i],*eset_pos);
333   elems_to_send[i][j].push_back(*eset_pos);
334   ene.get_nodes(nodes);
337   gelem_lists[i][j][index++] = ene.type();
339   for(int k=0, nk = ene.size_of_nodes(); k<nk; ++k){
342   int N = n_gorder_ptr[nodes[k]-1];
343   gelem_lists[i][j][index++] = P;
344   gelem_lists[i][j][index++] = N;
347   if(adj_nset[i][j].find(nodes[k]) == adj_nset[i][j].end())
348   nodes_to_send[i][j].insert(make_pair(make_pair(P,N),nodes[k]));
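// [Editor's sketch, not part of the original listing] The loop above packs one
// record per ghosted element into gelem_lists[i][j]: first the element type,
// then one (P,N) pair per node, where N is the node's total-order value from
// n_gorder_ptr and P is the pane-order value paired with it (P's computation
// falls outside this excerpt).  That matches the sizing at lines 301-302
// (1 + 2*size_of_nodes_pe() per element).  A minimal packer, with hypothetical
// names:
#include <utility>
#include <vector>
void pack_element(std::vector<int>& buf, int elem_type,
                  const std::vector<std::pair<int,int> >& node_keys) {
  buf.push_back(elem_type);               // record header: element type
  for (int k = 0; k < (int)node_keys.size(); ++k) {
    buf.push_back(node_keys[k].first);    // P: pane-order part of the node key
    buf.push_back(node_keys[k].second);   // N: total-order part of the node key
  }
}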
365   vector<pane_i_vector> &recv_info,
366   vector<vector<int> > &elem_renumbering,
367   vector<vector<map<pair<int,int>,int> > > &nodes_to_recv){
369   map<pair<int,int>,int>::iterator pos1, pos2;
371   elem_renumbering.resize(_npanes);
376   int n_real_nodes = _panes[i]->size_of_real_nodes();
377   int next_node_id = n_real_nodes + 1;
378   int comm_npanes = recv_info[i].size();
380   nodes_to_recv[i].resize(comm_npanes);
382   for(int j=0; j< comm_npanes; ++j){
384   int recv_size = recv_info[i][j].size();
387   while(index < recv_size){
388   int type = recv_info[i][j][index];
390   ++elem_renumbering[i][type+1];
393   for(int k=1; k<=2*nnodes; k+=2){
395   int P = recv_info[i][j][index+k];
396   int N = recv_info[i][j][index+k+1];
399   nodes_to_recv[i][j].find(make_pair(P,N));
406   int cur_node_id = next_node_id;
408   _local_nodes[i].insert(make_pair(make_pair(P,N),next_node_id++));
410   cur_node_id = pos2->second;
416   if(pos1 == nodes_to_recv[i][j].end()
417   && cur_node_id > n_real_nodes)
418   nodes_to_recv[i][j].insert(make_pair(make_pair(P,N),cur_node_id));
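// [Editor's sketch, not part of the original listing] process_received_data
// (lines 365-418) walks the received [type, P1,N1, ..., Pn,Nn] records and
// hands out new local ids, starting right after the real nodes, to (P,N) keys
// it has not seen before.  A self-contained version of just that numbering
// step, with nnodes_of() standing in for the element-type -> node-count
// lookup done by the real code:
#include <map>
#include <utility>
#include <vector>
int number_ghost_nodes(const std::vector<int>& recv, int n_real_nodes,
                       int (*nnodes_of)(int),
                       std::map<std::pair<int,int>, int>& local_nodes) {
  int next_id = n_real_nodes + 1;
  for (int index = 0; index < (int)recv.size(); ) {
    int nnodes = nnodes_of(recv[index]);                        // nodes in this element type
    for (int k = 1; k <= 2*nnodes; k += 2) {
      std::pair<int,int> key(recv[index+k], recv[index+k+1]);   // (P,N) node key
      if (local_nodes.find(key) == local_nodes.end())
        local_nodes.insert(std::make_pair(key, next_id++));     // first sighting: new ghost node
    }
    index += 1 + 2*nnodes;                                      // advance past this record
  }
  return next_id - 1;                                           // highest id assigned
}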
444   vector<vector<map<pair<int,int>,int> > > &nodes_to_recv,
445   vector<vector<deque<int> > > &elems_to_send,
446   vector<vector<int> > &elem_renumbering,
447   vector<pane_i_vector> &recv_info){
449   map<pair<int,int>,int>::iterator rns_pos,gnr_pos;
450   vector<vector<int> > node_pos;
453   vector<vector<int> > n_elem;
460   vector<vector<int*> > conn_ptr(_npanes);
470   int gcr_size =1, rcs_size =1, gnr_size =1, rns_size = 1;
471   int n_comm_panes = _cpanes[i].size();
475   for(int j=0, nj = nodes_to_send[i].size(); j<nj; ++j)
476   rns_size += 2+nodes_to_send[i][j].size();
479   for(int j=0, nj = nodes_to_recv[i].size(); j<nj; ++j)
480   gnr_size += 2+nodes_to_recv[i][j].size();
483   for(int j=0, nj = (int)elems_to_send[i].size(); j<nj; ++j)
484   rcs_size += 2+elems_to_send[i][j].size();
487   n_elem[i].resize(n_comm_panes,0);
490   for(int ind=0, size = (int)recv_info[i][j].size();
498   node_pos[i].assign(elem_renumbering[i].begin(),elem_renumbering[i].end());
506   int nelems = elem_renumbering[i][j+1];
507   elem_renumbering[i][j+1] += elem_renumbering[i][j];
517   _buf_window->set_size(conn_name.c_str(), pane_id, nelems,nelems);
518   _buf_window->resize_array(conn_name.c_str(), pane_id, &addr,nnodes,nelems);
520   conn_ptr[i][j] = (int*)addr;
521   COM_assertion_msg(addr!= NULL, "Could not allocate space for connectivity table");
530   int gsize = rns_size + gnr_size + rcs_size + gcr_size;
533   _buf_window->set_size("pconn", pane_id, rsize+gsize,gsize);
538   int* pconn_ptr = (int*)addr;
541   int gnr_ind = rns_ind + rns_size;
542   int rcs_ind = gnr_ind + gnr_size;
543   int gcr_ind = rcs_ind + rcs_size;
546   pconn_ptr[rns_ind++] = n_comm_panes;
547   pconn_ptr[gnr_ind++] = n_comm_panes;
548   pconn_ptr[rcs_ind++] = n_comm_panes;
549   pconn_ptr[gcr_ind++] = n_comm_panes;
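// [Editor's note, not part of the original listing] The ghost part of the
// "pconn" attribute built here consists of four blocks appended after the
// rsize real entries.  Judging by the containers they are sized from, rns
// holds real nodes to send, gnr ghost nodes to receive, rcs real cells to
// send, and gcr ghost cells to receive.  Each block starts with the number of
// communicating panes and then stores, per pane, [pane id, item count,
// item ids...], the same shape as the shared-node blocks walked earlier.
// rns_ind, gnr_ind, rcs_ind and gcr_ind are simply running write cursors
// into those four regions.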
552   int real_offset = _panes[i]->size_of_real_elements()+1;
559   for(int j=0; j <n_comm_panes; ++j){
563   pconn_ptr[rns_ind++] = comm_pane_id;
564   pconn_ptr[gnr_ind++] = comm_pane_id;
565   pconn_ptr[rcs_ind++] = comm_pane_id;
566   pconn_ptr[gcr_ind++] = comm_pane_id;
569   pconn_ptr[rns_ind++] = nodes_to_send[i][j].size();
570   pconn_ptr[gnr_ind++] = nodes_to_recv[i][j].size();
571   pconn_ptr[rcs_ind++] = elems_to_send[i][j].size();
572   pconn_ptr[gcr_ind++] = n_elem[i][j];
575   for(rns_pos = nodes_to_send[i][j].begin();
576   rns_pos != nodes_to_send[i][j].end(); ++rns_pos)
577   pconn_ptr[rns_ind++] = rns_pos->second;
579   for(gnr_pos = nodes_to_recv[i][j].begin();
580   gnr_pos != nodes_to_recv[i][j].end(); ++gnr_pos)
581   pconn_ptr[gnr_ind++] = gnr_pos->second;
583   for(int k=0, nk = (int)elems_to_send[i][j].size(); k<nk; ++k)
584   pconn_ptr[rcs_ind++] = elems_to_send[i][j][k];
589   int recv_size = recv_info[i][j].size();
591   while(index < recv_size){
593   int elem_type = recv_info[i][j][index];
597   int conn_offset = node_pos[i][elem_type]++;
599   pconn_ptr[gcr_ind++] = real_offset + elem_renumbering[i][elem_type]++;
602   for(int k=1; k <= 2*nnodes; k+=2){
604   map<pair<int,int>,int>::iterator pos;
606   find(make_pair(recv_info[i][j][index+k],
607   recv_info[i][j][index+k+1]));
609   conn_ptr[i][elem_type][nnodes*conn_offset+(k-1)/2] = pos->second;
617   int new_gsize = new_size - _panes[i]->size_of_real_nodes();
621   set_size("nc", pane_id, new_size, new_gsize);
647   (const int*)pconn->pointer()+MAP::Pane_connectivity::pconn_offset();
652   for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
658   for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
669   vector<pane_i_vector> &recv_info,
677   size_buffer[i].resize(_cpanes[i].size(),1);
679   vector<pane_i_vector> send_buffer;
680   send_buffer.resize(_npanes);
684   send_buffer[i].resize(_cpanes[i].size());
686   send_buffer[i][j].resize(1);
687   send_buffer[i][j][0] = comm_sizes[i][j];
699   size_buffer[i][j] = recv_info[i][j][0];
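// [Editor's note, not part of the original listing] send_gelem_lists appears
// to exchange one-integer buffers holding comm_sizes[i][j] first, so each
// pane learns how large the incoming element list will be before the payload
// exchange (delegated to send_pane_info, below); line 699 then reads the
// received size back out of the one-entry buffer.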
716   vector<pane_i_vector> &recv_info,
727   map<int,int> lpaneid_map;
728   map<int,int>::const_iterator it=proc_map.begin();
730   lpaneid_map[ it->first] = i;
732   vector<MPI_Request> reqs_send, reqs_recv;
733   int int_size = sizeof(int);
743   int lpid = lpaneid_map.find(_panes[i]->id())->second;
747   recv_info[i][j].resize(comm_sizes[i][j],0);
749   const int lqid = lpaneid_map.find(_cpanes[i][j])->second;
752   int stag = 100 + ((lpid > lqid) ?
753   lpid*total_npanes+lqid : lqid*total_npanes+lpid);
755   MPI_Comm comm = mycomm;
758   if(myrank == adjrank && lpid == lqid)
759   memcpy(&send_info[i][j][0], &recv_info[i][j][0],
760   int_size*send_info[i][j].size());
765   if (myrank == adjrank){
777   MPI_BYTE, adjrank, stag, comm, &req);
779   reqs_send.push_back(req);
781   ierr = COMMPI_Irecv(&recv_info[i][j][0],int_size*comm_sizes[i][j],
782   MPI_BYTE, adjrank, rtag, comm, &req);
784   reqs_recv.push_back(req);
789   if(mycomm != MPI_COMM_NULL){
794   while(!reqs_recv.empty()){
795   ierr = MPI_Waitany(reqs_recv.size(),&reqs_recv[0],
798   reqs_recv.erase(reqs_recv.begin()+index);
801   if(reqs_send.size()){
802   ierr = MPI_Waitany(reqs_send.size(),&reqs_send[0],
805   reqs_send.erase(reqs_send.begin()+index);
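// [Editor's sketch, not part of the original listing] Both endpoints derive
// the same message tag from the ordered pair of local pane ids (lines
// 752-753), so the Isend on one pane matches the Irecv for that pane pair on
// the other.  The completion phase then drains the request vectors with
// MPI_Waitany, erasing each finished request, exactly as in lines 794-805.
// A minimal self-contained version of that drain loop:
#include <mpi.h>
#include <vector>
void drain_requests(std::vector<MPI_Request>& reqs) {
  while (!reqs.empty()) {
    int index;
    MPI_Status status;
    MPI_Waitany((int)reqs.size(), &reqs[0], &index, &status);  // wait for any one request
    reqs.erase(reqs.begin() + index);                          // drop the completed request
  }
}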
820   const int *vs = (const int*)pconn->pointer() +
821   MAP::Pane_connectivity::pconn_offset();
822   int vs_size=pconn->size_of_real_items() -
823   MAP::Pane_connectivity::pconn_offset();
829   for (int j=0, nj=vs_size; j<nj; j+=vs[j+1]+2) {
835   for (int j=0; j<count; ++j, index+=vs[index+1]+2) {
838   index+=vs[index+1]+2;
842   for(int k=0; k<vs[index+1]; ++k){
852   std::vector<std::vector<bool> > &marked_elems){
855   marked_elems.clear();
861   marked_elems[i].clear();
862   marked_elems[i].resize(_panes[i]->size_of_real_elements(),false);
866   int s_real_elems = _panes[i]->size_of_real_elements();
867   std::vector<int> nodes;
868   for(int j=1; j<= s_real_elems; ++j){
869   COM::Element_node_enumerator ene(_panes[i],j);
870   ene.get_nodes(nodes);
871   for(int k=0, nk=nodes.size(); k<nk; ++k){
872   if (marked_nodes[i][nodes[k]-1])
873   marked_elems[i][j-1] = true;
int COMMPI_Comm_rank(MPI_Comm c)
int COMMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request *request)
Begins a nonblocking receive.
#define MAP_END_NAMESPACE
void send_gelem_lists(vector< vector< vector< int > > > &gelem_lists, vector< vector< vector< int > > > &recv_info, vector< vector< int > > &comm_sizes)
#define COM_assertion(EX)
Error checking utility similar to the assert macro of the C language.
Utility for constructing pane connectivities in parallel.
void process_received_data(vector< vector< vector< int > > > &recv_info, vector< vector< int > > &elem_renumbering, vector< vector< map< pair< int, int >, int > > > &nodes_to_recv)
std::vector< std::vector< int > > _cpanes
Contains the prototypes for the Pane object.
An Attribute object is a data member of a window.
#define COM_assertion_msg(EX, msg)
void finalize_pconn(vector< vector< map< pair< int, int >, int > > > &nodes_to_send, vector< vector< map< pair< int, int >, int > > > &nodes_to_recv, vector< vector< deque< int > > > &elems_to_send, vector< vector< int > > &elem_renumbering, vector< vector< vector< int > > > &recv_info)
string _etype_str[COM::Connectivity::TYPE_MAX_CONN]
void send_pane_info(vector< vector< vector< int > > > &send_info, vector< vector< vector< int > > > &recv_info, vector< vector< int > > &comm_sizes)
Handles communication of shared nodes, ghost nodes or ghost cells across panes.
int COMMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request)
Begins a nonblocking send.
void get_node_total_order()
vector< vector< int > > _p_gorder
std::vector< std::vector< bool > > _is_shared_elem
const void * pointer() const
Obtain a constant pointer to the physical address.
MAP_BEGIN_NAMESPACE typedef vector< vector< int > > pane_i_vector
std::vector< std::vector< bool > > _is_shared_node
vector< map< pair< int, int >, int > > _local_nodes
Utility for constructing pane ghost connectivities in parallel.
void determine_shared_border()
int size_of_real_items() const
Obtain the number of real items in the attribute.
COM::Window * _buf_window
std::map< int, int > Proc_map
Size size_of_nodes_pe() const
Get the number of nodes per element of the current connectivity table.
void mark_elems_from_nodes(std::vector< std::vector< bool > > &marked_nodes, std::vector< std::vector< bool > > &marked_elems)
#define MAP_BEGIN_NAMESPACE
vector< set< set< int > > > pane_i_set_set
int id() const
Obtain the id (or index) of the attribute.
vector< int >::iterator i_vector_iter
void get_ents_to_send(vector< vector< vector< int > > > &gelem_lists, vector< vector< map< pair< int, int >, int > > > &nodes_to_send, vector< vector< deque< int > > > &elems_to_send, vector< vector< int > > &comm_sizes)
Determine elements/nodes to be ghosted on adjacent panes.
Contains declaration of the base class for Roccom implementations.
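A minimal pairing of the two COMMPI wrappers, following the prototypes listed above and the byte-count/MPI_BYTE convention used in the source listing; the buffers, ranks, tags and communicator below are placeholders, and error handling is omitted:

    std::vector<int> send_buf(n), recv_buf(m);  // n, m known from the size exchange
    MPI_Request sreq, rreq;
    int ierr;
    ierr = COMMPI_Isend(&send_buf[0], (int)(sizeof(int)*send_buf.size()),
                        MPI_BYTE, adjrank, stag, comm, &sreq);
    ierr = COMMPI_Irecv(&recv_buf[0], (int)(sizeof(int)*recv_buf.size()),
                        MPI_BYTE, adjrank, rtag, comm, &rreq);
    // ... later, wait on sreq and rreq (e.g. with MPI_Waitany, as in the listing).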