#define QUIET_NAN std::numeric_limits<Real>::quiet_NaN()
incident_faces( std::map< int, std::vector<int> > &opp_subface_lists) const {
  // ...
  for (Pane_set::const_iterator
       pi=_pane_set.begin(); pi!=_pane_set.end(); ++pi) {   // loop over local panes (iterator bounds assumed)
  std::map< int, std::vector<int> > opp_subface_lists;
  // ...
  std::vector<int> pane_ids;  pane_ids.reserve( opp_subface_lists.size());
  // ...
  for ( std::map< int, std::vector<int> >::const_iterator
        it=opp_subface_lists.begin(); it!=opp_subface_lists.end(); ++it) {
    pane_ids.push_back( it->first);
  // ...
  for ( std::map< int, std::vector<int> >::const_iterator
        it=opp_subface_lists.begin(); it!=opp_subface_lists.end(); ++it) {
    // ...
    const std::vector<int> &subface_list = it->second;
    std::set<int> face_list;
    std::set<int> node_list;
    for ( int i=0, size=subface_list.size(); i<size; ++i) {
      // ...
      face_list.insert( parent);
      // ...
      for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
        node_list.insert( ene[k]);
      // ...
      for ( int j=0; j<3; ++j) {
        // ...
        for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
          node_list.insert( ene[k]);
  const std::map<int, std::pair<int, int> > &opp_panemap = opp_win._pane_map;
  // ...
  std::map< int, std::set<int> > sfs;
  std::map< int, std::set<int> > sns;
  // ...
  int remote_rank = opp_panemap.find( oppf.pane_id)->second.first;
  // ...
  if ( remote_rank != rank) {
    // ...
    sfs[ remote_rank].insert( parent);
    // ...
    std::set<int> &ns = sns[remote_rank];
    // ...
    for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
      // ...
    for ( int j=0; j<3; ++j) {
      // ...
      for ( int k=0, kn=ene.size_of_nodes(); k<kn; ++k)
  for ( std::map< int, std::set<int> >::const_iterator
        it=sfs.begin(); it!=sfs.end(); ++it) {
    std::vector<int> &vec = pane._send_faces[ it->first];
    vec.reserve( it->second.size());
    vec.insert( vec.end(), it->second.begin(), it->second.end());
  }
  // ...
  for ( std::map< int, std::set<int> >::const_iterator
        it=sns.begin(); it!=sns.end(); ++it) {
    std::vector<int> &vec = pane._send_nodes[ it->first];
    vec.reserve( it->second.size());
    vec.insert( vec.end(), it->second.begin(), it->second.end());
  }
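
The two loops above follow a dedup-then-flatten idiom: a std::set<int> per destination rank removes duplicates and keeps the ids sorted, and a single range-insert then copies each set into the contiguous vector that the send path expects. A minimal self-contained sketch of the same idiom (the helper name and parameter are illustrative, not from this file):

#include <map>
#include <set>
#include <vector>

// Flatten per-rank id sets (deduplicated, sorted) into send lists.
std::map<int, std::vector<int> >
flatten_send_lists( const std::map<int, std::set<int> > &ids_by_rank) {
  std::map<int, std::vector<int> > lists;
  for ( std::map<int, std::set<int> >::const_iterator
        it=ids_by_rank.begin(); it!=ids_by_rank.end(); ++it) {
    std::vector<int> &vec = lists[ it->first];
    vec.reserve( it->second.size());
    vec.insert( vec.end(), it->second.begin(), it->second.end());
  }
  return lists;
}
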
  std::vector<int> npanes_recv( comm_size(), 0);
  std::vector<int> npanes_send( comm_size(), 0);
  // ...
  for ( int i=0; i<n; ++i) {
    // ...
    std::pair<int, int> p = _pane_map.find( pane_ids[i])->second;
    if ( p.first != rank) ++npanes_recv[ p.first];
  }
  // ...
  MPI_Alltoall( &npanes_recv[0], 1, MPI_INT,
                &npanes_send[0], 1, MPI_INT, _comm);
  // ...
  std::vector<int> displs_recv( npanes_recv.size()+1);
  std::vector<int> displs_send( npanes_send.size()+1);
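
The displacement vectors are one entry longer than the count vectors so that the last entry holds the grand total; counts_to_displs (declared in the cross-reference below) evidently fills them with an exclusive prefix sum. A minimal sketch of that computation, assuming exactly this convention (the real routine is a const member):

#include <vector>

// Exclusive prefix sum: displs[i] is where rank i's block starts, and
// displs.back() is the total, used below to size ids_recv/ids_send.
void counts_to_displs( const std::vector<int> &counts,
                       std::vector<int> &displs) {
  displs.resize( counts.size()+1);
  displs[0] = 0;
  for ( std::size_t i=0; i<counts.size(); ++i)
    displs[i+1] = displs[i] + counts[i];
}
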
  std::vector<int> ids_recv( displs_recv.back());
  for ( int i=0; i<n; ++i) {
    std::pair<int, int> p = _pane_map.find( pane_ids[i])->second;
    if ( p.first != rank) ids_recv[ displs_recv[p.first]++] = pane_ids[i];
  }
  // ...
  std::vector<int> ids_send( displs_send.back());
  // ...
  MPI_Alltoallv( &ids_recv[0], &npanes_recv[0], &displs_recv[0], MPI_INT,
                 &ids_send[0], &npanes_send[0], &displs_send[0], MPI_INT, _comm);
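
This is the standard two-phase exchange for variable-length data: MPI_Alltoall trades the per-rank counts so every process can size its buffers, then MPI_Alltoallv trades the payloads. A self-contained sketch of the pattern (the free function and names are illustrative; buffers are assumed non-empty for brevity):

#include <mpi.h>
#include <vector>

// Exchange variable-length id lists among all ranks.
std::vector<int> exchange_ids( std::vector<int> &send_counts,
                               std::vector<int> &send_ids, MPI_Comm comm) {
  int np;  MPI_Comm_size( comm, &np);
  std::vector<int> recv_counts( np);
  MPI_Alltoall( &send_counts[0], 1, MPI_INT, &recv_counts[0], 1, MPI_INT, comm);

  std::vector<int> sdispls( np+1, 0), rdispls( np+1, 0);
  for ( int i=0; i<np; ++i) {
    sdispls[i+1] = sdispls[i] + send_counts[i];
    rdispls[i+1] = rdispls[i] + recv_counts[i];
  }
  std::vector<int> recv_ids( rdispls[np]);
  MPI_Alltoallv( &send_ids[0], &send_counts[0], &sdispls[0], MPI_INT,
                 &recv_ids[0], &recv_counts[0], &rdispls[0], MPI_INT, comm);
  return recv_ids;
}
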
  for ( int i=0, size=npanes_send.size(), k=0; i<size; ++i) {
    for ( int j=0; j<npanes_send[i]; ++j, ++k) {
      // ...
  for ( int i=0, size=npanes_recv.size(), k=0; i<size; ++i) {
    for ( int j=0; j<npanes_recv[i]; ++j, ++k) {
      // ...
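
After the exchange, ids_send lists the panes that each remote rank wants from this process, and ids_recv lists the panes this process asked for; the two nested loops above presumably register them through the helpers named in the cross-reference. Plausible call sites, inferred from the signatures init_send_buffer(pane_id, to_rank) and init_recv_buffer(pane_id, from_rank), shown as an assumption rather than the actual elided bodies:

for ( int i=0, size=npanes_send.size(), k=0; i<size; ++i)
  for ( int j=0; j<npanes_send[i]; ++j, ++k)
    init_send_buffer( ids_send[k], i);   // rank i requested this pane from us
for ( int i=0, size=npanes_recv.size(), k=0; i<size; ++i)
  for ( int j=0; j<npanes_recv[i]; ++j, ++k)
    init_recv_buffer( ids_recv[k], i);   // we requested this pane from rank i
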
replicate_data( const Facial_data_const &data, bool replicate_coor) {
  std::vector<MPI_Request> other_requests, recv_requests;
  // ...
  std::vector<RFC_Pane_transfer*> recv_panes;
  // ...
  // Post nonblocking receives for the panes replicated onto this rank.
  for (std::map< int, RFC_Pane_transfer*>::iterator
       it=_replic_panes.begin(); it!=_replic_panes.end(); ++it) {   // bounds assumed
    // ...
    std::pair<int, int> s = _pane_map.find( p->id())->second;
    // ...
    recv_requests.push_back( req);
    recv_panes.push_back( it->second);
    // ...
    if ( replicate_coor) {
      // ... (tail of the MPI_Irecv for pane coordinates)
          s.first, 100+totalNumPanes+s.second, _comm, &req);
      other_requests.push_back( req);
  // Pack and send the panes that remote ranks replicate from this rank.
  for (std::set< std::pair<int, RFC_Pane_transfer*> >::const_iterator
       it=_panes_to_send.begin(); it!=_panes_to_send.end(); ++it) {   // bounds assumed
    // ...
    std::pair<int, int> s = _pane_map.find( p->id())->second;
    // ...
    std::vector<int> &send_faces = p->_send_faces[it->first];
    // ...
    data_buf.resize( send_faces.size()*d, QUIET_NAN);
    // ...
    // Gather the listed faces' entries into a compact buffer.
    for ( int i=send_faces.size()-1; i>=0; --i) {
      for ( int j=d-1; j>=0; --j) {
        data_buf[i*d+j] = addr[(send_faces[i]-1)*d+j];
      }
    }
    // ...
    ierr = MPI_Isend( const_cast<Real*>(addr), send_faces.size()*d*sizeof(Real),
    // ... (remaining MPI_Isend arguments elided)
    other_requests.push_back( req);
    // ...
    if ( replicate_coor) {
      ierr = MPI_Isend( const_cast<Real*>(p->coordinates()),
      // ...
                        it->first, 100+totalNumPanes+s.second, _comm, &req);
      // ...
      other_requests.push_back( req);
  // Drain the receives, unpacking each pane's buffer as it arrives.
  while ( !recv_requests.empty()) {
    // ...
    ierr = MPI_Waitany( recv_requests.size(), &recv_requests[0], &index, &stat);
    // ...
    // Expand the compact buffer in place into face-indexed storage
    // (see the standalone sketch after this excerpt).
    for ( int i=faces.size()-1; i>=0; --i) {
      if ( i==faces[i]-1) break;
      for ( int j=d-1; j>=0; --j) {
        buf[(faces[i]-1)*d+j] = buf[i*d+j];
      }
    }
    // ...
    recv_requests.erase( recv_requests.begin()+index);
    recv_panes.erase( recv_panes.begin()+index);
  }
  // ...
  wait_all( other_requests.size(), &other_requests[0]);
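
The backward sweep above is an in-place unpack: the receive buffer arrives densely packed in the order of the sorted, 1-based face list, and each record's final slot (faces[i]-1)*d lies at or beyond its packed position i*d, so copying from the highest index down never overwrites data that is still needed. Once i == faces[i]-1, every earlier record is also already in its final position, hence the early break. The same logic as a standalone sketch (ids ascending and 1-based, as a std::set produces; buf sized for the largest id):

#include <vector>

// Expand a packed buffer into id-indexed storage without scratch space.
void unpack_in_place( std::vector<double> &buf,
                      const std::vector<int> &ids, int d) {
  for ( int i=(int)ids.size()-1; i>=0; --i) {
    if ( i==ids[i]-1) break;            // the remaining prefix is already in place
    for ( int j=d-1; j>=0; --j)
      buf[ (ids[i]-1)*d+j] = buf[ i*d+j];
  }
}
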
// Nodal counterpart of replicate_data:
replicate_data( /* const Nodal_data_const &data (assumed) */ bool replicate_coor) {
  std::vector<MPI_Request> other_requests, recv_requests;
  // ...
  std::vector<RFC_Pane_transfer*> recv_panes;
  // ...
  for (std::map< int, RFC_Pane_transfer*>::iterator
       it=_replic_panes.begin(); it!=_replic_panes.end(); ++it) {   // bounds assumed
    // ...
    std::pair<int, int> s = _pane_map.find( p->id())->second;
    // ...
    recv_requests.push_back( req);
    recv_panes.push_back( it->second);
    // ...
    if ( replicate_coor) {
      // ... (tail of the MPI_Irecv for pane coordinates)
          s.first, 100+totalNumPanes+s.second, _comm, &req);
      other_requests.push_back( req);
  for (std::set< std::pair<int, RFC_Pane_transfer*> >::const_iterator
       it=_panes_to_send.begin(); it!=_panes_to_send.end(); ++it) {   // bounds assumed
    // ...
    std::pair<int, int> s = _pane_map.find( p->id())->second;
    // ...
    std::vector<int> &send_nodes = p->_send_nodes[it->first];
    // ...
    data_buf.resize( send_nodes.size()*d);
    // ...
    // Gather the listed nodes' entries into a compact buffer.
    for ( int i=send_nodes.size()-1; i>=0; --i) {
      for ( int j=d-1; j>=0; --j) {
        data_buf[i*d+j] = addr[(send_nodes[i]-1)*d+j];
      }
    }
    // ...
    ierr = MPI_Isend( const_cast<Real*>(addr), send_nodes.size()*d*sizeof(Real),
    // ... (remaining MPI_Isend arguments elided)
    other_requests.push_back( req);
    // ...
    if ( replicate_coor) {
      ierr = MPI_Isend( const_cast<Real*>(p->coordinates()),
      // ...
                        it->first, 100+totalNumPanes+s.second, _comm, &req);
      // ...
      other_requests.push_back( req);
  while ( !recv_requests.empty()) {
    // ...
    ierr = MPI_Waitany( recv_requests.size(), &recv_requests[0], &index, &stat);
    // ...
    // Same in-place expansion as the facial version, keyed by node ids.
    for ( int i=nodes.size()-1; i>=0; --i) {
      int lid = nodes[i]-1;
      // ...
      for ( int j=d-1; j>=0; --j) {
        buf[lid*d+j] = buf[i*d+j];
      }
    }
    // ...
    recv_requests.erase( recv_requests.begin()+index);
    recv_panes.erase( recv_panes.begin()+index);
  }
  // ...
  wait_all( other_requests.size(), &other_requests[0]);
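
Both replicate_data bodies follow the same nonblocking choreography: post every MPI_Irecv up front, post the matching MPI_Isend calls, then drain the receives with MPI_Waitany, unpacking each pane as its message lands, and finally wait on the outstanding sends. A reduced skeleton of that flow, assuming one double per message and tag 0 purely for brevity (inbox/outbox are sized to match sources/dests):

#include <mpi.h>
#include <vector>

void replicate_skeleton( std::vector<double> &inbox,
                         const std::vector<int> &sources,
                         const std::vector<double> &outbox,
                         const std::vector<int> &dests, MPI_Comm comm) {
  std::vector<MPI_Request> recv_reqs( sources.size()), send_reqs( dests.size());
  for ( int i=0, n=sources.size(); i<n; ++i)     // post all receives first
    MPI_Irecv( &inbox[i], 1, MPI_DOUBLE, sources[i], 0, comm, &recv_reqs[i]);
  for ( int i=0, n=dests.size(); i<n; ++i)       // then all sends
    MPI_Isend( const_cast<double*>(&outbox[i]), 1, MPI_DOUBLE,
               dests[i], 0, comm, &send_reqs[i]);
  while ( !recv_reqs.empty()) {                  // drain receives as they land
    int index;  MPI_Status stat;
    MPI_Waitany( (int)recv_reqs.size(), &recv_reqs[0], &index, &stat);
    // ... unpack the message that just completed ...
    recv_reqs.erase( recv_reqs.begin()+index);
  }
  if ( !send_reqs.empty()) {
    std::vector<MPI_Status> stats( send_reqs.size());
    MPI_Waitall( (int)send_reqs.size(), &send_reqs[0], &stats[0]);
  }
}
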
  // From the reduction routines: collect the per-pane data pointers...
  std::vector<void *> ptrs;  ptrs.reserve( _pane_set.size());
  // ...
  ptrs.push_back( pane.pointer( data.id()));
  // ...
  std::vector<void *> ptrs;  ptrs.reserve( _pane_set.size());
  // ...
  ptrs.push_back( pane.pointer( data.id()));
  // ...
  // ...then reduce across the panes that share nodes.
  _map_comm.reduce_maxabs_on_shared_nodes();
  std::vector<MPI_Status> statuses( n);
  int ierr = MPI_Waitall( n, requests, &statuses[0]);
  // ...

wait_any( int n, MPI_Request *requests, int *index, MPI_Status *stat) {
  MPI_Status s;
  if ( stat==NULL) stat = &s;   // substitute a local status if none was supplied
  int ierr = MPI_Waitany( n, requests, index, stat);
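
The NULL check supplies a local MPI_Status because MPI-1 requires a valid status argument; later MPI versions provide MPI_STATUS_IGNORE and MPI_STATUSES_IGNORE, which make such wrappers unnecessary:

// MPI-2 and later (not used here, presumably for portability):
MPI_Waitany( n, requests, index, MPI_STATUS_IGNORE);
MPI_Waitall( n, requests, MPI_STATUSES_IGNORE);
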
  std::vector<Real> buf( data, data+n);   // copy: send/recv buffers must not alias
  // ...
  MPI_Allreduce( &buf[0], data, n, MPI_DOUBLE, op, _comm);
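
The copy into buf exists because MPI-1 forbids aliased send and receive buffers in MPI_Allreduce; data serves as the receive buffer while buf carries the original values. With MPI-2's MPI_IN_PLACE the copy can be dropped:

// Modern equivalent without the temporary copy:
MPI_Allreduce( MPI_IN_PLACE, data, n, MPI_DOUBLE, op, _comm);
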
  _panes_to_send.insert( std::pair<int,RFC_Pane_transfer*>( to_rank, &pane( pane_id)));   // second member assumed
  COM::Pane *base_pane = new COM::Pane( (COM::Window*)NULL, pane_id);
  // ...
  std::string sdv_material = base_material+"_sdv";
  // ...
  std::string buf_wname( _bufwin_prefix);  buf_wname.append( base_material);
  std::string sdv_wname = buf_wname+"_sdv";
  // ...
  // ... (tail of the Rocin read call)
      (base_material+" "+sdv_material).c_str(), &comm_self);
  // ...
  pane->read_rocin( sdv_wname, buf_wname, base_pane);
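
read_rocin (see the cross-reference) pulls the subdivision pane back in through Rocin. A hedged sketch of how such a load is typically driven through the COM layer, using only the macros and calls listed below; the module handle name "IN", the routine name "read_windows", and the use of get_sdv_fname for the file name are assumptions, not taken from this listing:

COM_EXTERN_MODULE( Rocin);
COM_LOAD_MODULE_STATIC_DYNAMIC( Rocin, "IN");
int hdl_read = COM_get_function_handle( "IN.read_windows");
std::string fname = get_sdv_fname( _prefix.c_str(), pane_id);  // hypothetical use
COM_call_function( hdl_read, 3, fname.c_str(),
                   (base_material+" "+sdv_material).c_str(), &comm_self);
COM_UNLOAD_MODULE_STATIC_DYNAMIC( Rocin, "IN");
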
  std::map< int, RFC_Pane_transfer*>::iterator it = _replic_panes.begin();
  // ...
  for ( ; it != iend; ++it) {
    it->second->_data_buf_id = -1;
    // ...
    it->second->_data_buf.swap( t);
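
The swap with the temporary t is the C++98 idiom for actually releasing a vector's storage; clear() alone keeps the capacity allocated. In generic form:

#include <vector>

// Free a vector's memory by swapping with an empty temporary
// (C++11 would use v.clear(); v.shrink_to_fit();).
template <class T>
void release_memory( std::vector<T> &v) {
  std::vector<T> empty;
  v.swap( empty);   // the old storage is freed when `empty` goes out of scope
}
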
void replicate_data(const Facial_data_const &data, bool replicate_coor)
Replicate the given data from remote processes onto the local process.
MAP::Pane_communicator _map_comm
RFC_Pane_transfer & pane(const int pid)
An adaptor for enumerating node IDs of an element.
std::map< int, RFC_Pane_transfer * > _replic_panes
int face_id
The local id within the pane, starting from 1.
void wait_any(int n, MPI_Request *requests, int *index, MPI_Status *stat=NULL)
void COM_delete_window(const char *wname)
const Real * coordinates() const
std::vector< int > _recv_faces
void counts_to_displs(const std::vector< int > &counts, std::vector< int > &displs) const
Base * base()
The id of its base COM::Pane object.
void wait_all(int n, MPI_Request *requests)
std::vector< Real > _coor_buf
int pane_id
The id of the owner pane.
void barrier() const
Block until all processes of the window have reached here.
int COM_get_attribute_handle(const char *waname)
std::map< int, std::vector< Real > > _send_buffers
int color() const
The color of the window for overlay or for data transfer (BLUE or GREEN).
int size_of_faces() const
The total number of faces in the pane.
std::vector< Three_tuple< int > > _subfaces
const std::string _prefix
static const char * _bufwin_prefix
std::vector< Edge_ID > _subnode_parents
Edge ids of the parents of the subnodes.
#define RFC_END_NAME_SPACE
void clear_replicated_data()
Clear all the replicated data but keep the metadata.
void reduce_maxabs_to_all(Nodal_data &)
#define COM_UNLOAD_MODULE_STATIC_DYNAMIC(moduleName, windowString)
void incident_faces(std::map< int, std::vector< int > > &) const
Obtain the list of incident faces in each pane of the opposite mesh.
void read_binary(std::istream &is, std::vector< int > *b2v_all=NULL, COM::Pane *p=NULL)
std::map< int, std::pair< int, int > > _pane_map
int size_of_nodes() const
The total number of nodes in the pane.
#define RFC_BEGIN_NAME_SPACE
int size_of_subfaces() const
The total number of faces in the subdivision of the pane.
void COM_call_function(const int wf, int argc,...)
Pane_set _pane_set
The set of panes contained in the window.
static const char * get_prefix_base(const char *prefix)
std::vector< int > _subface_parents
Face ids of the parents of the subfaces.
void replicate_metadata(int *pane_ids, int n)
Replicate the metadata of a remote pane onto the local process.
void read_rocin(const std::string &sdv_wname, const std::string &parent_wname="", COM::Pane *p=NULL)
Read in using Rocin.
std::map< int, std::vector< int > > _send_nodes
void init_recv_buffer(int pane_id, int from_rank)
std::set< std::pair< int, RFC_Pane_transfer * > > _panes_to_send
static std::string get_sdv_fname(const char *prefix, int pane_id, const int format=SDV_BINARY)
void reduce_to_all(Nodal_data &, MPI_Op)
#define COM_LOAD_MODULE_STATIC_DYNAMIC(moduleName, windowString)
bool is_root() const
Check whether the process has rank 0.
int COM_get_function_handle(const char *wfname)
std::map< int, std::vector< int > > _send_faces
std::vector< Face_ID > _subface_counterparts
Ids of the counterparts of the subfaces.
std::vector< Real > _data_buf
void init_send_buffer(int pane_id, int to_rank)
#define COM_EXTERN_MODULE(moduleName)
void allreduce(Array_n &arr, MPI_Op op) const
std::vector< int > _recv_nodes