
salome-med 6.5.0
StructuredCoincidentDEC.cxx
// Copyright (C) 2007-2012  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include <mpi.h>
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "StructuredCoincidentDEC.hxx"
#include "InterpKernelUtilities.hxx"

#include <iostream>

using namespace std;

namespace ParaMEDMEM
{

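  // The default constructor below leaves the DEC unattached; the two-group
  // constructor further down is the one used in practice. The sequence sketched
  // here is only an illustration of how such a DEC is typically driven: the
  // CommInterface, the rank ranges of the two groups and the ParaFIELD object
  // named "parafield" are placeholders for this example, not part of this file.
  //
  //   CommInterface interface;
  //   MPIProcessorGroup source_group(interface, 0, 2);   // ranks 0..2 carry the source field
  //   MPIProcessorGroup target_group(interface, 3, 4);   // ranks 3..4 carry the target field
  //   StructuredCoincidentDEC dec(source_group, target_group);
  //   dec.attachLocalField(parafield);   // ParaFIELD defined on the local group
  //   dec.synchronize();                 // exchange topologies and size the buffers
  //   if (source_group.containsMyRank())
  //     dec.sendData();
  //   else if (target_group.containsMyRank())
  //     dec.recvData();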
  StructuredCoincidentDEC::StructuredCoincidentDEC():_topo_source(0),_topo_target(0),
                                                     _send_counts(0),_recv_counts(0),
                                                     _send_displs(0),_recv_displs(0),
                                                     _recv_buffer(0),_send_buffer(0)
  {
  }

  StructuredCoincidentDEC::~StructuredCoincidentDEC()
  {
    delete [] _send_buffer;
    delete [] _recv_buffer;
    delete [] _send_displs;
    delete [] _recv_displs;
    delete [] _send_counts;
    delete [] _recv_counts;
    if (!_source_group->containsMyRank())
      delete _topo_source;
    if (!_target_group->containsMyRank())
      delete _topo_target;
  }

  StructuredCoincidentDEC::StructuredCoincidentDEC(ProcessorGroup& local_group, ProcessorGroup& distant_group):DisjointDEC(local_group,distant_group),
                                                                                                               _topo_source(0),_topo_target(0),
                                                                                                               _send_counts(0),_recv_counts(0),
                                                                                                               _send_displs(0),_recv_displs(0),
                                                                                                               _recv_buffer(0),_send_buffer(0)
  {
  }

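  // Synchronization of the block topologies: each side starts with only its own
  // BlockTopology (taken from the attached field) and receives the other side's
  // description through broadcastTopology(). The distinct tags 1000 and 2000 keep
  // the two exchanges from being confused with one another.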
  void StructuredCoincidentDEC::synchronizeTopology()
  {
    if (_source_group->containsMyRank())
      _topo_source = dynamic_cast<BlockTopology*>(_local_field->getTopology());
    if (_target_group->containsMyRank())
      _topo_target = dynamic_cast<BlockTopology*>(_local_field->getTopology());

    // Transmitting source topology to target code
    broadcastTopology(_topo_source,1000);
    // Transmitting target topology to source code
    broadcastTopology(_topo_target,2000);
    if (_topo_source->getNbElements() != _topo_target->getNbElements())
      throw INTERP_KERNEL::Exception("Incompatible dimensions for target and source topologies");
  }

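  // Preparation of the send side of the all-to-all exchange, as performed by the
  // code below: every local source element is mapped to a (target rank, local index)
  // pair, _send_counts/_send_displs are sized on the union communicator, and
  // _send_buffer is filled in the order in which the target processes will read it.
  // Only processes belonging to the source group do any work here.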
  void StructuredCoincidentDEC::prepareSourceDE()
  {
    //Step 1 : _buffer array creation

    if (!_topo_source->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

    int myranksource = _topo_source->getProcGroup()->myRank();

    vector<int>* target_arrays=new vector<int>[_topo_target->getProcGroup()->size()];

    //cout<<" topotarget size"<<  _topo_target->getProcGroup()->size()<<endl;

    int nb_local = _topo_source->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        //  cout <<"source local :"<<myranksource<<","<<ielem<<endl;
        int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
        //  cout << "global "<<global<<endl;
        pair<int,int> target_local =_topo_target->globalToLocal(global);
        //  cout << "target local : "<<target_local.first<<","<<target_local.second<<endl;
        target_arrays[target_local.first].push_back(target_local.second);
      }

    int union_size=group->size();

    _send_counts=new int[union_size];
    _send_displs=new int[union_size];
    _recv_counts=new int[union_size];
    _recv_displs=new int[union_size];

    for (int i=0; i<union_size; i++)
      {
        _send_counts[i]=0;
        _recv_counts[i]=0;
        _recv_displs[i]=0;
      }
    _send_displs[0]=0;

    for (int iproc=0; iproc<_topo_target->getProcGroup()->size(); iproc++)
      {
        //converts the rank in target to the rank in union communicator
        int unionrank=group->translateRank(_topo_target->getProcGroup(),iproc);
        _send_counts[unionrank]=target_arrays[iproc].size();
      }

    for (int iproc=1; iproc<group->size();iproc++)
      _send_displs[iproc]=_send_displs[iproc-1]+_send_counts[iproc-1];

    _send_buffer = new double[nb_local];

    //Step 2 : filling the _buffers with the source field values

    int* counter=new int[_topo_target->getProcGroup()->size()];
    counter[0]=0;
    for (int i=1; i<_topo_target->getProcGroup()->size(); i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();

    const double* value = _local_field->getField()->getArray()->getPointer();
    //cout << "Nb local " << nb_local<<endl;
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int global = _topo_source->localToGlobal(make_pair(myranksource, ielem));
        pair<int,int> target_local =_topo_target->globalToLocal(global);
        //cout <<"global : "<< global<<" local :"<<target_local.first<<" "<<target_local.second;
        //cout <<"counter[]"<<counter[target_local.first]<<endl;
        _send_buffer[counter[target_local.first]++]=value[ielem];
      }
    delete[] target_arrays;
    delete[] counter;
    delete group;
  }

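  // Mirror of prepareSourceDE() for the receive side: each local target element is
  // traced back to the source process that owns it, which yields _recv_counts and
  // _recv_displs per union rank, and _recv_buffer is allocated with one slot per
  // local element. Only processes belonging to the target group do any work here.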
  void StructuredCoincidentDEC::prepareTargetDE()
  {
    if (!_topo_target->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topo_source->getProcGroup()->getCommInterface());

    int myranktarget = _topo_target->getProcGroup()->myRank();

    vector< vector<int> > source_arrays(_topo_source->getProcGroup()->size());
    int nb_local = _topo_target->getNbLocalElements();
    for (int ielem=0; ielem<nb_local; ielem++)
      {
        //  cout <<"TS target local :"<<myranktarget<<","<<ielem<<endl;
        int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
        //cout << "TS global "<<global<<endl;
        pair<int,int> source_local =_topo_source->globalToLocal(global);
        //  cout << "TS source local : "<<source_local.first<<","<<source_local.second<<endl;
        source_arrays[source_local.first].push_back(source_local.second);
      }
    int union_size=group->size();
    _recv_counts=new int[union_size];
    _recv_displs=new int[union_size];
    _send_counts=new int[union_size];
    _send_displs=new int[union_size];

    for (int i=0; i<union_size; i++)
      {
        _send_counts[i]=0;
        _recv_counts[i]=0;
        _recv_displs[i]=0;
      }
    for (int iproc=0; iproc<_topo_source->getProcGroup()->size(); iproc++)
      {
        //converts the rank in the source group to the rank in the union communicator
        int unionrank=group->translateRank(_topo_source->getProcGroup(),iproc);
        _recv_counts[unionrank]=source_arrays[iproc].size();
      }
    for (int i=1; i<union_size; i++)
      _recv_displs[i]=_recv_displs[i-1]+_recv_counts[i-1];
    _recv_buffer=new double[nb_local];

    delete group;
  }


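  // Broadcast of a serialized BlockTopology from the group that owns it to every
  // process: rank 0 of the owning group serializes the topology, tells all the other
  // processes (via point-to-point messages built on "tag") which union rank acts as
  // master, and two collective broadcasts then transmit the buffer size and the
  // buffer itself. Processes that entered with topo==0 rebuild it by unserializing.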
  void StructuredCoincidentDEC::broadcastTopology(BlockTopology*& topo, int tag)
  {
    MPI_Status status;

    int* serializer=0;
    int size;

    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);

    // The master proc creates a send buffer containing
    // a serialized topology
    int rank_master;

    if (topo!=0 && topo->getProcGroup()->myRank()==0)
      {
        MESSAGE ("Master rank");
        topo->serialize(serializer, size);
        rank_master = group->translateRank(topo->getProcGroup(),0);
        MESSAGE("Master rank world number is "<<rank_master);
        MESSAGE("World Size is "<<group->size());
        for (int i=0; i<group->size(); i++)
          {
            if (i!=rank_master)
              _comm_interface->send(&rank_master,1,MPI_INT, i,tag+i,*(group->getComm()));
          }
      }
    else
      {
        MESSAGE(" rank "<<group->myRank()<< " waiting ...");
        _comm_interface->recv(&rank_master, 1,MPI_INT, MPI_ANY_SOURCE, tag+group->myRank(), *(group->getComm()),&status);
        MESSAGE(" rank "<<group->myRank()<< " received master rank "<<rank_master);
      }
    // The topology is broadcast to all processors in the group
    _comm_interface->broadcast(&size, 1,MPI_INT,rank_master,*(group->getComm()));

    int* buffer=new int[size];
    if (topo!=0 && topo->getProcGroup()->myRank()==0)
      copy(serializer, serializer+size, buffer);
    _comm_interface->broadcast(buffer,size,MPI_INT,rank_master,*(group->getComm()));

    // Processors which did not possess the source topology
    // unserialize it

    BlockTopology* topotemp=new BlockTopology();
    topotemp->unserialize(buffer, *_comm_interface);

    if (topo==0)
      topo=topotemp;
    else
      delete topotemp;

    // Memory cleaning
    delete[] buffer;
    if (serializer!=0)
      delete[] serializer;
    MESSAGE (" rank "<<group->myRank()<< " unserialize is over");
    delete group;
  }


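  // Reception side of the exchange: the all-to-all call below fills _recv_buffer,
  // whose entries are then copied into the local field array following the global
  // numbering shared by the two topologies. The debug loops print exactly four
  // counts/displacements, which appears tied to the process layout of the original
  // test case rather than to the general situation.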
  void StructuredCoincidentDEC::recvData()
  {
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    MESSAGE("recvData");
    for (int i=0; i<4; i++)
      cout << _recv_counts[i]<<" ";
    cout <<endl;
    for (int i=0; i<4; i++)
      cout << _recv_displs[i]<<" ";
    cout <<endl;

    cout<<"start AllToAll"<<endl;
    MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
    _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                               _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
    cout<<"end AllToAll"<<endl;

    int nb_local = _topo_target->getNbLocalElements();
    //double* value=new double[nb_local];
    double* value=const_cast<double*>(_local_field->getField()->getArray()->getPointer());

    int myranktarget=_topo_target->getProcGroup()->myRank();
    vector<int> counters(_topo_source->getProcGroup()->size());
    counters[0]=0;
    for (int i=0; i<_topo_source->getProcGroup()->size()-1; i++)
      {
        MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
        int worldrank=group->translateRank(_topo_source->getProcGroup(),i);
        counters[i+1]=counters[i]+_recv_counts[worldrank];
        delete group;
      }

    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int global = _topo_target->localToGlobal(make_pair(myranktarget, ielem));
        pair<int,int> source_local =_topo_source->globalToLocal(global);
        value[ielem]=_recv_buffer[counters[source_local.first]++];
      }

    //_local_field->getField()->setValue(value);
  }

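  // Emission side of the exchange: _send_buffer was already filled by
  // prepareSourceDE(), so the method reduces to the collective all-to-all call
  // (plus debug output with the same hard-coded length of four as in recvData()).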
  void StructuredCoincidentDEC::sendData()
  {
    MESSAGE ("sendData");
    for (int i=0; i<4; i++)
      cout << _send_counts[i]<<" ";
    cout <<endl;
    for (int i=0; i<4; i++)
      cout << _send_displs[i]<<" ";
    cout <<endl;
    cout <<"start AllToAll"<<endl;
    MPI_Comm comm = *(dynamic_cast<MPIProcessorGroup*>(_union_group)->getComm());
    _comm_interface->allToAllV(_send_buffer, _send_counts, _send_displs, MPI_DOUBLE,
                               _recv_buffer, _recv_counts, _recv_displs, MPI_DOUBLE,comm);
    cout<<"end AllToAll"<<endl;
  }

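  // Entry point called on both sides before any data transfer: the topologies are
  // exchanged and the side-specific preparation (send or receive buffers) is done.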
  void StructuredCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        synchronizeTopology();
        prepareSourceDE();
      }
    else if (_target_group->containsMyRank())
      {
        synchronizeTopology();
        prepareTargetDE();
      }
  }
}