
salome-med  6.5.0
ExplicitCoincidentDEC.cxx
// Copyright (C) 2007-2012  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include <mpi.h>
#include "CommInterface.hxx"
#include "Topology.hxx"
#include "BlockTopology.hxx"
#include "ComponentTopology.hxx"
#include "ParaFIELD.hxx"
#include "MPIProcessorGroup.hxx"
#include "ExplicitCoincidentDEC.hxx"
#include "ExplicitMapping.hxx"
#include "InterpKernelUtilities.hxx"

using namespace std;

namespace ParaMEDMEM
{
  ExplicitCoincidentDEC::ExplicitCoincidentDEC():_toposource(0),_topotarget(0)
  {
  }

  ExplicitCoincidentDEC::~ExplicitCoincidentDEC()
  {
  }

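  /*! Synchronization of the coincident DEC: retrieves the local ExplicitTopology,
    builds the source and target process groups, broadcasts the source topology to
    the target side and transfers the resulting element mapping back to the source
    processors. */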
  void ExplicitCoincidentDEC::synchronize()
  {
    if (_source_group->containsMyRank())
      {
        _toposource = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup= _toposource->getProcGroup()->createProcGroup();
        _targetgroup=_toposource->getProcGroup()->createComplementProcGroup();
      }
    if (_target_group->containsMyRank())
      {
        _topotarget = dynamic_cast<ExplicitTopology*>(_local_field->getTopology());
        _sourcegroup= _topotarget->getProcGroup()->createComplementProcGroup();
        _targetgroup=_topotarget->getProcGroup()->createProcGroup();
      }

    // Exchanging

    // Transmitting source topology to target code
    broadcastTopology(_toposource,_topotarget,1000);
    transferMappingToSource();
  }

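  /*! Creates the buffers on the source processors: fills the all-to-all send
    counts and displacements from the explicit mapping and copies the local field
    values into the send buffer in mapping order. Does nothing on target processors. */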
  void ExplicitCoincidentDEC::prepareSourceDE()
  {
    //Step 1 : buffer array creation

    if (!_toposource->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_sourcegroup->getCommInterface());

    // Warning : the size of the target side is implicitly deduced
    //from the size of MPI_COMM_WORLD
    int target_size = _toposource->getProcGroup()->getCommInterface().worldSize() - _toposource->getProcGroup()->size();

    vector<int>* target_arrays=new vector<int>[target_size];

    int nb_local = _toposource->getNbLocalElements();

    int union_size=group->size();

    _sendcounts=new int[union_size];
    _senddispls=new int[union_size];
    _recvcounts=new int[union_size];
    _recvdispls=new int[union_size];

    for (int i=0; i< union_size; i++)
      {
        _sendcounts[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }
    _senddispls[0]=0;

    int* counts=_explicit_mapping.getCounts();
    for (int i=0; i<group->size(); i++)
      _sendcounts[i]=counts[i];

    for (int iproc=1; iproc<group->size();iproc++)
      _senddispls[iproc]=_senddispls[iproc-1]+_sendcounts[iproc-1];

    _sendbuffer = new double [nb_local * _toposource->getNbComponents()];

    //Step 2 : filling the buffers with the source field values

    int* counter=new int [target_size];
    counter[0]=0;
    for (int i=1; i<target_size; i++)
      counter[i]=counter[i-1]+target_arrays[i-1].size();

    const double* value = _local_field->getField()->getArray()->getPointer();

    int* bufferindex= _explicit_mapping.getBufferIndex();

    for (int ielem=0; ielem<nb_local; ielem++)
      {
        int ncomp = _toposource->getNbComponents();
        for (int icomp=0; icomp<ncomp; icomp++)
          {
            _sendbuffer[ielem*ncomp+icomp]=value[bufferindex[ielem]*ncomp+icomp];
          }
      }
    delete[] target_arrays;
    delete[] counter;
  }

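  /*! Creates the buffers on the target processors: groups the local elements by
    distant (source) subdomain through the explicit mapping and allocates the
    all-to-all receive counts, displacements and receive buffer accordingly.
    Does nothing on source processors. */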
  void ExplicitCoincidentDEC::prepareTargetDE()
  {
    if (!_topotarget->getProcGroup()->containsMyRank())
      return;
    MPIProcessorGroup* group=new MPIProcessorGroup(_topotarget->getProcGroup()->getCommInterface());

    vector < vector <int> > source_arrays(_sourcegroup->size());
    int nb_local = _topotarget->getNbLocalElements();
    for (int ielem=0; ielem< nb_local ; ielem++)
      {
        //pair<int,int> source_local =_distant_elems[ielem];
        pair <int,int> source_local=_explicit_mapping.getDistantNumbering(ielem);
        source_arrays[source_local.first].push_back(source_local.second);
      }
    int union_size=group->size();
    _recvcounts=new int[union_size];
    _recvdispls=new int[union_size];
    _sendcounts=new int[union_size];
    _senddispls=new int[union_size];

    for (int i=0; i< union_size; i++)
      {
        _sendcounts[i]=0;
        _recvcounts[i]=0;
        _recvdispls[i]=0;
      }
    for (int iproc=0; iproc < _sourcegroup->size(); iproc++)
      {
        //converts the rank in the source group to the rank in the union communicator
        int unionrank=group->translateRank(_sourcegroup,iproc);
        _recvcounts[unionrank]=source_arrays[iproc].size()*_topotarget->getNbComponents();
      }
    for (int i=1; i<union_size; i++)
      _recvdispls[i]=_recvdispls[i-1]+_recvcounts[i-1];
    _recvbuffer=new double[nb_local*_topotarget->getNbComponents()];

  }

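  /*! Transmits the send topology to the receiving processors.
    The processors owning \a toposend serialize it and send the buffer, preceded by
    its size, to every processor outside their group (message tag = \a tag + rank of
    the receiver). The receiving processors rebuild the distant topology, match its
    global numbering against their own local elements of \a toporecv and record the
    matches in the explicit mapping. */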
  void ExplicitCoincidentDEC::broadcastTopology(const ExplicitTopology* toposend, ExplicitTopology* toporecv, int tag)
  {
    MPI_Status status;

    int* serializer=0;
    int size;

    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);

    // The send processors serialize the send topology
    // and send the buffers to the recv procs
    if (toposend !=0 && toposend->getProcGroup()->containsMyRank())
      {
        toposend->serialize(serializer, size);
        for (int iproc=0; iproc< group->size(); iproc++)
          {
            int itarget=iproc;
            if (!toposend->getProcGroup()->contains(itarget))
              {
                _comm_interface->send(&size,1,MPI_INT, itarget,tag+itarget,*(group->getComm()));
                _comm_interface->send(serializer, size, MPI_INT, itarget, tag+itarget,*(group->getComm()));
              }
          }
      }
    else
      {
        vector <int> size (group->size());
        int myworldrank=group->myRank();
        for (int iproc=0; iproc<group->size();iproc++)
          {
            int isource = iproc;
            if (!toporecv->getProcGroup()->contains(isource))
              {
                int nbelem;
                _comm_interface->recv(&nbelem, 1, MPI_INT, isource, tag+myworldrank, *(group->getComm()), &status);
                int* buffer = new int[nbelem];
                _comm_interface->recv(buffer, nbelem, MPI_INT, isource,tag+myworldrank, *(group->getComm()), &status);

                ExplicitTopology* topotemp=new ExplicitTopology();
                topotemp->unserialize(buffer, *_comm_interface);
                delete[] buffer;

                for (int ielem=0; ielem<toporecv->getNbLocalElements(); ielem++)
                  {
                    int global = toporecv->localToGlobal(ielem);
                    int sendlocal=topotemp->globalToLocal(global);
                    if (sendlocal!=-1)
                      {
                        size[iproc]++;
                        _explicit_mapping.pushBackElem(make_pair(iproc,sendlocal));
                      }
                  }
                delete topotemp;
              }
          }
      }
    MESSAGE (" rank "<<group->myRank()<< " broadcastTopology is over");
  }

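  /*! Transfers the element mapping gathered on the target processors during
    broadcastTopology() back to the source processors, through an all-to-all
    exchange over MPI_COMM_WORLD: the targets serialize their (source rank,
    local index) pairs and the sources unserialize them into their own
    explicit mapping. */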
  void ExplicitCoincidentDEC::transferMappingToSource()
  {

    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);

    // sending source->target mapping which is stored by target
    //in _distant_elems from target to source
    if (_topotarget!=0 && _topotarget->getProcGroup()->containsMyRank())
      {
        int world_size = _topotarget->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union=new int[world_size];
        int* dummy_recv=new int[world_size];
        for (int i=0; i<world_size; i++)
          nb_transfer_union[i]=0;
        //converts the rank in the source group to the rank in the union communicator

        for (int i=0; i<  _explicit_mapping.nbDistantDomains(); i++)
          {
            int unionrank=group->translateRank(_sourcegroup,_explicit_mapping.getDistantDomain(i));
            nb_transfer_union[unionrank]=_explicit_mapping.getNbDistantElems(i);
          }
        _comm_interface->allToAll(nb_transfer_union, 1, MPI_INT, dummy_recv, 1, MPI_INT, MPI_COMM_WORLD);

        int* sendbuffer= _explicit_mapping.serialize(_topotarget->getProcGroup()->myRank());

        int* sendcounts= new int [world_size];
        int* senddispls = new int [world_size];
        for (int i=0; i< world_size; i++)
          {
            sendcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              senddispls[i]=0;
            else
              senddispls[i]=senddispls[i-1]+sendcounts[i-1];
          }
        int* recvcounts=new int[world_size];
        int* recvdispls=new int[world_size];
        int *dummyrecv=0;
        for (int i=0; i <world_size; i++)
          {
            recvcounts[i]=0;
            recvdispls[i]=0;
          }
        _comm_interface->allToAllV(sendbuffer, sendcounts, senddispls, MPI_INT, dummyrecv, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);

      }
    //receiving in the source subdomains the mapping sent by targets
    else
      {
        int world_size = _toposource->getProcGroup()->getCommInterface().worldSize();
        int* nb_transfer_union=new int[world_size];
        int* dummy_send=new int[world_size];
        for (int i=0; i<world_size; i++)
          dummy_send[i]=0;
        _comm_interface->allToAll(dummy_send, 1, MPI_INT, nb_transfer_union, 1, MPI_INT, MPI_COMM_WORLD);

        int total_size=0;
        for (int i=0; i< world_size; i++)
          total_size+=nb_transfer_union[i];
        int nbtarget = _targetgroup->size();
        int* targetranks = new int[ nbtarget];
        for (int i=0; i<nbtarget; i++)
          targetranks[i]=group->translateRank(_targetgroup,i);
        int* mappingbuffer= new int [total_size*2];
        int* sendcounts= new int [world_size];
        int* senddispls = new int [world_size];
        int* recvcounts=new int[world_size];
        int* recvdispls=new int[world_size];
        for (int i=0; i< world_size; i++)
          {
            recvcounts[i]=2*nb_transfer_union[i];
            if (i==0)
              recvdispls[i]=0;
            else
              recvdispls[i]=recvdispls[i-1]+recvcounts[i-1];
          }

        int *dummysend=0;
        for (int i=0; i <world_size; i++)
          {
            sendcounts[i]=0;
            senddispls[i]=0;
          }
        _comm_interface->allToAllV(dummysend, sendcounts, senddispls, MPI_INT, mappingbuffer, recvcounts, recvdispls, MPI_INT, MPI_COMM_WORLD);
        _explicit_mapping.unserialize(world_size,nb_transfer_union,nbtarget, targetranks, mappingbuffer);
      }
  }

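  /*! Target-side exchange: runs the all-to-all transfer of field values and copies
    the received buffer into the local field, using the explicit mapping to locate,
    for each local element, the source processor it comes from. */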
  void ExplicitCoincidentDEC::recvData()
  {
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    MESSAGE("recvData");

    cout<<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
    cout<<"end AllToAll"<<endl;
    int nb_local = _topotarget->getNbLocalElements();
    double* value=new double[nb_local*_topotarget->getNbComponents()];

    // The union group is only needed to translate source-group ranks into
    // world ranks, so it is built once before the loop.
    MPIProcessorGroup* group=new MPIProcessorGroup(*_comm_interface);
    vector<int> counters(_sourcegroup->size());
    counters[0]=0;
    for (int i=0; i<_sourcegroup->size()-1; i++)
      {
        int worldrank=group->translateRank(_sourcegroup,i);
        counters[i+1]=counters[i]+_recvcounts[worldrank];
      }
    delete group;

    for (int ielem=0; ielem<nb_local ; ielem++)
      {
        pair<int,int> distant_numbering=_explicit_mapping.getDistantNumbering(ielem);
        int iproc=distant_numbering.first;
        int ncomp =  _topotarget->getNbComponents();
        for (int icomp=0; icomp< ncomp; icomp++)
          value[ielem*ncomp+icomp]=_recvbuffer[counters[iproc]*ncomp+icomp];
        counters[iproc]++;
      }
    _local_field->getField()->getArray()->useArray(value,true,CPP_DEALLOC,nb_local,_topotarget->getNbComponents());
  }

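  /*! Source-side exchange: ships the buffer prepared by prepareSourceDE() through
    the same all-to-all call (the receive counts are zero on the source processors). */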
  void ExplicitCoincidentDEC::sendData()
  {
    MESSAGE ("sendData");
    for (int i=0; i< 4; i++)
      cout << _sendcounts[i]<<" ";
    cout <<endl;
    for (int i=0; i< 4; i++)
      cout << _senddispls[i]<<" ";
    cout <<endl;
    //MPI_COMM_WORLD is used instead of group because there is no
    //mechanism for creating the union group yet
    cout <<"start AllToAll"<<endl;
    _comm_interface->allToAllV(_sendbuffer, _sendcounts, _senddispls, MPI_DOUBLE,
                               _recvbuffer, _recvcounts, _recvdispls, MPI_DOUBLE,MPI_COMM_WORLD);
  }
}
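
The send/receive counts and displacements built in prepareSourceDE() and prepareTargetDE() follow the standard MPI_Alltoallv convention: one count per rank of the union communicator, with displacements obtained as exclusive prefix sums of the counts. The short, self-contained sketch below is not part of ParaMEDMEM; the per-rank message sizes are invented for illustration. It shows the same convention written directly against the MPI C API:

#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Illustrative rule: every rank sends (dest+1) doubles, all equal to its own
  // rank, to each destination rank "dest".
  std::vector<int> sendcounts(size), senddispls(size), recvcounts(size), recvdispls(size);
  for (int i = 0; i < size; i++)
    {
      sendcounts[i] = i + 1;      // what this rank sends to rank i
      recvcounts[i] = rank + 1;   // what rank i sends to this rank
    }

  // Displacements are exclusive prefix sums of the counts, exactly as in
  // prepareSourceDE()/prepareTargetDE().
  senddispls[0] = 0;
  recvdispls[0] = 0;
  for (int i = 1; i < size; i++)
    {
      senddispls[i] = senddispls[i-1] + sendcounts[i-1];
      recvdispls[i] = recvdispls[i-1] + recvcounts[i-1];
    }

  std::vector<double> sendbuf(senddispls[size-1] + sendcounts[size-1], double(rank));
  std::vector<double> recvbuf(recvdispls[size-1] + recvcounts[size-1]);

  MPI_Alltoallv(sendbuf.data(), sendcounts.data(), senddispls.data(), MPI_DOUBLE,
                recvbuf.data(), recvcounts.data(), recvdispls.data(), MPI_DOUBLE,
                MPI_COMM_WORLD);

  // recvbuf now holds, for each source rank s, (rank+1) copies of the value s,
  // stored contiguously starting at recvdispls[s].
  MPI_Finalize();
  return 0;
}

ExplicitCoincidentDEC uses the same calling pattern in sendData()/recvData(), except that the counts are deduced from the explicit element mapping exchanged during synchronize() rather than from a fixed rule.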