
salome-med 6.5.0
MPIProcessorGroup.cxx
// Copyright (C) 2007-2012  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//

#include "ProcessorGroup.hxx"
#include "MPIProcessorGroup.hxx"
#include "CommInterface.hxx"
#include "InterpolationUtils.hxx"

#include <iostream>
#include <set>
#include <algorithm>
#include "mpi.h"

using namespace std;

namespace ParaMEDMEM
{
  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface):
    ProcessorGroup(interface),_world_comm(MPI_COMM_WORLD)
  {
    _comm=_world_comm;
    _comm_interface.commGroup(_world_comm, &_group);
    int size;
    _comm_interface.commSize(_world_comm,&size);
    for (int i=0; i<size; i++)
      _proc_ids.insert(i);
  }
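  // Illustrative usage sketch (not part of the original file): building a group
  // that spans every process of MPI_COMM_WORLD with the constructor above,
  // assuming MPI has already been initialized and that CommInterface is
  // default-constructible as in the rest of ParaMEDMEM:
  //
  //   ParaMEDMEM::CommInterface interface;
  //   ParaMEDMEM::MPIProcessorGroup world_group(interface);
  //   // world_group now contains ranks 0..size-1 of MPI_COMM_WORLD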

  MPIProcessorGroup::MPIProcessorGroup(const CommInterface& interface, set<int> proc_ids, const MPI_Comm& world_comm):
    ProcessorGroup(interface, proc_ids),_world_comm(world_comm)
  {
    updateMPISpecificAttributes();
  }

  void MPIProcessorGroup::updateMPISpecificAttributes()
  {
    // Creation of a communicator
    MPI_Group group_world;

    int size_world;
    _comm_interface.commSize(_world_comm,&size_world);
    int rank_world;
    _comm_interface.commRank(_world_comm,&rank_world);
    _comm_interface.commGroup(_world_comm, &group_world);

    int* ranks=new int[_proc_ids.size()];

    // copy proc_ids into ranks
    copy<set<int>::const_iterator,int*> (_proc_ids.begin(), _proc_ids.end(), ranks);
    for (int i=0; i< (int)_proc_ids.size();i++)
      if (ranks[i]>size_world-1)
        throw INTERP_KERNEL::Exception("invalid rank in set<int> argument of MPIProcessorGroup constructor");

    _comm_interface.groupIncl(group_world, _proc_ids.size(), ranks, &_group);

    _comm_interface.commCreate(_world_comm, _group, &_comm);
    delete[] ranks;
  }

  MPIProcessorGroup::MPIProcessorGroup (const CommInterface& comm_interface, int pstart, int pend, const MPI_Comm& world_comm): ProcessorGroup(comm_interface,pstart,pend),_world_comm(world_comm)
  {
    // Creation of a communicator
    MPI_Group group_world;

    int size_world;
    _comm_interface.commSize(_world_comm,&size_world);
    int rank_world;
    _comm_interface.commRank(_world_comm,&rank_world);
    _comm_interface.commGroup(_world_comm, &group_world);

    if (pend>size_world-1 || pend<pstart || pstart<0)
      throw INTERP_KERNEL::Exception("invalid argument in MPIProcessorGroup constructor (comm,pfirst,plast)");
    int nprocs=pend-pstart+1;
    int* ranks=new int[nprocs];
    for (int i=pstart; i<=pend;i++)
      {
        ranks[i-pstart]=i;
      }

    _comm_interface.groupIncl(group_world, nprocs, ranks, &_group);

    _comm_interface.commCreate(_world_comm, _group, &_comm);
    delete[] ranks;
  }
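  // Illustrative usage sketch (not part of the original file): selecting the
  // contiguous rank range [0,2] of MPI_COMM_WORLD with the range constructor
  // above; `interface` is assumed to be an existing CommInterface instance and
  // MPI to be initialized:
  //
  //   ParaMEDMEM::MPIProcessorGroup first_three(interface, 0, 2, MPI_COMM_WORLD);
  //   // on ranks 0..2 the internal communicator is valid;
  //   // on the remaining ranks MPI_Comm_create returns MPI_COMM_NULL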
  MPIProcessorGroup::MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids) :
    ProcessorGroup(proc_group.getCommInterface()),_world_comm(MPI_COMM_WORLD)
  {
    cout << "MPIProcessorGroup (const ProcessorGroup& proc_group, set<int> proc_ids)" <<endl;
    cout << "Not implemented yet !"<<endl;
    exit(1);
  }

  MPIProcessorGroup::MPIProcessorGroup(const MPIProcessorGroup& other):ProcessorGroup(other),_world_comm(other._world_comm)
  {
    updateMPISpecificAttributes();
  }

  MPIProcessorGroup::~MPIProcessorGroup()
  {
    _comm_interface.groupFree(&_group);
    if (_comm!=_world_comm && _comm!=MPI_COMM_NULL)
      _comm_interface.commFree(&_comm);
  }

  // Translates a rank expressed in the numbering of the given group into the
  // corresponding rank in this group (via MPI_Group_translate_ranks).
  int MPIProcessorGroup::translateRank(const ProcessorGroup* group, int rank) const
  {
    const MPIProcessorGroup* targetgroup=dynamic_cast<const MPIProcessorGroup*>(group);
    int local_rank;
    MPI_Group_translate_ranks(targetgroup->_group, 1, &rank, _group, &local_rank);
    return local_rank;
  }
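  // Illustrative sketch (not part of the original file): with two overlapping
  // groups, the rank of a processor known in one numbering can be looked up in
  // the other; `grp_a` and `grp_b` are hypothetical MPIProcessorGroup objects:
  //
  //   int rank_in_b = 0;
  //   int rank_in_a = grp_a.translateRank(&grp_b, rank_in_b);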

  ProcessorGroup* MPIProcessorGroup::createComplementProcGroup() const
  {
    set<int> procs;
    int world_size=_comm_interface.worldSize();
    for (int i=0; i<world_size; i++)
      procs.insert(i);
    for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
      procs.erase(*iter);

    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
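  // Illustrative sketch (not part of the original file): on an 8-process run,
  // the complement of a group holding ranks {0,1,2} holds ranks {3..7};
  // `grp` is a hypothetical MPIProcessorGroup built over those first three ranks:
  //
  //   ParaMEDMEM::ProcessorGroup* rest = grp.createComplementProcGroup();
  //   // the returned group is newly allocated; the caller must delete it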

  ProcessorGroup* MPIProcessorGroup::deepCpy() const
  {
    return new MPIProcessorGroup(*this);
  }

  ProcessorGroup* MPIProcessorGroup::fuse (const ProcessorGroup& group) const
  {
    set<int> procs = _proc_ids;
    const set<int>& distant_proc_ids = group.getProcIDs();
    for (set<int>::const_iterator iter=distant_proc_ids.begin(); iter!=distant_proc_ids.end(); iter++)
      {
        procs.insert(*iter);
      }
    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
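  // Illustrative sketch (not part of the original file): fusing a group holding
  // ranks {0,1} with one holding ranks {1,2} yields a new group over {0,1,2};
  // `source_group` and `target_group` are hypothetical names:
  //
  //   ParaMEDMEM::ProcessorGroup* union_group = source_group.fuse(target_group);
  //   // union_group is newly allocated; the caller is responsible for deleting it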

  int MPIProcessorGroup::myRank() const
  {
    int rank;
    MPI_Comm_rank(_comm,&rank);
    return rank;
  }

  ProcessorGroup* MPIProcessorGroup::createProcGroup() const
  {
    set<int> procs;
    for (set<int>::const_iterator iter=_proc_ids.begin(); iter!= _proc_ids.end(); iter++)
      procs.insert(*iter);

    return new MPIProcessorGroup(_comm_interface, procs, _world_comm);
  }
}