
salome-med 6.5.0
MEDSPLITTER_ParaDomainSelector.cxx
// Copyright (C) 2007-2012  CEA/DEN, EDF R&D
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
//
// See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
//
// File      : MEDSPLITTER_ParaDomainSelector.cxx
// Created   : Wed Jun 24 12:39:59 2009
// Author    : Edward AGAPOV (eap)

#include "MEDSPLITTER_ParaDomainSelector.hxx"

#include "MEDSPLITTER_UserGraph.hxx"
#include "MEDSPLITTER_JointExchangeData.hxx"

#include <MEDMEM_Meshing.hxx>
#include <MEDMEM_DriversDef.hxx>

#include <iostream>
#include <numeric>

#ifdef HAVE_MPI2
#include <mpi.h>
#endif

#ifndef WIN32
#include <sys/sysinfo.h>
#endif

using namespace MEDSPLITTER;
using namespace MED_EN;
using namespace std;

//================================================================================
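/*!
 * \brief Constructor. Find out my rank and the world size, note the start time
 *        and take an initial memory measurement
 */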
//================================================================================

ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
  :_rank(0),_world_size(1), _nb_result_domains(-1), _mesure_memory(mesure_memory),
   _init_time(0.0), _init_memory(0), _max_memory(0)
{
#ifdef HAVE_MPI2
  MPI_Comm_size(MPI_COMM_WORLD,&_world_size) ;
  MPI_Comm_rank(MPI_COMM_WORLD,&_rank) ;
  _init_time = MPI_Wtime();
#endif
  evaluateMemory();
}

//================================================================================
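/*!
 * \brief Destructor
 */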
//================================================================================

ParaDomainSelector::~ParaDomainSelector()
{
}

//================================================================================
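/*!
 * \brief Return true if the processors of the parallel application run on
 *        different hosts: each proc sends its processor name to the next rank
 *        in a ring and compares it with the name received from the previous one
 */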
//================================================================================

bool ParaDomainSelector::isOnDifferentHosts() const
{
  evaluateMemory();
  if ( _world_size < 2 )
    return false;

#ifdef HAVE_MPI2
  char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
  int size;
  MPI_Get_processor_name( name_here, &size);

  int next_proc = (rank() + 1) % nbProcs();
  int prev_proc = (rank() - 1 + nbProcs()) % nbProcs();
  int tag  = 1111111;

  MPI_Status status;
  MPI_Sendrecv((void*)&name_here[0],  MPI_MAX_PROCESSOR_NAME, MPI_CHAR, next_proc, tag,
               (void*)&name_there[0], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, prev_proc, tag,
               MPI_COMM_WORLD, &status);
  return string(name_here) != string(name_there);
#else
  return false;
#endif
}

//================================================================================
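/*!
 * \brief Return true if the domain with the given index is assigned to this proc
 */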
//================================================================================

bool ParaDomainSelector::isMyDomain(int domainIndex) const
{
  evaluateMemory();
  return (_rank == getProccessorID( domainIndex ));
}

//================================================================================
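/*!
 * \brief Return the rank of the processor in charge of the given domain;
 *        domains are distributed among processors in round-robin fashion
 */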
//================================================================================

int ParaDomainSelector::getProccessorID(int domainIndex) const
{
  evaluateMemory();
  return ( domainIndex % _world_size );
}

//================================================================================
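/*!
 * \brief Gather the numbers of elements of the given entity in all domain meshes
 *        over all processors; fill the per-domain element shifts (and, for cells,
 *        the per-processor CSR-style distribution) and return the total number
 *        of elements over all domains
 */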
//================================================================================

int ParaDomainSelector::gatherNbOf(MED_EN::medEntityMesh        entity,
                                   const vector<MEDMEM::MESH*>& domain_meshes)
{
  evaluateMemory();

  // get nb of elems of each domain mesh
  int nb_domains = domain_meshes.size();
  vector<int> nb_elems( nb_domains, 0 );
  for ( int i = 0; i < nb_domains; ++i )
    if ( domain_meshes[i] )
      nb_elems[i] = domain_meshes[i]->getNumberOfElements(entity, MED_ALL_ELEMENTS);

  // receive nb of elems from other procs
  vector<int> all_nb_elems( nb_domains );
#ifdef HAVE_MPI2
  MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains,
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif
  int total_nb = std::accumulate( all_nb_elems.begin(), all_nb_elems.end(), 0 );

  vector<int>& elem_shift_by_domain
    = (entity == MED_CELL) ? _cell_shift_by_domain : _face_shift_by_domain;

  // fill elem_shift_by_domain

  vector< int > ordered_nbs, domain_order( nb_domains );
  ordered_nbs.push_back(0);
  for ( int iproc = 0; iproc < nbProcs(); ++iproc )
    for ( int idomain = 0; idomain < nb_domains; ++idomain )
      if ( getProccessorID( idomain ) == iproc )
      {
        domain_order[ idomain ] = ordered_nbs.size() - 1;
        ordered_nbs.push_back( ordered_nbs.back() + all_nb_elems[idomain] );
      }
  elem_shift_by_domain.resize( nb_domains+1, 0 );
  for ( int idomain = 0; idomain < nb_domains; ++idomain )
    elem_shift_by_domain[ idomain ] = ordered_nbs[ domain_order[ idomain ]];

  elem_shift_by_domain.back() = ordered_nbs.back(); // to know total nb of elements

  if ( entity == MED_CELL )
  {
    // fill _nb_vert_of_procs
    _nb_vert_of_procs.resize( _world_size+1, 0 );
    for ( int i = 0; i < nb_domains; ++i )
    {
      int rank = getProccessorID( i );
      _nb_vert_of_procs[ rank+1 ] += all_nb_elems[ i ];
    }
    _nb_vert_of_procs[0] = 1; // base = 1
    for ( int i = 1; i < _nb_vert_of_procs.size(); ++i )
      _nb_vert_of_procs[ i ] += _nb_vert_of_procs[ i-1 ]; // to CSR format
  }
  else
  {
    // to compute global ids of faces in joints
    //_total_nb_faces = total_nb;
  }

//   if ( !_rank) {
//     MEDMEM::STRING out("_nb_vert_of_procs :");
//     for ( int i = 0; i < _nb_vert_of_procs.size(); ++i )
//       out << _nb_vert_of_procs[i] << " ";
//     std::cout << out << std::endl;
//   }
//   if ( !_rank) {
//     MEDMEM::STRING out("elem_shift_by_domain :");
//     for ( int i = 0; i < elem_shift_by_domain.size(); ++i )
//       out << elem_shift_by_domain[i] << " ";
//     std::cout << out << std::endl;
//   }

  evaluateMemory();

  return total_nb;
}

//================================================================================
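/*!
 * \brief Return a CSR-style array (base 1, size nbProcs()+1) holding the shift
 *        of the first graph vertex (cell) of each processor;
 *        gatherNbOf( MED_CELL ) must be called before
 */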
//================================================================================

#define gatherNbOf_NOT_CALLED(meth) throw MED_EXCEPTION \
("ParaDomainSelector::" #meth "(): gatherNbOf( MED_CELL ) must be called before")

int* ParaDomainSelector::getNbVertOfProcs() const
{
  evaluateMemory();
  if ( _nb_vert_of_procs.empty() )
    gatherNbOf_NOT_CALLED(getNbVertOfProcs);

  return (int*) & _nb_vert_of_procs[0];
}
//================================================================================
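/*!
 * \brief Return the shift to add to domain-local cell numbers to get global
 *        cell numbers; gatherNbOf( MED_CELL ) must be called before
 */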
//================================================================================

int ParaDomainSelector::getDomainShift(int domainIndex) const
{
  evaluateMemory();
  if ( _cell_shift_by_domain.empty() )
    gatherNbOf_NOT_CALLED(getDomainShift);

  return _cell_shift_by_domain[ domainIndex ];
}

//================================================================================
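/*!
 * \brief Return the shift of the first cell of this processor in the global
 *        numbering; gatherNbOf( MED_CELL ) must be called before
 */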
//================================================================================

int ParaDomainSelector::getProcShift() const
{
  evaluateMemory();
  if ( _nb_vert_of_procs.empty() )
    gatherNbOf_NOT_CALLED(getProcShift);

  return _nb_vert_of_procs[_rank]-1;
}

//================================================================================
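/*!
 * \brief Gather the distributed graph (index, values and partition) from all
 *        processors with MPI_Allgatherv and build the whole global graph
 */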
//================================================================================

auto_ptr<Graph> ParaDomainSelector::gatherGraph(const Graph* graph) const
{
  Graph* glob_graph = 0;

  evaluateMemory();
#ifdef HAVE_MPI2

  // ---------------
  // Gather indices
  // ---------------

  vector<int> index_size_of_proc( nbProcs() ); // index sizes - 1
  for ( int i = 1; i < _nb_vert_of_procs.size(); ++i )
    index_size_of_proc[i-1] = _nb_vert_of_procs[ i ] - _nb_vert_of_procs[ i-1 ];

  int index_size = 1 + _cell_shift_by_domain.back();
  int* graph_index = new int[ index_size ];
  const int* index = graph->getGraph()->getIndex();
  int* proc_index_displacement = (int*) & _nb_vert_of_procs[0];

  MPI_Allgatherv((void*) (index+1),         // send local index except first 0 (or 1)
                 index_size_of_proc[_rank], // index size on this proc
                 MPI_INT,
                 (void*) graph_index,       // receive indices
                 & index_size_of_proc[0],   // index size on each proc
                 proc_index_displacement,   // displacement of each proc index
                 MPI_INT,
                 MPI_COMM_WORLD);
  graph_index[0] = index[0]; // it is not overwritten thanks to proc_index_displacement[0]==1

  // get sizes of graph values on each proc from the gathered graph indices
  vector< int > value_size_of_proc( nbProcs() ), proc_value_displacement(1,0);
  for ( int i = 0; i < nbProcs(); ++i )
  {
    if ( index_size_of_proc[i] > 0 )
      value_size_of_proc[i] = graph_index[ proc_index_displacement[ i+1 ]-1 ] - graph_index[0];
    else
      value_size_of_proc[i] = 0;
    proc_value_displacement.push_back( proc_value_displacement.back() + value_size_of_proc[i] );
  }

  // update graph_index
  for ( int i = 1; i < nbProcs(); ++i )
  {
    int shift = graph_index[ proc_index_displacement[i]-1 ]-graph_index[0];
    for ( int j = proc_index_displacement[i]; j < proc_index_displacement[i+1]; ++j )
      graph_index[ j ] += shift;
  }

  // --------------
  // Gather values
  // --------------

  int value_size = graph_index[ index_size-1 ] - graph_index[ 0 ];
  int* graph_value = new int[ value_size ];
  const int* value = graph->getGraph()->getValue();

  MPI_Allgatherv((void*) value,                // send local value
                 value_size_of_proc[_rank],    // value size on this proc
                 MPI_INT,
                 (void*) graph_value,          // receive values
                 & value_size_of_proc[0],      // value size on each proc
                 & proc_value_displacement[0], // displacement of each proc value
                 MPI_INT,
                 MPI_COMM_WORLD);

  // -----------------
  // Gather partition
  // -----------------

  int* partition = new int[ _cell_shift_by_domain.back() ];
  const int* part = graph->getPart();

  MPI_Allgatherv((void*) part,              // send local partition
                 index_size_of_proc[_rank], // index size on this proc
                 MPI_INT,
                 (void*)(partition-1),      // -1 compensates proc_index_displacement[0]==1
                 & index_size_of_proc[0],   // index size on each proc
                 proc_index_displacement,   // displacement of each proc index
                 MPI_INT,
                 MPI_COMM_WORLD);

  // -----------
  // Make graph
  // -----------

  MEDMEM::MEDSKYLINEARRAY* array =
    new MEDMEM::MEDSKYLINEARRAY( index_size-1, value_size, graph_index, graph_value, true );

  glob_graph = new UserGraph( array, partition, index_size-1 );

  evaluateMemory();

  delete [] partition;

#endif // HAVE_MPI2

  return auto_ptr<Graph>( glob_graph );
}

//================================================================================
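/*!
 * \brief Exchange the numbers of elements of each geometric type among all
 *        processors and set this info to the meshes of distant domains
 */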
//================================================================================

void ParaDomainSelector::gatherEntityTypesInfo(vector<MEDMEM::MESH*>& domain_meshes,
                                               MED_EN::medEntityMesh  entity)
{
  const list<medGeometryElement> & all_types = meshEntities[ entity ];

  evaluateMemory();

  // Put nb of elements of the entity of all domains in one vector
  // and, along the way, find out the mesh and space dimensions

  vector<int> nb_of_type( domain_meshes.size() * all_types.size(), 0 );
  int mesh_dim = -1, space_dim = -1;

  for ( int idomain = 0; idomain < domain_meshes.size(); ++idomain )
  {
    if ( !isMyDomain(idomain)) continue;

    int* domain_nbs = & nb_of_type[ idomain * all_types.size()];

    list<medGeometryElement>::const_iterator type = all_types.begin();
    for ( int t = 0; type != all_types.end(); ++t,++type )
      domain_nbs[t] = domain_meshes[idomain]->getNumberOfElements(entity,*type);

    int i_mesh_dim  = domain_meshes[idomain]->getMeshDimension();
    int i_space_dim = domain_meshes[idomain]->getSpaceDimension();
    if ( mesh_dim < i_mesh_dim && i_mesh_dim <= 3 )
      mesh_dim = i_mesh_dim;
    if ( space_dim < i_space_dim && i_space_dim <= 3 )
      space_dim = i_space_dim;
  }

  // Receive nbs from other procs

  vector< int > nb_recv( nb_of_type.size() );
#ifdef HAVE_MPI2
  MPI_Allreduce((void*)&nb_of_type[0], (void*)&nb_recv[0], nb_of_type.size(),
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif

  // Set info to meshes of distant domains

  for ( int idomain = 0; idomain < domain_meshes.size(); ++idomain )
  {
    if ( isMyDomain(idomain)) continue;

    MEDMEM::MESHING* meshing = (MEDMEM::MESHING*) domain_meshes[idomain];
    if ( meshing->getMeshDimension() < mesh_dim )
    {
      //meshing->setSpaceDimension( space_dim );
      meshing->setCoordinates( space_dim, /*NumberOfNodes=*/0, 0, "", 0);
    }

    vector< medGeometryElement > types;
    vector< int >                nb_elems;

    int* domain_nbs = & nb_recv[ idomain * all_types.size()];

    list<medGeometryElement>::const_iterator type = all_types.begin();
    for ( int t = 0; type != all_types.end(); ++t,++type )
    {
      if ( domain_nbs[t]==0 ) continue;
      types.push_back( *type );
      nb_elems.push_back( domain_nbs[t] );
    }
    meshing->setNumberOfTypes( types.size(), entity );
    if ( !types.empty() )
    {
      meshing->setTypes           ( & types[0], entity );
      meshing->setNumberOfElements( & nb_elems[0], entity );
    }
  }
  evaluateMemory();
}

//================================================================================
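/*!
 * \brief Store the number of cell/cell pairs of the joint between two domains
 */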
//================================================================================

void ParaDomainSelector::setNbCellPairs( int nb_cell_pairs, int dist_domain, int loc_domain )
{
  // This method is needed for further computing global numbers of faces in a joint.
  // Store the nb if both domains are on this proc; otherwise only one of the two
  // procs stores it, so that each joint is counted once
  if ( isMyDomain( dist_domain ) || dist_domain < loc_domain )
  {
    if ( _nb_cell_pairs_by_joint.empty() )
      _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);

    int joint_id = jointId( loc_domain, dist_domain );
    _nb_cell_pairs_by_joint[ joint_id ] = nb_cell_pairs;
  }
  evaluateMemory();
}

//================================================================================
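/*!
 * \brief Return the stored number of cell/cell pairs of the joint between two domains
 */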
//================================================================================

int ParaDomainSelector::getNbCellPairs( int dist_domain, int loc_domain ) const
{
  evaluateMemory();

  int joint_id = jointId( loc_domain, dist_domain );
  return _nb_cell_pairs_by_joint[ joint_id ];
}

//================================================================================
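/*!
 * \brief Sum up the per-joint numbers of cell pairs over all processors
 *        and check that each joint was treated by one processor only
 */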
//================================================================================

void ParaDomainSelector::gatherNbCellPairs()
{
  const char* LOC = "MEDSPLITTER::ParaDomainSelector::gatherNbCellPairs(): ";
  if ( _nb_cell_pairs_by_joint.empty() )
    _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);
  evaluateMemory();

  vector< int > send_buf = _nb_cell_pairs_by_joint;
#ifdef HAVE_MPI2
  MPI_Allreduce((void*)&send_buf[0],
                (void*)&_nb_cell_pairs_by_joint[0],
                _nb_cell_pairs_by_joint.size(),
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif
  // check that the set nbs of cell pairs are correct,
  // namely that each joint is treated on one proc only
  for ( int j = 0; j < _nb_cell_pairs_by_joint.size(); ++j )
    if ( _nb_cell_pairs_by_joint[j] != send_buf[j] && send_buf[j]>0 )
      throw MED_EXCEPTION(MEDMEM::STRING(LOC) << "invalid nb of cell pairs");
}

//================================================================================
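/*!
 * \brief Exchange the serialized data of a joint with the processor
 *        in charge of the distant domain
 */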
//================================================================================

void ParaDomainSelector::exchangeJoint( JointExchangeData* joint ) const
{
#ifdef HAVE_MPI2
  vector<int> send_data, recv_data( joint->serialize( send_data ));

  int dest = getProccessorID( joint->distantDomain() );
  int tag  = 1001 + jointId( joint->localDomain(), joint->distantDomain() );

  MPI_Status status;
  MPI_Sendrecv((void*)&send_data[0], send_data.size(), MPI_INT, dest, tag,
               (void*)&recv_data[0], recv_data.size(), MPI_INT, dest, tag,
               MPI_COMM_WORLD, &status);

  joint->deserialize( recv_data );
#endif
}

//================================================================================
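/*!
 * \brief Return the first global id of the sub-entities (faces) created for the
 *        given joint; ids of all preceding joints and of the faces existing
 *        before the joint creation come first
 */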
//================================================================================

int ParaDomainSelector::getFisrtGlobalIdOfSubentity( int loc_domain, int dist_domain ) const
{
  // total_nb_faces includes faces existing before creation of joint faces
  // (computed in gatherNbOf( MED_FACE )).
  evaluateMemory();

  int total_nb_faces = _face_shift_by_domain.empty() ? 0 : _face_shift_by_domain.back();
  int id = total_nb_faces + 1;

  if ( _nb_cell_pairs_by_joint.empty() )
    throw MED_EXCEPTION("MEDSPLITTER::ParaDomainSelector::getFisrtGlobalIdOfSubentity(), "
                        "gatherNbCellPairs() must be called before");
  int joint_id = jointId( loc_domain, dist_domain );
  for ( int j = 0; j < joint_id; ++j )
    id += _nb_cell_pairs_by_joint[ j ];

  return id;
}

//================================================================================
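/*!
 * \brief Send the local sub-entity ids to the processor in charge of the distant
 *        domain and receive that processor's ids in return; the caller owns
 *        the returned array
 */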
//================================================================================

int* ParaDomainSelector::exchangeSubentityIds( int loc_domain, int dist_domain,
                                               const vector<int>& loc_ids_here ) const
{
  int* loc_ids_dist = new int[ loc_ids_here.size()];
#ifdef HAVE_MPI2
  int dest = getProccessorID( dist_domain );
  int tag  = 2002 + jointId( loc_domain, dist_domain );
  MPI_Status status;
  MPI_Sendrecv((void*)&loc_ids_here[0], loc_ids_here.size(), MPI_INT, dest, tag,
               (void*) loc_ids_dist,    loc_ids_here.size(), MPI_INT, dest, tag,
               MPI_COMM_WORLD, &status);
  evaluateMemory();
#endif

  return loc_ids_dist;
}

//================================================================================
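/*!
 * \brief Return a unique id of the joint between two domains,
 *        independent of the order of the two domain indices
 */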
//================================================================================

int ParaDomainSelector::jointId( int local_domain, int distant_domain ) const
{
  evaluateMemory();
  if (_nb_result_domains < 0)
    throw MED_EXCEPTION("ParaDomainSelector::jointId(): setNbDomains() must be called before");

  if ( local_domain < distant_domain )
    swap( local_domain, distant_domain );
  return local_domain * _nb_result_domains + distant_domain;
}

//================================================================================
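/*!
 * \brief Order of a domain among the domains of its processor (commented out, unused)
 */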
//================================================================================

// int ParaDomainSelector::getDomianOrder(int idomain, int nb_domains) const
// {
//   return nb_domains / nbProcs() * getProccessorID( idomain ) + idomain / nbProcs();
// }

//================================================================================
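/*!
 * \brief Return the wall-clock time elapsed since the object construction
 *        (0.0 when built without MPI)
 */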
//================================================================================

double ParaDomainSelector::getPassedTime() const
{
#ifdef HAVE_MPI2
  return MPI_Wtime() - _init_time;
#else
  return 0.0;
#endif
}

//================================================================================
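/*!
 * \brief If memory measurement is enabled, sample the used system memory via
 *        sysinfo() and update the initial and peak values; return the memory
 *        increase since construction, in KB
 */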
//================================================================================

int ParaDomainSelector::evaluateMemory() const
{
  if ( _mesure_memory )
  {
    int used_memory = 0;
#ifndef WIN32
    struct sysinfo si;
    int err = sysinfo( &si );
    if ( !err )
      used_memory =
        (( si.totalram - si.freeram + si.totalswap - si.freeswap ) * si.mem_unit ) / 1024;
#endif
    if ( used_memory > _max_memory )
      ((ParaDomainSelector*) this)->_max_memory = used_memory;

    if ( !_init_memory )
      ((ParaDomainSelector*) this)->_init_memory = used_memory;
  }
  return _max_memory - _init_memory;
}