
salome-med 6.5.0
MEDPARTITIONER_ParaDomainSelector.cxx
00001 // Copyright (C) 2007-2012  CEA/DEN, EDF R&D
00002 //
00003 // This library is free software; you can redistribute it and/or
00004 // modify it under the terms of the GNU Lesser General Public
00005 // License as published by the Free Software Foundation; either
00006 // version 2.1 of the License.
00007 //
00008 // This library is distributed in the hope that it will be useful,
00009 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00010 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00011 // Lesser General Public License for more details.
00012 //
00013 // You should have received a copy of the GNU Lesser General Public
00014 // License along with this library; if not, write to the Free Software
00015 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
00016 //
00017 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
00018 //
00019 
00020 #include "MEDPARTITIONER_ParaDomainSelector.hxx"
00021 #include "MEDPARTITIONER_UserGraph.hxx"
00022 #include "MEDPARTITIONER_Utils.hxx"
00023 
00024 #include "MEDCouplingUMesh.hxx"
00025 
00026 #include <iostream>
00027 #include <numeric>
00028 
00029 #ifdef HAVE_MPI2
00030 #include <mpi.h>
00031 #endif
00032 
00036 MEDPARTITIONER::ParaDomainSelector::ParaDomainSelector(bool mesure_memory)
00037   :_rank(0),_world_size(1), _nb_result_domains(-1), _init_time(0.0),
00038    _mesure_memory(mesure_memory), _init_memory(0), _max_memory(0)
00039 {
00040 #ifdef HAVE_MPI2
00041   if (MyGlobals::_Rank==-1)
00042     {
00043       MPI_Init(0,0);  //do once only
00044       MPI_Comm_size(MPI_COMM_WORLD,&_world_size) ;
00045       MPI_Comm_rank(MPI_COMM_WORLD,&_rank) ;
00046     }
00047   else
00048     {
00049       _world_size=MyGlobals::_World_Size;
00050       _rank=MyGlobals::_Rank;
00051     }
00052   _init_time = MPI_Wtime();
00053 #else
00054   //sequential : no MPI
00055   _world_size=1;
00056   _rank=0;
00057   if (MyGlobals::_Verbose>10)
00058     std::cout << "WARNING : ParaDomainSelector constructor without parallel mode, World_Size=1 by default" << std::endl;
00059 #endif
00060   MyGlobals::_World_Size=_world_size;
00061   MyGlobals::_Rank=_rank;
00062   
00063   if (MyGlobals::_Verbose>200) std::cout << "proc " << MyGlobals::_Rank << " of " << MyGlobals::_World_Size << std::endl;
00064   evaluateMemory();
00065 }
00066 
00067 MEDPARTITIONER::ParaDomainSelector::~ParaDomainSelector()
00068 {
00069 }
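// A minimal usage sketch (hypothetical driver code; the call order is inferred from the
// methods of this file and from the exceptions thrown below, e.g. "setNbDomains() must
// be called before" and "_nb_vert_of_procs not set"):
//
//   MEDPARTITIONER::ParaDomainSelector selector(/*mesure_memory=*/false);
//   selector.setNbDomains(nb_result_domains);   // needed before jointId()/setNbCellPairs()
//   selector.gatherNbOf(domain_meshes);         // fills cell/node shifts and the vtxdist array
//   if (selector.isMyDomain(idomain))
//     { /* this rank owns domain idomain (round-robin, see getProcessorID) */ }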
00070 
00074 bool MEDPARTITIONER::ParaDomainSelector::isOnDifferentHosts() const
00075 {
00076   evaluateMemory();
00077   if ( _world_size < 2 ) return false;
00078 
00079 #ifdef HAVE_MPI2
00080   char name_here[ MPI_MAX_PROCESSOR_NAME+1 ], name_there[ MPI_MAX_PROCESSOR_NAME+1 ];
00081   int size;
00082   MPI_Get_processor_name( name_here, &size);
00083 
00084   int next_proc = (rank() + 1) % nbProcs();
00085   int prev_proc = (rank() - 1 + nbProcs()) % nbProcs();
00086   int tag  = 1111111;
00087 
00088   MPI_Status status;
00089   MPI_Sendrecv((void*)&name_here[0],  MPI_MAX_PROCESSOR_NAME, MPI_CHAR, next_proc, tag,
00090                (void*)&name_there[0], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, prev_proc, tag,
00091                MPI_COMM_WORLD, &status);
00092                
00093   //note: a pairwise "here vs there" name comparison is not enough (hosts may differ elsewhere), hence the Allreduce below
00094   //return string(name_here) != string(name_there);
00095   
00096   int sum_same = -1;
00097   int same = 1;
00098   if (std::string(name_here) != std::string(name_there))
00099     same=0;
00100   MPI_Allreduce( &same, &sum_same, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
00101   return (sum_same != nbProcs());
00102 #endif
  return false; // never reached with MPI; avoids falling off the end of the function in a non-MPI build
00103 }
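// How the reduction above works: each proc compares its host name with the one received
// from its ring predecessor and contributes same==1 on a match, 0 otherwise; after
// MPI_Allreduce(MPI_SUM), sum_same == nbProcs() only if every pair matched, i.e. all
// procs run on one host. For example (hypothetical hosts), 4 procs on hosts A,A,B,B
// give same = {0,1,0,1}, so sum_same = 2 != 4 and every rank consistently answers
// "different hosts" even though its own neighbour may match.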
00104 
00110 bool MEDPARTITIONER::ParaDomainSelector::isMyDomain(int domainIndex) const
00111 {
00112   evaluateMemory();
00113   return (_rank == getProcessorID( domainIndex ));
00114 }
00115 
00121 int MEDPARTITIONER::ParaDomainSelector::getProcessorID(int domainIndex) const
00122 {
00123   evaluateMemory();
00124   return ( domainIndex % _world_size );
00125 }
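// Domains are assigned to procs round-robin by the modulo above; for example
// (hypothetical sizes) with _world_size == 3 and 7 domains:
//   domain    : 0 1 2 3 4 5 6
//   processor : 0 1 2 0 1 2 0
// so isMyDomain(d) is true on exactly one rank for each domain d.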
00126 
00134 void MEDPARTITIONER::ParaDomainSelector::gatherNbOf(const std::vector<ParaMEDMEM::MEDCouplingUMesh*>& domain_meshes)
00135 {
00136   evaluateMemory();
00137   // get nb of elems of each domain mesh
00138   int nb_domains=domain_meshes.size();
00139   std::vector<int> nb_elems(nb_domains*2, 0); //NumberOfCells & NumberOfNodes
00140   for (int i=0; i<nb_domains; ++i)
00141     if ( domain_meshes[i] )
00142       {
00143         nb_elems[i*2] = domain_meshes[i]->getNumberOfCells();
00144         nb_elems[i*2+1] = domain_meshes[i]->getNumberOfNodes();
00145       }
00146   // receive nb of elems from other procs
00147   std::vector<int> all_nb_elems;
00148   if (MyGlobals::_World_Size==1)
00149     {
00150       all_nb_elems=nb_elems;
00151     }
00152   else
00153     {
00154 #ifdef HAVE_MPI2
00155       all_nb_elems.resize( nb_domains*2 );
00156       MPI_Allreduce((void*)&nb_elems[0], (void*)&all_nb_elems[0], nb_domains*2, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
00157 #else
00158       throw INTERP_KERNEL::Exception("not(HAVE_MPI2) incompatible with MPI_World_Size>1");
00159 #endif
00160    }
00161   int total_nb_cells=0, total_nb_nodes=0;
00162   for (int i=0; i<nb_domains; ++i)
00163     {
00164       total_nb_cells+=all_nb_elems[i*2];
00165       total_nb_nodes+=all_nb_elems[i*2+1];
00166     }
00167   
00168   if (MyGlobals::_Is0verbose>10)
00169     std::cout << "totalNbCells " << total_nb_cells << " totalNbNodes " << total_nb_nodes << std::endl;
00170   
00171   std::vector<int>& cell_shift_by_domain=_cell_shift_by_domain;
00172   std::vector<int>& node_shift_by_domain=_node_shift_by_domain;
00173   std::vector<int>& face_shift_by_domain=_face_shift_by_domain;
00174  
00175   std::vector< int > ordered_nbs_cell, ordered_nbs_node, domain_order(nb_domains);
00176   ordered_nbs_cell.push_back(0);
00177   ordered_nbs_node.push_back(0);
00178   for (int iproc=0; iproc<nbProcs(); ++iproc)
00179     for (int idomain=0; idomain<nb_domains; ++idomain)
00180       if (getProcessorID( idomain )==iproc)
00181         {
00182           domain_order[idomain] = ordered_nbs_cell.size() - 1;
00183           ordered_nbs_cell.push_back( ordered_nbs_cell.back() + all_nb_elems[idomain*2] );
00184           ordered_nbs_node.push_back( ordered_nbs_node.back() + all_nb_elems[idomain*2+1] );
00185         }
00186   cell_shift_by_domain.resize( nb_domains+1, 0 );
00187   node_shift_by_domain.resize( nb_domains+1, 0 );
00188   face_shift_by_domain.resize( nb_domains+1, 0 );
00189   for (int idomain=0; idomain<nb_domains; ++idomain)
00190     {
00191       cell_shift_by_domain[ idomain ] = ordered_nbs_cell[ domain_order[ idomain ]];
00192       node_shift_by_domain[ idomain ] = ordered_nbs_node[ domain_order[ idomain ]];
00193     }
00194   cell_shift_by_domain.back() = ordered_nbs_cell.back(); // to know total nb of elements
00195   node_shift_by_domain.back() = ordered_nbs_node.back(); // to know total nb of elements
00196   
00197   if (MyGlobals::_Is0verbose>300)
00198     {
00199       std::cout << "proc " << MyGlobals::_Rank << " : cellShiftByDomain ";
00200       for (int i=0; i<=nb_domains; ++i)
00201         std::cout << cell_shift_by_domain[i] << "|";
00202       std::cout << std::endl;
00203       std::cout << "proc " << MyGlobals::_Rank << " : nodeShiftByDomain ";
00204       for (int i=0; i<=nb_domains; ++i)
00205         std::cout << node_shift_by_domain[i] << "|";
00206       std::cout << std::endl;
00207     }
00208 // fill _nb_vert_of_procs (the "vtxdist" array: cumulated cell counts per proc, see the worked example after this function)
00209   _nb_vert_of_procs.resize(_world_size+1);
00210   _nb_vert_of_procs[0] = 0; // base = 0
00211   for (int i=0; i<nb_domains; ++i)
00212     {
00213       int rankk = getProcessorID(i);
00214       _nb_vert_of_procs[rankk+1] += all_nb_elems[i*2];
00215     }
00216   for (std::size_t i=1; i<_nb_vert_of_procs.size(); ++i)
00217     _nb_vert_of_procs[i] += _nb_vert_of_procs[i-1]; // to CSR format : cumulated
00218   
00219   if (MyGlobals::_Is0verbose>200)
00220     {
00221       std::cout << "proc " << MyGlobals::_Rank << " : gatherNbOf : vtxdist is ";
00222       for (int i = 0; i <= _world_size; ++i)
00223         std::cout << _nb_vert_of_procs[i] << " ";
00224       std::cout << std::endl;
00225     }
00226   
00227   evaluateMemory();
00228   return;
00229 }
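// A worked example (hypothetical numbers) of the arrays built by gatherNbOf(),
// assuming 2 procs and 3 domains with 10, 20 and 30 cells:
//   getProcessorID        : domain 0 -> proc 0, domain 1 -> proc 1, domain 2 -> proc 0
//   ordered_nbs_cell      : 0 10 40 60    (domains visited in proc order: 0, 2, 1)
//   _cell_shift_by_domain : 0 40 10 60    (last entry = total number of cells)
//   _nb_vert_of_procs     : 0 40 60       (cumulated cells per proc: 40 on proc 0, 20 on proc 1)
// _nb_vert_of_procs is the "vtxdist" array returned by getProcVtxdist(); the node
// shifts are built the same way from the node counts.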
00230 
00239 int *MEDPARTITIONER::ParaDomainSelector::getProcVtxdist() const
00240 {
00241   evaluateMemory();
00242   if (_nb_vert_of_procs.empty())
00243     throw INTERP_KERNEL::Exception("_nb_vert_of_procs not set");
00244   return const_cast<int*>(& _nb_vert_of_procs[0]);
00245 }
00246 
00253 int MEDPARTITIONER::ParaDomainSelector::getDomainCellShift(int domainIndex) const
00254 {
00255   evaluateMemory();
00256   if (_cell_shift_by_domain.empty())
00257     throw INTERP_KERNEL::Exception("_cell_shift_by_domain not set");
00258   return _cell_shift_by_domain[domainIndex];
00259 }
00260 
00261 int MEDPARTITIONER::ParaDomainSelector::getDomainNodeShift(int domainIndex) const
00262 {
00263   evaluateMemory();
00264   if (_node_shift_by_domain.empty())
00265     throw INTERP_KERNEL::Exception("_node_shift_by_domain not set");
00266   return _node_shift_by_domain[domainIndex];
00267 }
00268 
00275 int MEDPARTITIONER::ParaDomainSelector::getProcNodeShift() const
00276 {
00277   evaluateMemory();
00278   if (_nb_vert_of_procs.empty())
00279     throw INTERP_KERNEL::Exception("_nb_vert_of_procs not set");
00280   return _nb_vert_of_procs[_rank];
00281 }
00282 
00286 std::auto_ptr<MEDPARTITIONER::Graph> MEDPARTITIONER::ParaDomainSelector::gatherGraph(const Graph* graph) const
00287 {
00288   Graph* glob_graph = 0;
00289 
00290   evaluateMemory();
00291 #ifdef HAVE_MPI2
00292 
00293   // ---------------
00294   // Gather indices
00295   // ---------------
00296 
00297   std::vector<int> index_size_of_proc( nbProcs() ); // number of graph vertices owned by each proc (index size - 1)
00298   for ( std::size_t i = 1; i < _nb_vert_of_procs.size(); ++i )
00299     index_size_of_proc[i-1] = _nb_vert_of_procs[ i ] - _nb_vert_of_procs[ i-1 ];
00300 
00301   int index_size = 1 + _cell_shift_by_domain.back();
00302   int *graph_index = new int[ index_size ];
00303   const int *index = graph->getGraph()->getIndex();
00304   int *proc_index_displacement = const_cast<int*>( & _nb_vert_of_procs[0] );
00305 
00306   MPI_Allgatherv((void*) (index+1),         // send local index except first 0 (or 1)
00307                  index_size_of_proc[_rank], // index size on this proc
00308                  MPI_INT,
00309                  (void*) graph_index,       // receive indices
00310                  & index_size_of_proc[0],   // index size on each proc
00311                  proc_index_displacement,   // displacement of each proc index
00312                  MPI_INT,
00313                  MPI_COMM_WORLD);
00314   graph_index[0] = index[0]; // it is not overwritten thanks to proc_index_displacement[0]==1
00315 
00316   // deduce the size of the graph values on each proc from the indices just gathered
00317   std::vector< int > value_size_of_proc( nbProcs() ), proc_value_displacement(1,0);
00318   for ( int i = 0; i < nbProcs(); ++i )
00319     {
00320       if ( index_size_of_proc[i] > 0 )
00321         value_size_of_proc[i] = graph_index[ proc_index_displacement[ i+1 ]-1 ] - graph_index[0];
00322       else
00323         value_size_of_proc[i] = 0;
00324       proc_value_displacement.push_back( proc_value_displacement.back() + value_size_of_proc[i] );
00325     }
00326   
00327   // update graph_index
00328   for ( int i = 1; i < nbProcs(); ++i )
00329     {
00330       int shift = graph_index[ proc_index_displacement[i]-1 ]-graph_index[0];
00331       for ( int j = proc_index_displacement[i]; j < proc_index_displacement[i+1]; ++j )
00332         graph_index[ j ] += shift;
00333     }
00334   
00335   // --------------
00336   // Gather values
00337   // --------------
00338 
00339   int value_size = graph_index[ index_size-1 ] - graph_index[ 0 ];
00340   int *graph_value = new int[ value_size ];
00341   const int *value = graph->getGraph()->getValue();
00342 
00343   MPI_Allgatherv((void*) value,                // send local value
00344                  value_size_of_proc[_rank],    // value size on this proc
00345                  MPI_INT,
00346                  (void*) graph_value,          // receive values
00347                  & value_size_of_proc[0],      // value size on each proc
00348                  & proc_value_displacement[0], // displacement of each proc value
00349                  MPI_INT,
00350                  MPI_COMM_WORLD);
00351 
00352   // -----------------
00353   // Gather partition
00354   // -----------------
00355 
00356   int * partition = new int[ _cell_shift_by_domain.back() ];
00357   const int* part = graph->getPart();
00358   
00359   MPI_Allgatherv((void*) part,              // send local partition
00360                  index_size_of_proc[_rank], // index size on this proc
00361                  MPI_INT,
00362                  (void*)(partition-1),      // -1 compensates proc_index_displacement[0]==1
00363                  & index_size_of_proc[0],   // index size on each proc
00364                  proc_index_displacement,   // displacement of each proc index
00365                  MPI_INT,
00366                  MPI_COMM_WORLD);
00367 
00368   // -----------
00369   // Make graph
00370   // -----------
00371 
00372   //   MEDPARTITIONER::SkyLineArray* array =
00373   //     new MEDPARTITIONER::SkyLineArray( index_size-1, value_size, graph_index, graph_value, true );
00374 
00375   //   glob_graph = new UserGraph( array, partition, index_size-1 );
00376 
00377   evaluateMemory();
00378 
00379   delete [] partition;
00380 
00381 #endif // HAVE_MPI2
00382 
00383   return std::auto_ptr<Graph>( glob_graph );
00384 }
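// Note on gatherGraph(): each proc holds a slice of the distributed CSR graph, so the
// per-proc index arrays gathered above are local (each restarts from its own first
// value); the "update graph_index" loop re-bases the entries of every proc by the last
// global index of the procs before it, making the concatenated index cumulative, with
// its last entry equal to the size of the gathered adjacency (value) array. In this
// version the final UserGraph construction is commented out, so the returned
// std::auto_ptr<Graph> holds a null pointer.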
00385 
00386 
00390 void MEDPARTITIONER::ParaDomainSelector::setNbCellPairs( int nb_cell_pairs, int dist_domain, int loc_domain )
00391 {
00392   // This method is needed later to compute global ids of the faces in a joint.
00393   // Store so that each joint is recorded on one proc only: the one holding both domains, or otherwise the one holding the domain with the larger index
00394   if ( isMyDomain( dist_domain ) || dist_domain < loc_domain )
00395     {
00396       if ( _nb_cell_pairs_by_joint.empty() )
00397         _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);
00398 
00399       int joint_id = jointId( loc_domain, dist_domain );
00400       _nb_cell_pairs_by_joint[ joint_id ] = nb_cell_pairs;
00401     }
00402   evaluateMemory();
00403 }
00404 
00405 //================================================================================
00409 //================================================================================
00410 
00411 int MEDPARTITIONER::ParaDomainSelector::getNbCellPairs( int dist_domain, int loc_domain ) const
00412 {
00413   evaluateMemory();
00414 
00415   int joint_id = jointId( loc_domain, dist_domain );
00416   return _nb_cell_pairs_by_joint[ joint_id ];
00417 }
00418 
00419 //================================================================================
00423 //================================================================================
00424 
00425 void MEDPARTITIONER::ParaDomainSelector::gatherNbCellPairs()
00426 {
00427   if ( _nb_cell_pairs_by_joint.empty() )
00428     _nb_cell_pairs_by_joint.resize( _nb_result_domains*(_nb_result_domains+1), 0);
00429   evaluateMemory();
00430 
00431   std::vector< int > send_buf = _nb_cell_pairs_by_joint;
00432 #ifdef HAVE_MPI2
00433   MPI_Allreduce((void*)&send_buf[0],
00434                 (void*)&_nb_cell_pairs_by_joint[0],
00435                 _nb_cell_pairs_by_joint.size(),
00436                 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
00437 #endif
00438   // check that the numbers of cell pairs set before the reduction are still correct,
00439   // i.e. that each joint was treated on one proc only
00440   for ( std::size_t j = 0; j < _nb_cell_pairs_by_joint.size(); ++j )
00441     if ( _nb_cell_pairs_by_joint[j] != send_buf[j] && send_buf[j]>0 )
00442       throw INTERP_KERNEL::Exception("invalid nb of cell pairs");
00443 }
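// Since setNbCellPairs() records each joint on one proc only, the element-wise MPI_SUM
// above should leave every slot equal to its single non-zero contribution; the check
// rejects the case where a proc had set a non-zero value that the sum changed, i.e.
// the same joint was counted on more than one proc.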
00444 
00445 //================================================================================
00449 //================================================================================
00450 
00451 int MEDPARTITIONER::ParaDomainSelector::getFisrtGlobalIdOfSubentity( int loc_domain, int dist_domain ) const
00452 {
00453   // total_nb_faces counts the faces existing before the creation of joint faces
00454   // (as obtained in gatherNbOf( MED_FACE )).
00455   evaluateMemory();
00456 
00457   int total_nb_faces = _face_shift_by_domain.empty() ? 0 : _face_shift_by_domain.back();
00458   int id = total_nb_faces + 1;
00459 
00460   if ( _nb_cell_pairs_by_joint.empty() )
00461     throw INTERP_KERNEL::Exception("gatherNbCellPairs() must be called before");
00462   int joint_id = jointId( loc_domain, dist_domain );
00463   for ( int j = 0; j < joint_id; ++j )
00464     id += _nb_cell_pairs_by_joint[ j ];
00465 
00466   return id;
00467 }
00468 
00469 //================================================================================
00473 //================================================================================
00474 
00475 int *MEDPARTITIONER::ParaDomainSelector::exchangeSubentityIds( int loc_domain, int dist_domain,
00476                                                const std::vector<int>& loc_ids_here ) const
00477 {
00478   int* loc_ids_dist = new int[ loc_ids_here.size()];
00479 #ifdef HAVE_MPI2
00480   int dest = getProcessorID( dist_domain );
00481   int tag  = 2002 + jointId( loc_domain, dist_domain );
00482   MPI_Status status;
00483   MPI_Sendrecv((void*)&loc_ids_here[0], loc_ids_here.size(), MPI_INT, dest, tag,
00484                (void*) loc_ids_dist,    loc_ids_here.size(), MPI_INT, dest, tag,
00485                MPI_COMM_WORLD, &status);  
00486 #endif
00487   evaluateMemory();
00488 
00489   return loc_ids_dist;
00490 }
00491 
00492 //================================================================================
00496 //================================================================================
00497 
00498 int MEDPARTITIONER::ParaDomainSelector::jointId( int local_domain, int distant_domain ) const
00499 {
00500   evaluateMemory();
00501   if (_nb_result_domains < 0)
00502     throw INTERP_KERNEL::Exception("setNbDomains() must be called before");
00503 
00504   if ( local_domain < distant_domain )
00505     std::swap( local_domain, distant_domain );
00506   return local_domain * _nb_result_domains + distant_domain;
00507 }
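// A small worked example of jointId(): the pair is ordered so that the larger domain
// index comes first in the formula, hence with _nb_result_domains == 4 (hypothetical value)
//   jointId(1,3) == jointId(3,1) == 3*4 + 1 == 13
// one symmetric id per unordered pair of domains, used to index
// _nb_cell_pairs_by_joint (sized _nb_result_domains*(_nb_result_domains+1)).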
00508 
00509 
00510 //================================================================================
00514 //================================================================================
00515 
00516 double MEDPARTITIONER::ParaDomainSelector::getPassedTime() const
00517 {
00518 #ifdef HAVE_MPI2
00519   return MPI_Wtime() - _init_time;
00520 #else
00521   return 0.0;
00522 #endif
00523 }
00524 
00531 void MEDPARTITIONER::ParaDomainSelector::sendMesh(const ParaMEDMEM::MEDCouplingUMesh& mesh, int target) const
00532 {
00533 #ifndef HAVE_MPI2
00534   throw INTERP_KERNEL::Exception("ParaDomainSelector::sendMesh : incoherent call in non_MPI mode");
00535 #else
00536   if (MyGlobals::_Verbose>600)
00537     std::cout << "proc " << _rank << " : sendMesh '" << mesh.getName() << "' size " << mesh.getNumberOfCells() << " to " << target << std::endl;
00538   // First stage : sending sizes
00539   // ------------------------------
00540   std::vector<int> tinyInfoLocal;
00541   std::vector<std::string> tinyInfoLocalS;
00542   std::vector<double> tinyInfoLocalD;
00543   //Getting tiny info of local mesh to allow the distant proc to initialize and allocate
00544   //the transmitted mesh.
00545   mesh.getTinySerializationInformation(tinyInfoLocalD,tinyInfoLocal,tinyInfoLocalS);
00546   tinyInfoLocal.push_back(mesh.getNumberOfCells());
00547   int tinySize=tinyInfoLocal.size();
00548   MPI_Send(&tinySize, 1, MPI_INT, target, 1113, MPI_COMM_WORLD);
00549   MPI_Send(&tinyInfoLocal[0], tinyInfoLocal.size(), MPI_INT, target, 1112, MPI_COMM_WORLD);
00550 
00551   if (mesh.getNumberOfCells()>0) //no sends if empty
00552     {
00553       ParaMEDMEM::DataArrayInt *v1Local=0;
00554       ParaMEDMEM::DataArrayDouble *v2Local=0;
00555       //serialization of local mesh to send data to distant proc.
00556       mesh.serialize(v1Local,v2Local);
00557       int nbLocalElems=0;
00558       int* ptLocal=0;
00559       if(v1Local) //if empty getNbOfElems() is 1!
00560         {
00561           nbLocalElems=v1Local->getNbOfElems(); // note: would be 1 even for an empty mesh
00562           ptLocal=v1Local->getPointer();
00563         }
00564       MPI_Send(ptLocal, nbLocalElems, MPI_INT, target, 1111, MPI_COMM_WORLD);
00565       int nbLocalElems2=0;
00566       double *ptLocal2=0;
00567       if(v2Local) //if the mesh is empty this is 0
00568         {
00569           nbLocalElems2=v2Local->getNbOfElems();
00570           ptLocal2=v2Local->getPointer();
00571         }
00572       MPI_Send(ptLocal2, nbLocalElems2, MPI_DOUBLE, target, 1110, MPI_COMM_WORLD);
00573       if(v1Local) v1Local->decrRef();
00574       if(v2Local) v2Local->decrRef();
00575     }
00576 #endif
00577 }
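// sendMesh()/recvMesh() form a matched point-to-point protocol; the message sequence,
// as read from the tags above, is:
//   tag 1113 : 1 int         -> size of the "tiny info" vector
//   tag 1112 : tinySize ints -> tiny serialization info, with the number of cells appended last
//   tag 1111 : ints          -> the DataArrayInt produced by mesh.serialize()    (only if the mesh has cells)
//   tag 1110 : doubles       -> the DataArrayDouble produced by mesh.serialize() (only if the mesh has cells)
// recvMesh() below posts the matching MPI_Recv calls in the same order.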
00578 
00584 void MEDPARTITIONER::ParaDomainSelector::recvMesh(ParaMEDMEM::MEDCouplingUMesh*& mesh, int source)const
00585 {
00586 #ifndef HAVE_MPI2
00587   throw INTERP_KERNEL::Exception("ParaDomainSelector::recvMesh : incoherent call in non_MPI mode");
00588 #else
00589   // First stage : exchanging sizes
00590   // ------------------------------
00591   std::vector<int> tinyInfoDistant;
00592   std::vector<std::string> tinyInfoLocalS;
00593   std::vector<double> tinyInfoDistantD(1);
00594   //Receiving the tiny info of the distant mesh, needed to initialize and allocate
00595   //the local copy of the transmitted mesh.
00596   MPI_Status status; 
00597   int tinyVecSize;
00598   MPI_Recv(&tinyVecSize, 1, MPI_INT, source, 1113, MPI_COMM_WORLD, &status);
00599   tinyInfoDistant.resize(tinyVecSize);
00600   std::fill(tinyInfoDistant.begin(),tinyInfoDistant.end(),0);
00601 
00602   MPI_Recv(&tinyInfoDistant[0], tinyVecSize, MPI_INT,source,1112,MPI_COMM_WORLD, &status);
00603   //the sender appended mesh.getNumberOfCells() as the last entry (tinyInfoLocal.push_back in sendMesh)
00604   int NumberOfCells=tinyInfoDistant[tinyVecSize-1];
00605   if (NumberOfCells>0)
00606     {
00607       ParaMEDMEM::DataArrayInt *v1Distant=ParaMEDMEM::DataArrayInt::New();
00608       ParaMEDMEM::DataArrayDouble *v2Distant=ParaMEDMEM::DataArrayDouble::New();
00609       //Building the right instance of copy of distant mesh.
00610       ParaMEDMEM::MEDCouplingPointSet *distant_mesh_tmp=
00611         ParaMEDMEM::MEDCouplingPointSet::BuildInstanceFromMeshType(
00612                                                                    (ParaMEDMEM::MEDCouplingMeshType) tinyInfoDistant[0]);
00613       std::vector<std::string> unusedTinyDistantSts;
00614       mesh=dynamic_cast<ParaMEDMEM::MEDCouplingUMesh*> (distant_mesh_tmp);
00615  
00616       mesh->resizeForUnserialization(tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
00617       int nbDistElem=0;
00618       int *ptDist=0;
00619       if(v1Distant)
00620         {
00621           nbDistElem=v1Distant->getNbOfElems();
00622           ptDist=v1Distant->getPointer();
00623         }
00624       MPI_Recv(ptDist, nbDistElem, MPI_INT, source,1111, MPI_COMM_WORLD, &status);
00625       double *ptDist2=0;
00626       nbDistElem=0;
00627       if(v2Distant)
00628         {
00629           nbDistElem=v2Distant->getNbOfElems();
00630           ptDist2=v2Distant->getPointer();
00631         }
00632       MPI_Recv(ptDist2, nbDistElem, MPI_DOUBLE,source, 1110, MPI_COMM_WORLD, &status);
00633       //finish unserialization
00634       mesh->unserialization(tinyInfoDistantD,tinyInfoDistant,v1Distant,v2Distant,unusedTinyDistantSts);
00635       if(v1Distant) v1Distant->decrRef();
00636       if(v2Distant) v2Distant->decrRef();
00637     }
00638   else
00639     {
00640       mesh=CreateEmptyMEDCouplingUMesh();
00641     }
00642   if (MyGlobals::_Verbose>600)
00643     std::cout << "proc " << _rank << " : recvMesh '" << mesh->getName() << "' size " << mesh->getNumberOfCells() << " from " << source << std::endl;
00644 #endif
00645 }
00646 
00647 #ifndef WIN32
00648 #include <sys/sysinfo.h>
00649 #endif
00650 
00654 int MEDPARTITIONER::ParaDomainSelector::evaluateMemory() const
00655 {
00656   if ( _mesure_memory )
00657     {
00658       int used_memory = 0;
00659 #ifndef WIN32
00660       struct sysinfo si;
00661       int err = sysinfo( &si );
00662       if ( !err )
00663         used_memory = (( si.totalram - si.freeram + si.totalswap - si.freeswap ) * si.mem_unit ) / 1024;
00664 #endif
00665       if ( used_memory > _max_memory )
00666         _max_memory = used_memory;
00667 
00668       if ( !_init_memory )
00669         _init_memory = used_memory;
00670     }
00671   return _max_memory - _init_memory;
00672 }
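// evaluateMemory() reports, in KiB, the difference between the highest system-wide
// memory usage observed so far (ram + swap in use, from sysinfo(), scaled by mem_unit)
// and the usage recorded at the first measured call; when _mesure_memory is false,
// or on WIN32 where sysinfo() is unavailable, it simply returns 0.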