Back to index

salome-med  6.5.0
MPIAccessDEC.cxx
Go to the documentation of this file.
00001 // Copyright (C) 2007-2012  CEA/DEN, EDF R&D
00002 //
00003 // This library is free software; you can redistribute it and/or
00004 // modify it under the terms of the GNU Lesser General Public
00005 // License as published by the Free Software Foundation; either
00006 // version 2.1 of the License.
00007 //
00008 // This library is distributed in the hope that it will be useful,
00009 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00010 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00011 // Lesser General Public License for more details.
00012 //
00013 // You should have received a copy of the GNU Lesser General Public
00014 // License along with this library; if not, write to the Free Software
00015 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
00016 //
00017 // See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
00018 //
00019 
00020 #include "MPIAccessDEC.hxx"
00021 
00022 #include <cstring>
00023 
00024 using namespace std;
00025 
00026 namespace ParaMEDMEM
00027 {    
00028 
00042   MPIAccessDEC::MPIAccessDEC( const ProcessorGroup& source_group,
00043                               const ProcessorGroup& target_group,
00044                               bool Asynchronous )
00045   {
00046 
00047     ProcessorGroup * union_group = source_group.fuse(target_group) ;  
00048     int i ;
00049     std::set<int> procs;
00050     for ( i = 0 ; i < union_group->size() ; i++ )
00051       {
00052         procs.insert(i) ;
00053       }
00054     MPIProcessorGroup *mpilg = (MPIProcessorGroup *)&source_group;
00055     _MPI_union_group = new ParaMEDMEM::MPIProcessorGroup( union_group->getCommInterface(),procs,mpilg->getWorldComm());
00056     delete union_group ;
00057     _my_rank = _MPI_union_group->myRank() ;
00058     _group_size = _MPI_union_group->size() ;
00059     _MPI_access = new MPIAccess( _MPI_union_group ) ;
00060     _asynchronous = Asynchronous ;
00061     _time_messages = new vector< vector< TimeMessage > > ;
00062     _time_messages->resize( _group_size ) ;
00063     _out_of_time = new vector< bool > ;
00064     _out_of_time->resize( _group_size ) ;
00065     _data_messages_recv_count = new vector< int > ;
00066     _data_messages_recv_count->resize( _group_size ) ;
00067     for ( i = 0 ; i < _group_size ; i++ )
00068       {
00069         (*_out_of_time)[i] = false ;
00070         (*_data_messages_recv_count)[i] = 0 ;
00071       }
00072     _data_messages_type = new vector< MPI_Datatype > ;
00073     _data_messages_type->resize( _group_size ) ;
00074     _data_messages = new vector< vector< void * > > ;
00075     _data_messages->resize( _group_size ) ;
00076     _time_interpolator = NULL ;
00077     _map_of_send_buffers = new map< int , SendBuffStruct * > ;
00078   }
00079 
00080   MPIAccessDEC::~MPIAccessDEC()
00081   {
00082     checkFinalSent() ;
00083     checkFinalRecv() ;
00084     delete _MPI_union_group ;
00085     delete _MPI_access ;
00086     if ( _time_interpolator )
00087       delete _time_interpolator ;
00088     if ( _time_messages )
00089       delete _time_messages ;
00090     if ( _out_of_time )
00091       delete _out_of_time ;
00092     if ( _data_messages_recv_count )
00093       delete _data_messages_recv_count ;
00094     if ( _data_messages_type )
00095       delete _data_messages_type ;
00096     if ( _data_messages )
00097       delete _data_messages ;
00098     if ( _map_of_send_buffers )
00099       delete _map_of_send_buffers ;
00100   } 
00101 
00102   void MPIAccessDEC::setTimeInterpolator( TimeInterpolationMethod aTimeInterp ,
00103                                           double InterpPrecision, int nStepBefore,
00104                                           int nStepAfter )
00105   {
00106     if ( _time_interpolator )
00107       delete _time_interpolator ;
00108     switch ( aTimeInterp )
00109       {
00110       case WithoutTimeInterp :
00111         _time_interpolator = NULL ;
00112         _n_step_before = 0 ;
00113         _n_step_after = 0 ;
00114         break ;
00115       case LinearTimeInterp :
00116         _time_interpolator = new LinearTimeInterpolator( InterpPrecision , nStepBefore ,
00117                                                          nStepAfter ) ;
00118         _n_step_before = nStepBefore ;
00119         _n_step_after = nStepAfter ;
00120         int i ;
00121         for ( i = 0 ; i < _group_size ; i++ )
00122           {
00123             (*_time_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
00124             (*_data_messages)[ i ].resize( _n_step_before + _n_step_after ) ;
00125             int j ;
00126             for ( j = 0 ; j < _n_step_before + _n_step_after ; j++ )
00127               {
00128                 (*_time_messages)[ i ][ j ].time = -1 ;
00129                 (*_time_messages)[ i ][ j ].deltatime = -1 ;
00130                 (*_data_messages)[ i ][ j ] = NULL ;
00131               }
00132           }
00133         break ;
00134       }
00135   }
00136 
00144   int MPIAccessDEC::send( void* sendbuf, int sendcount , int offset ,
00145                           MPI_Datatype sendtype , int target , int &SendRequestId )
00146   {
00147     int sts ;
00148     if ( _asynchronous )
00149       {
00150         if ( sendtype == MPI_INT )
00151           {
00152             sts = _MPI_access->ISend( &((int *) sendbuf)[offset] , sendcount , sendtype ,
00153                                       target , SendRequestId ) ;
00154           }
00155         else
00156           {
00157             sts = _MPI_access->ISend( &((double *) sendbuf)[offset] , sendcount , sendtype ,
00158                                       target , SendRequestId ) ;
00159           }
00160       }
00161     else
00162       {
00163         if ( sendtype == MPI_INT )
00164           {
00165             sts = _MPI_access->send( &((int *) sendbuf)[offset] , sendcount , sendtype ,
00166                                      target , SendRequestId ) ;
00167           }
00168         else
00169           {
00170             sts = _MPI_access->send( &((double *) sendbuf)[offset] , sendcount , sendtype ,
00171                                      target , SendRequestId ) ;
00172           }
00173       }
00174     return sts ;
00175   }
00176 
00184   int MPIAccessDEC::recv( void* recvbuf, int recvcount , int offset ,
00185                           MPI_Datatype recvtype , int target , int &RecvRequestId )
00186   {
00187     int sts ;
00188     if ( _asynchronous )
00189       {
00190         if ( recvtype == MPI_INT )
00191           {
00192             sts = _MPI_access->IRecv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
00193                                       target , RecvRequestId ) ;
00194           }
00195         else
00196           {
00197             sts = _MPI_access->IRecv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
00198                                       target , RecvRequestId ) ;
00199           }
00200       }
00201     else
00202       {
00203         if ( recvtype == MPI_INT )
00204           {
00205             sts = _MPI_access->recv( &((int *) recvbuf)[offset] , recvcount , recvtype ,
00206                                      target , RecvRequestId ) ;
00207           }
00208         else
00209           {
00210             sts = _MPI_access->recv( &((double *) recvbuf)[offset] , recvcount , recvtype ,
00211                                      target , RecvRequestId ) ;
00212           }
00213       }
00214     return sts ;
00215   }
00216 
00226   int MPIAccessDEC::sendRecv( void* sendbuf, int sendcount , int sendoffset ,
00227                               MPI_Datatype sendtype ,
00228                               void* recvbuf, int recvcount , int recvoffset ,
00229                               MPI_Datatype recvtype , int target ,
00230                               int &SendRequestId , int &RecvRequestId )
00231   {
00232     int sts ;
00233     if ( _asynchronous )
00234       {
00235         if ( sendtype == MPI_INT )
00236           {
00237             if ( recvtype == MPI_INT )
00238               {
00239                 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
00240                                               sendtype , target , SendRequestId ,
00241                                               &((int *) recvbuf)[recvoffset] , recvcount ,
00242                                               recvtype , target , RecvRequestId ) ;
00243               }
00244             else
00245               {
00246                 sts = _MPI_access->ISendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
00247                                               sendtype , target , SendRequestId ,
00248                                               &((double *) recvbuf)[recvoffset] ,
00249                                               recvcount , recvtype , target , RecvRequestId ) ;
00250               }
00251           }
00252         else
00253           {
00254             if ( recvtype == MPI_INT )
00255               {
00256                 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
00257                                               sendtype , target , SendRequestId ,
00258                                               &((int *) recvbuf)[recvoffset] ,
00259                                               recvcount , recvtype , target , RecvRequestId ) ;
00260               }
00261             else
00262               {
00263                 sts = _MPI_access->ISendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
00264                                               sendtype , target , SendRequestId ,
00265                                               &((double *) recvbuf)[recvoffset] ,
00266                                               recvcount , recvtype , target , RecvRequestId ) ;
00267               }
00268           }
00269       }
00270     else
00271       {
00272         if ( sendtype == MPI_INT )
00273           {
00274             if ( recvtype == MPI_INT )
00275               {
00276                 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
00277                                              sendtype , target , SendRequestId ,
00278                                              &((int *) recvbuf)[recvoffset] , recvcount ,
00279                                              recvtype , target , RecvRequestId ) ;
00280               }
00281             else
00282               {
00283                 sts = _MPI_access->sendRecv( &((int *) sendbuf)[sendoffset] , sendcount ,
00284                                              sendtype , target , SendRequestId ,
00285                                              &((double *) recvbuf)[recvoffset] ,
00286                                              recvcount , recvtype , target , RecvRequestId ) ;
00287               }
00288           }
00289         else
00290           {
00291             if ( recvtype == MPI_INT )
00292               {
00293                 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
00294                                              sendtype , target , SendRequestId ,
00295                                              &((int *) recvbuf)[recvoffset] ,
00296                                              recvcount , recvtype , target , RecvRequestId ) ;
00297               }
00298             else
00299               {
00300                 sts = _MPI_access->sendRecv( &((double *) sendbuf)[sendoffset] , sendcount ,
00301                                              sendtype , target , SendRequestId ,
00302                                              &((double *) recvbuf)[recvoffset] ,
00303                                              recvcount , recvtype , target , RecvRequestId ) ;
00304               }
00305           }
00306       }
00307     return sts ;
00308   }
00309 
00315   int MPIAccessDEC::allToAll( void* sendbuf, int sendcount, MPI_Datatype sendtype ,
00316                               void* recvbuf, int recvcount, MPI_Datatype recvtype )
00317   {
00318     if ( _time_interpolator )
00319       {
00320         return allToAllTime( sendbuf, sendcount, sendtype , recvbuf, recvcount, recvtype ) ;
00321       }
00322     int sts ;
00323     int target ;
00324     int sendoffset = 0 ;
00325     int recvoffset = 0 ;
00326     int SendRequestId ;
00327     int RecvRequestId ;
00328 
00329     //Free of SendBuffers 
00330     if ( _asynchronous )
00331       checkSent() ;
00332 
00333     //DoSend + DoRecv : SendRecv
00334     SendBuffStruct * aSendDataStruct = NULL ;
00335     if ( _asynchronous && sendbuf )
00336       {
00337         aSendDataStruct = new SendBuffStruct ;
00338         aSendDataStruct->SendBuffer = sendbuf ;
00339         aSendDataStruct->Counter = 0 ;
00340         aSendDataStruct->DataType = sendtype ;
00341       }
00342     for ( target = 0 ; target < _group_size ; target++ )
00343       {
00344         sts = sendRecv( sendbuf , sendcount , sendoffset , sendtype ,
00345                         recvbuf , recvcount , recvoffset , recvtype ,
00346                         target , SendRequestId , RecvRequestId ) ;
00347         if ( _asynchronous && sendbuf && sendcount )
00348           {
00349             aSendDataStruct->Counter += 1 ;
00350             (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
00351           }
00352         sendoffset += sendcount ;
00353         recvoffset += recvcount ;
00354       }
00355     if ( !_asynchronous && sendbuf )
00356       {
00357         if ( sendtype == MPI_INT )
00358           {
00359             delete [] (int *) sendbuf ;
00360           }
00361         else
00362           {
00363             delete [] (double *) sendbuf ;
00364           }
00365       }
00366     return sts ;
00367   }
00368 
00374   int MPIAccessDEC::allToAllv( void* sendbuf, int* sendcounts, int* sdispls,
00375                                MPI_Datatype sendtype ,
00376                                void* recvbuf, int* recvcounts, int* rdispls,
00377                                MPI_Datatype recvtype )
00378   {
00379     if ( _time_interpolator )
00380       {
00381         return allToAllvTime( sendbuf, sendcounts, sdispls, sendtype ,
00382                               recvbuf, recvcounts, rdispls, recvtype ) ;
00383       }
00384     int sts ;
00385     int target ;
00386     int SendRequestId ;
00387     int RecvRequestId ;
00388 
00389     //Free of SendBuffers 
00390     if ( _asynchronous )
00391       {
00392         checkSent() ;
00393       }
00394 
00395     //DoSend + DoRecv : SendRecv
00396     SendBuffStruct * aSendDataStruct = NULL ;
00397     if ( _asynchronous && sendbuf )
00398       {
00399         aSendDataStruct = new SendBuffStruct ;
00400         aSendDataStruct->SendBuffer = sendbuf ;
00401         aSendDataStruct->Counter = 0 ;
00402         aSendDataStruct->DataType = sendtype ;
00403       }
00404     for ( target = 0 ; target < _group_size ; target++ )
00405       {
00406         if ( sendcounts[target] || recvcounts[target] )
00407           {
00408             sts = sendRecv( sendbuf , sendcounts[target] , sdispls[target] , sendtype ,
00409                             recvbuf , recvcounts[target] , rdispls[target] , recvtype ,
00410                             target , SendRequestId , RecvRequestId ) ;
00411             if ( _asynchronous && sendbuf && sendcounts[target])
00412               {
00413                 aSendDataStruct->Counter += 1 ;
00414                 (*_map_of_send_buffers)[ SendRequestId ] = aSendDataStruct ;
00415               }
00416           }
00417       }
00418     if ( !_asynchronous && sendbuf )
00419       {
00420         if ( sendtype == MPI_INT )
00421           {
00422             delete [] (int *) sendbuf ;
00423           }
00424         else
00425           {
00426             delete [] (double *) sendbuf ;
00427           }
00428       }
00429     return sts ;
00430   }
00431 
  /*
    MPIAccessDEC and the management of SendBuffers :
    =================================================

    . In the collective communications we send only parts of the same
    buffer to each "target". So in asynchronous mode it is necessary
    that all parts are free before deleting/freeing the buffer.

    . We assume that buffers are allocated with new double[], so a
    delete [] is done.

    . The structure SendBuffStruct makes it possible to keep the address
    of the buffer and to manage a reference counter for that buffer. It
    also contains the MPI_Datatype needed for the delete [] (double *) ...
    when the counter reaches zero.

    . The map _MapOfSendBuffers establishes the correspondence between each
    RequestId given by a MPI_Access->ISend(...) and a SendBuffStruct
    for each "target" of a part of the buffer.

    . All of that concerns only asynchronous Send. In synchronous mode,
    we delete sendbuf just after the Send.
  */
00456 
  /*
    MPIAccessDEC and the management of RecvBuffers :
    =================================================

    If there is no interpolation, no special action is done.

    With interpolation for each target :
    ------------------------------------
    . We have _time_messages[target] which is a vector of TimeMessages.
    We have 2 TimeMessages in our case with a linear interpolation.
    They contain the previous time(t0)/deltatime and the last
    time(t1)/deltatime.

    . We have _data_messages[target] which is a vector of DataMessages.
    We have 2 DataMessages in our case with a linear interpolation.
    They contain the previous data at time(t0)/deltatime and at the last
    time(t1)/deltatime.

    . At time _t(t*) of the current process we do the interpolation of
    the values of the 2 DataMessages, which are returned in the part of
    recvbuf corresponding to the target with t0 < t* <= t1.

    . Because of the difference of "deltatimes" between processes, we
    may have t0 < t1 < t* and there is an extrapolation.

    . The vectors _out_of_time, _DataMessagesRecvCount and _DataMessagesType
    contain for each target : true if t* > last t1, the recvcount and the
    MPI_Datatype for the finalization of messages at the end.
  */
00486 
00496   int MPIAccessDEC::allToAllTime( void* sendbuf, int sendcount , MPI_Datatype sendtype ,
00497                                   void* recvbuf, int recvcount , MPI_Datatype recvtype )
00498   {
00499     int sts ;
00500     int target ;
00501     int sendoffset = 0 ;
00502     int SendTimeRequestId ;
00503     int SendDataRequestId ;
00504 
00505     if ( _time_interpolator == NULL )
00506       {
00507         return MPI_ERR_OTHER ;
00508       }
00509 
00510     //Free of SendBuffers 
00511     if ( _asynchronous )
00512       {
00513         checkSent() ;
00514       }
00515 
00516     //DoSend : Time + SendBuff
00517     SendBuffStruct * aSendTimeStruct = NULL ;
00518     SendBuffStruct * aSendDataStruct = NULL ;
00519     if ( sendbuf && sendcount )
00520       {
00521         TimeMessage * aSendTimeMessage = new TimeMessage ;
00522         if ( _asynchronous )
00523           {
00524             aSendTimeStruct = new SendBuffStruct ;
00525             aSendTimeStruct->SendBuffer = aSendTimeMessage ;
00526             aSendTimeStruct->Counter = 0 ;
00527             aSendTimeStruct->DataType = _MPI_access->timeType() ;
00528             aSendDataStruct = new SendBuffStruct ;
00529             aSendDataStruct->SendBuffer = sendbuf ;
00530             aSendDataStruct->Counter = 0 ;
00531             aSendDataStruct->DataType = sendtype ;
00532           }
00533         aSendTimeMessage->time = _t ;
00534         aSendTimeMessage->deltatime = _dt ;
00535         for ( target = 0 ; target < _group_size ; target++ )
00536           {
00537             sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
00538                         SendTimeRequestId ) ;
00539             sts = send( sendbuf , sendcount , sendoffset , sendtype , target , SendDataRequestId ) ;
00540             if ( _asynchronous )
00541               {
00542                 aSendTimeStruct->Counter += 1 ;
00543                 (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
00544                 aSendDataStruct->Counter += 1 ;
00545                 (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
00546               }
00547             sendoffset += sendcount ;
00548           }
00549         if ( !_asynchronous )
00550           {
00551             delete aSendTimeMessage ;
00552             if ( sendtype == MPI_INT )
00553               {
00554                 delete [] (int *) sendbuf ;
00555               }
00556             else
00557               {
00558                 delete [] (double *) sendbuf ;
00559               }
00560           }
00561       }
00562 
00563     //CheckTime + DoRecv + DoInterp
00564     if ( recvbuf && recvcount )
00565       {
00566         for ( target = 0 ; target < _group_size ; target++ )
00567           {
00568             int recvsize = recvcount*_MPI_access->extent( recvtype ) ;
00569             checkTime( recvcount , recvtype , target , false ) ;
00570             //===========================================================================
00571             //TODO : it is assumed actually that we have only 1 timestep before nad after
00572             //===========================================================================
00573             if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
00574               {
00575                 if ( (*_out_of_time)[target] )
00576                   {
00577                     cout << " =====================================================" << endl
00578                          << "Recv" << _my_rank << " <-- target " << target << " t0 "
00579                          << (*_time_messages)[target][0].time << " < t1 "
00580                          << (*_time_messages)[target][1].time << " < t* " << _t << endl
00581                          << " =====================================================" << endl ;
00582                   }
00583                 if ( recvtype == MPI_INT )
00584                   {
00585                     _time_interpolator->doInterp( (*_time_messages)[target][0].time,
00586                                                   (*_time_messages)[target][1].time, _t, recvcount ,
00587                                                   _n_step_before, _n_step_after,
00588                                                   (int **) &(*_data_messages)[target][0],
00589                                                   (int **) &(*_data_messages)[target][1],
00590                                                   &((int *)recvbuf)[target*recvcount] ) ;
00591                   }
00592                 else
00593                   {
00594                     _time_interpolator->doInterp( (*_time_messages)[target][0].time,
00595                                                   (*_time_messages)[target][1].time, _t, recvcount ,
00596                                                   _n_step_before, _n_step_after,
00597                                                   (double **) &(*_data_messages)[target][0],
00598                                                   (double **) &(*_data_messages)[target][1],
00599                                                   &((double *)recvbuf)[target*recvcount] ) ;
00600                   }
00601               }
00602             else
00603               {
00604                 char * buffdest = (char *) recvbuf ;
00605                 char * buffsrc = (char *) (*_data_messages)[target][1] ;
00606                 memcpy( &buffdest[target*recvsize] , buffsrc , recvsize ) ;
00607               }
00608           }
00609       }
00610 
00611     return sts ;
00612   }
00613 
00614   int MPIAccessDEC::allToAllvTime( void* sendbuf, int* sendcounts, int* sdispls,
00615                                    MPI_Datatype sendtype ,
00616                                    void* recvbuf, int* recvcounts, int* rdispls,
00617                                    MPI_Datatype recvtype )
00618   {
00619     int sts ;
00620     int target ;
00621     int SendTimeRequestId ;
00622     int SendDataRequestId ;
00623 
00624     if ( _time_interpolator == NULL )
00625       {
00626         return MPI_ERR_OTHER ;
00627       }
00628 
00629     //Free of SendBuffers 
00630     if ( _asynchronous )
00631       {
00632         checkSent() ;
00633       }
00634 
00635     /*
00636       . DoSend :
00637       + We create a TimeMessage (look at that structure in MPI_Access).
00638       + If we are in asynchronous mode, we create two structures SendBuffStruct
00639       aSendTimeStruct and aSendDataStruct that we fill.
00640       + We fill the structure aSendTimeMessage with time/deltatime of
00641       the current process. "deltatime" must be nul if it is the last step of
00642       Time.
00643       + After that for each "target", we Send the TimeMessage and the part
00644       of sendbuf corresponding to that target.
00645       + If we are in asynchronous mode, we increment the counter and we add
00646       aSendTimeStruct and aSendDataStruct to _MapOfSendBuffers with the
00647       identifiers SendTimeRequestId and SendDataRequestId returned by
00648       MPI_Access->Send(...).
00649       + And if we are in synchronous mode we delete the SendMessages.
00650     */
00651     //DoSend : Time + SendBuff
00652     SendBuffStruct * aSendTimeStruct = NULL ;
00653     SendBuffStruct * aSendDataStruct = NULL ;
00654     if ( sendbuf )
00655       {
00656         TimeMessage * aSendTimeMessage = new TimeMessage ;
00657         if ( _asynchronous )
00658           {
00659             aSendTimeStruct = new SendBuffStruct ;
00660             aSendTimeStruct->SendBuffer = aSendTimeMessage ;
00661             aSendTimeStruct->Counter = 0 ;
00662             aSendTimeStruct->DataType = _MPI_access->timeType() ;
00663             aSendDataStruct = new SendBuffStruct ;
00664             aSendDataStruct->SendBuffer = sendbuf ;
00665             aSendDataStruct->Counter = 0 ;
00666             aSendDataStruct->DataType = sendtype ;
00667           }
00668         aSendTimeMessage->time = _t ;
00669         aSendTimeMessage->deltatime = _dt ;
00670         for ( target = 0 ; target < _group_size ; target++ )
00671           {
00672             if ( sendcounts[target] )
00673               {
00674                 sts = send( aSendTimeMessage , 1 , 0 , _MPI_access->timeType() , target ,
00675                             SendTimeRequestId ) ;
00676                 sts = send( sendbuf , sendcounts[target] , sdispls[target] , sendtype , target ,
00677                             SendDataRequestId ) ;
00678                 if ( _asynchronous )
00679                   {
00680                     aSendTimeStruct->Counter += 1 ;
00681                     (*_map_of_send_buffers)[ SendTimeRequestId ] = aSendTimeStruct ;
00682                     aSendDataStruct->Counter += 1 ;
00683                     (*_map_of_send_buffers)[ SendDataRequestId ] = aSendDataStruct ;
00684                   }
00685               }
00686           }
00687         if ( !_asynchronous )
00688           {
00689             delete aSendTimeMessage ;
00690             if ( sendtype == MPI_INT )
00691               {
00692                 delete [] (int *) sendbuf ;
00693               }
00694             else
00695               {
00696                 delete [] (double *) sendbuf ;
00697               }
00698           }
00699       }
00700 
00701     /*
00702       . CheckTime + DoRecv + DoInterp
00703       + For each target we call CheckTime
00704       + If there is a TimeInterpolator and if the TimeMessage of the target
00705       is not the first, we call the interpolator which return its
00706       results in the part of the recv buffer corresponding to the "target".
00707       + If not, there is a copy of received datas for that first step of time
00708       in the part of the recv buffer corresponding to the "target".
00709     */
00710     //CheckTime + DoRecv + DoInterp
00711     if ( recvbuf )
00712       {
00713         for ( target = 0 ; target < _group_size ; target++ )
00714           {
00715             if ( recvcounts[target] )
00716               {
00717                 int recvsize = recvcounts[target]*_MPI_access->extent( recvtype ) ;
00718                 checkTime( recvcounts[target] , recvtype , target , false ) ;
00719                 //===========================================================================
00720                 //TODO : it is assumed actually that we have only 1 timestep before nad after
00721                 //===========================================================================
00722                 if ( _time_interpolator && (*_time_messages)[target][0].time != -1 )
00723                   {
00724                     if ( (*_out_of_time)[target] )
00725                       {
00726                         cout << " =====================================================" << endl
00727                              << "Recv" << _my_rank << " <-- target " << target << " t0 "
00728                              << (*_time_messages)[target][0].time << " < t1 "
00729                              << (*_time_messages)[target][1].time << " < t* " << _t << endl
00730                              << " =====================================================" << endl ;
00731                       }
00732                     if ( recvtype == MPI_INT )
00733                       {
00734                         _time_interpolator->doInterp( (*_time_messages)[target][0].time,
00735                                                       (*_time_messages)[target][1].time, _t,
00736                                                       recvcounts[target] , _n_step_before, _n_step_after,
00737                                                       (int **) &(*_data_messages)[target][0],
00738                                                       (int **) &(*_data_messages)[target][1],
00739                                                       &((int *)recvbuf)[rdispls[target]] ) ;
00740                       }
00741                     else
00742                       {
00743                         _time_interpolator->doInterp( (*_time_messages)[target][0].time,
00744                                                       (*_time_messages)[target][1].time, _t,
00745                                                       recvcounts[target] , _n_step_before, _n_step_after,
00746                                                       (double **) &(*_data_messages)[target][0],
00747                                                       (double **) &(*_data_messages)[target][1],
00748                                                       &((double *)recvbuf)[rdispls[target]] ) ;
00749                       }
00750                   }
00751                 else
00752                   {
00753                     char * buffdest = (char *) recvbuf ;
00754                     char * buffsrc = (char *) (*_data_messages)[target][1] ;
00755                     memcpy( &buffdest[rdispls[target]*_MPI_access->extent( recvtype )] , buffsrc ,
00756                             recvsize ) ;
00757                   }
00758               }
00759           }
00760       }
00761 
00762     return sts ;
00763   }
00764 
00765   /*
00766     . CheckTime(recvcount , recvtype , target , UntilEnd)
00767     + At the beginning, we read the first TimeMessage in
00768     &(*_TimeMessages)[target][1] and the first DataMessage
00769     in the allocated buffer (*_DataMessages)[target][1].
00770     + deltatime of TimesMessages must be nul if it is the last one.
00771     + While : _t(t*) is the current time of the processus.
00772     "while _t(t*) is greater than the time of the "target"
00773     (*_TimeMessages)[target][1].time and
00774     (*_TimeMessages)[target][1].deltatime is not nul",
00775     So at the end of the while we have :
00776     _t(t*) <= (*_TimeMessages)[target][1].time with
00777     _t(t*) > (*_TimeMessages)[target][0].time
00778     or we have the last TimeMessage of the "target".
00779     + If it is the finalization of the recv of TimeMessages and
00780     DataMessages (UntilEnd value is true), we execute the while
00781     until (*_TimeMessages)[target][1].deltatime is nul.
00782     + In the while :
00783     We copy the last TimeMessage in the previoud TimeMessage and
00784     we read a new TimeMessage
00785     We delete the previous DataMessage.
00786     We copy the last DataMessage pointer in the previous one.
00787     We allocate a new last DataMessage buffer
00788     (*_DataMessages)[target][1] and we read the corresponding
00789     datas in that buffe.
00790     + If the current time of the current process is greater than the
00791     last time (*_TimeMessages)[target][1].time du target, we give
00792     a true value to (*_OutOfTime)[target].
00793     (*_TimeMessages)[target][1].deltatime is nul.
00794   */
00795   int MPIAccessDEC::checkTime( int recvcount , MPI_Datatype recvtype , int target ,
00796                                bool UntilEnd )
00797   {
00798     int sts = MPI_SUCCESS ;
00799     int RecvTimeRequestId ;
00800     int RecvDataRequestId ;
00801     //Pour l'instant on cherche _time_messages[target][0] < _t <= _time_messages[target][1]
00802     //===========================================================================
00803     //TODO : it is assumed actually that we have only 1 timestep before and after
00804     //       instead of _n_step_before and _n_step_after ...
00805     //===========================================================================
00806     (*_data_messages_recv_count)[target] = recvcount ;
00807     (*_data_messages_type)[target] = recvtype ;
00808     if ( (*_time_messages)[target][1].time == -1 )
00809       {
00810         (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
00811         sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
00812                     target , RecvTimeRequestId ) ;
00813         (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
00814         if ( recvtype == MPI_INT )
00815           {
00816             (*_data_messages)[target][1] = new int[recvcount] ;
00817           }
00818         else
00819           {
00820             (*_data_messages)[target][1] = new double[recvcount] ;
00821           }
00822         sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
00823                     RecvDataRequestId ) ;
00824       }
00825     else
00826       {
00827         while ( ( _t > (*_time_messages)[target][1].time || UntilEnd ) &&
00828                 (*_time_messages)[target][1].deltatime != 0 )
00829           {
00830             (*_time_messages)[target][0] = (*_time_messages)[target][1] ;
00831             sts = recv( &(*_time_messages)[target][1] , 1 , _MPI_access->timeType() ,
00832                         target , RecvTimeRequestId ) ;
00833             if ( UntilEnd )
00834               {
00835                 cout << "CheckTime" << _my_rank << " TimeMessage target " << target
00836                      << " RecvTimeRequestId " << RecvTimeRequestId << " MPITag "
00837                      << _MPI_access->recvMPITag(target) << endl ;
00838               }
00839             if ( recvtype == MPI_INT )
00840               {
00841                 delete [] (int *) (*_data_messages)[target][0] ;
00842               }
00843             else
00844               {
00845                 delete [] (double *) (*_data_messages)[target][0] ;
00846               }
00847             (*_data_messages)[target][0] = (*_data_messages)[target][1] ;
00848             if ( recvtype == MPI_INT )
00849               {
00850                 (*_data_messages)[target][1] = new int[recvcount] ;
00851               }
00852             else
00853               {
00854                 (*_data_messages)[target][1] = new double[recvcount] ;
00855               }
00856             sts = recv( (*_data_messages)[target][1] , recvcount , recvtype , target ,
00857                         RecvDataRequestId ) ;
00858             if ( UntilEnd )
00859               {
00860                 cout << "CheckTime" << _my_rank << " DataMessage target " << target
00861                      << " RecvDataRequestId " << RecvDataRequestId << " MPITag "
00862                      << _MPI_access->recvMPITag(target) << endl ;
00863               }
00864           }
00865 
00866         if ( _t > (*_time_messages)[target][0].time &&
00867              _t <= (*_time_messages)[target][1].time )
00868           {
00869           }
00870         else
00871           {
00872             (*_out_of_time)[target] = true ;
00873           }
00874       }
00875     return sts ;
00876   }
00877 
00878   /*
00879     . CheckSent() :
00880     + call  SendRequestIds of MPI_Access in order to get all
00881     RequestIds of SendMessages of all "targets".
00882     + For each RequestId, CheckSent call "Test" of MPI_Access in order
00883     to know if the buffer is "free" (flag = true). If it is the
00884     FinalCheckSent (WithWait = true), we call Wait instead of Test.
00885     + If the buffer is "free", the counter of the structure SendBuffStruct
00886     (from _MapOfSendBuffers) is decremented.
00887     + If that counter is nul we delete the TimeMessage or the
00888     SendBuffer according to the DataType.
00889     + And we delete the structure SendBuffStruct before the suppression
00890     (erase) of that item of _MapOfSendBuffers
00891   */
  /*!
    Garbage collection of the send requests : for each SendRequestId known by
    MPI_Access, tests (or waits, when WithWait is true) for its completion ;
    when a request has completed, decrements the reference counter of its
    SendBuffStruct (several requests may share one send buffer) and, when the
    counter reaches zero, frees the buffer according to its DataType.
    \param WithWait when true (final check), block on each request with Wait
                    instead of the non-blocking Test
    \return the MPI status of the last Test/Wait performed
  */
  int MPIAccessDEC::checkSent(bool WithWait)
  {
    int sts = MPI_SUCCESS ;
    int flag = WithWait ;
    int size = _MPI_access->sendRequestIdsSize() ;
    // Snapshot of all pending SendRequestIds : _map_of_send_buffers is
    // modified (erase) while this array is traversed.
    int * ArrayOfSendRequests = new int[ size ] ;
    int nSendRequest = _MPI_access->sendRequestIds( size , ArrayOfSendRequests ) ;
    bool SendTrace = false ; // set to true for verbose tracing on cout
    int i ;
    for ( i = 0 ; i < nSendRequest ; i++ )
      {
        if ( WithWait )
          {
            // Final check : block until the send completes (flag stays true).
            cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
                 << " SendRequestId " << ArrayOfSendRequests[i] << " MPITarget "
                 << _MPI_access->MPITarget(ArrayOfSendRequests[i]) << " MPITag "
                 << _MPI_access->MPITag(ArrayOfSendRequests[i]) << " Wait :" << endl ;
            sts = _MPI_access->wait( ArrayOfSendRequests[i] ) ;
          }
        else
          {
            // Non-blocking probe : flag becomes true only if the send completed.
            sts = _MPI_access->test( ArrayOfSendRequests[i] , flag ) ;
          }
        if ( flag )
          {
            // The send completed : release the MPI request.
            _MPI_access->deleteRequest( ArrayOfSendRequests[i] ) ;
            if ( SendTrace )
              {
                cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
                     << " SendRequestId " << ArrayOfSendRequests[i]
                     << " flag " << flag
                     << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
                     << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
                     << endl ;
              }
            // One request less references that shared send buffer.
            (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter -= 1 ;
            if ( SendTrace )
              {
                if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == 
                     _MPI_access->timeType() )
                  {
                    cout << "CheckTimeSent" << _my_rank << " Request " ;
                  }
                else
                  {
                    cout << "CheckDataSent" << _my_rank << " Request " ;
                  }
                cout << ArrayOfSendRequests[i]
                     << " _map_of_send_buffers->SendBuffer "
                     << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
                     << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
                     << endl ;
              }
            if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter  == 0 )
              {
                // Last reference : free the buffer with the delete form
                // matching the type used at allocation.
                if ( SendTrace )
                  {
                    cout << "CheckSent" << _my_rank << " SendRequestId " << ArrayOfSendRequests[i]
                         << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
                         << " flag " << flag << " SendBuffer "
                         << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer
                         << " deleted. Erase in _map_of_send_buffers :" << endl ;
                  }
                if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType ==
                     _MPI_access->timeType() )
                  {
                    delete (TimeMessage * ) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
                  }
                else
                  {
                    if ( (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType == MPI_INT )
                      {
                        delete [] (int *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
                      }
                    else
                      {
                        delete [] (double *) (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->SendBuffer ;
                      }
                  }
                delete (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ] ;
              }
            if ( SendTrace )
              {
                cout << "CheckSent" << _my_rank << " Erase in _map_of_send_buffers SendRequestId "
                     << ArrayOfSendRequests[i] << endl ;
              }
            // The map entry is always erased for a completed request ; the
            // shared SendBuffStruct survives while other requests reference it.
            (*_map_of_send_buffers).erase( ArrayOfSendRequests[i] ) ;
          }
        else if ( SendTrace )
          {
            cout << "CheckSent" << _my_rank << " " << i << "./" << nSendRequest
                 << " SendRequestId " << ArrayOfSendRequests[i]
                 << " flag " << flag
                 << " Counter " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->Counter
                 << " DataType " << (*_map_of_send_buffers)[ ArrayOfSendRequests[i] ]->DataType
                 << endl ;
          }
      }
    if ( SendTrace )
      {
        _MPI_access->check() ;
      }
    delete [] ArrayOfSendRequests ;
    return sts ;
  }
00997 
00998   int MPIAccessDEC::checkFinalRecv()
00999   {
01000     int sts = MPI_SUCCESS ;
01001     if ( _time_interpolator )
01002       {
01003         int target ;
01004         for ( target = 0 ; target < _group_size ; target++ )
01005           {
01006             if ( (*_data_messages)[target][0] != NULL )
01007               {
01008                 sts = checkTime( (*_data_messages_recv_count)[target] , (*_data_messages_type)[target] ,
01009                                  target , true ) ;
01010                 if ( (*_data_messages_type)[target] == MPI_INT )
01011                   {
01012                     delete [] (int *) (*_data_messages)[target][0] ;
01013                   }
01014                 else
01015                   {
01016                     delete [] (double *) (*_data_messages)[target][0] ;
01017                   }
01018                 (*_data_messages)[target][0] = NULL ;
01019                 if ( (*_data_messages)[target][1] != NULL )
01020                   {
01021                     if ( (*_data_messages_type)[target] == MPI_INT )
01022                       {
01023                         delete [] (int *) (*_data_messages)[target][1] ;
01024                       }
01025                     else
01026                       {
01027                         delete [] (double *) (*_data_messages)[target][1] ;
01028                       }
01029                     (*_data_messages)[target][1] = NULL ;
01030                   }
01031               }
01032           }
01033       }
01034     return sts ;
01035   }
01036 
01037   ostream & operator<< (ostream & f ,const TimeInterpolationMethod & interpolationmethod )
01038   {
01039     switch (interpolationmethod)
01040       {
01041       case WithoutTimeInterp :
01042         f << " WithoutTimeInterpolation ";
01043         break;
01044       case LinearTimeInterp :
01045         f << " LinearTimeInterpolation ";
01046         break;
01047       default :
01048         f << " UnknownTimeInterpolation ";
01049         break;
01050       }
01051 
01052     return f;
01053   }
01054 }