ROMS
distribute_mod::mp_collect Interface Reference

Public Member Functions

subroutine mp_collect_f (ng, model, npts, aspv, a, inpcomm)
 
subroutine mp_collect_i (ng, model, npts, aspv, a, inpcomm)
 

Detailed Description

Definition at line 108 of file distribute.F.
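The two members are type-specific instances of a single generic name: mp_collect resolves to mp_collect_f for real(r8) arrays and to mp_collect_i for integer arrays. A minimal sketch of the interface block that the definition at line 108 presumably declares, given the member list above:

      INTERFACE mp_collect
        MODULE PROCEDURE mp_collect_f     ! real(r8) data
        MODULE PROCEDURE mp_collect_i     ! integer data
      END INTERFACE mp_collect

Callers simply invoke mp_collect; the compiler selects the specific routine from the type of the array argument A.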

Member Function/Subroutine Documentation

◆ mp_collect_f()

subroutine distribute_mod::mp_collect::mp_collect_f ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
real(r8), intent(in) aspv,
real(r8), dimension(npts), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 3522 of file distribute.F.

3523!
3524!***********************************************************************
3525! !
3526! This routine collects a 1D floating-point array from all members !
3527! in the group. Then, it packs distributed data by removing the !
3528! special values. This routine is used when extracting station !
3529! data from tiled arrays. !
3530! !
3531! On Input: !
3532! !
3533! ng Nested grid number. !
3534! model Calling model identifier. !
3535! Npts Number of collected data points. !
3536! Aspv Special value indicating no data. This implies that !
3537! desired data is tile unbounded. !
3538! A Collected data. !
3539! InpComm Communicator handle (integer, OPTIONAL). !
3540! !
3541! On Output: !
3542! !
3543! A Collected data. !
3544! !
3545!***********************************************************************
3546!
3547! Imported variable declarations.
3548!
3549 integer, intent(in) :: ng, model, Npts
3550
3551 integer, intent(in), optional :: InpComm
3552!
3553 real(r8), intent(in) :: Aspv
3554
3555 real(r8), intent(inout) :: A(Npts)
3556!
3557! Local variable declarations.
3558!
3559 integer :: Lstr, MyCOMM, MyError, Nnodes, Serror
3560 integer :: i, rank, request
3561
3562 integer, dimension(MPI_STATUS_SIZE) :: status
3563!
3564# if defined COLLECT_ALLGATHER
3565 real(r8), dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
3566# elif defined COLLECT_ALLREDUCE
3567 real(r8), dimension(Npts) :: Asend
3568# else
3569 real(r8), allocatable :: Arecv(:)
3570# endif
3571!
3572 character (len=MPI_MAX_ERROR_STRING) :: string
3573
3574 character (len=*), parameter :: MyFile = &
3575 & __FILE__//", mp_collect_f"
3576
3577# ifdef PROFILE
3578!
3579!-----------------------------------------------------------------------
3580! Turn on time clocks.
3581!-----------------------------------------------------------------------
3582!
3583 CALL wclock_on (ng, model, 69, __LINE__, MyFile)
3584# endif
3585# ifdef MPI
3586!
3587!-----------------------------------------------------------------------
3588! Set distributed-memory communicator handle (context ID).
3589!-----------------------------------------------------------------------
3590!
3591 IF (PRESENT(inpcomm)) THEN
3592 mycomm=inpcomm
3593 ELSE
3594 mycomm=ocn_comm_world
3595 END IF
3596# endif
3597!
3598!-----------------------------------------------------------------------
3599! Collect data from all nodes.
3600!-----------------------------------------------------------------------
3601!
3602! Maximum automatic buffer memory size in bytes.
3603!
3604# if defined COLLECT_ALLGATHER
3605 bmemmax(ng)=max(bmemmax(ng), real(SIZE(arecv)*kind(a),r8))
3606# else
3607 bmemmax(ng)=max(bmemmax(ng), real(npts*kind(a),r8))
3608# endif
3609
3610# if defined COLLECT_ALLGATHER
3611!
3612 CALL mpi_allgather (a, npts, mp_float, arecv, npts, mp_float, &
3613 & mycomm, myerror)
3614 IF (myerror.ne.mpi_success) THEN
3615 CALL mpi_error_string (myerror, string, lstr, serror)
3616 lstr=len_trim(string)
3617 WRITE (stdout,10) 'MPI_ALLGATHER', myrank, myerror, &
3618 & string(1:lstr)
3619 exit_flag=2
3620 RETURN
3621 END IF
3622!
3623! Pack data according to special values: sum or ignore.
3624!
3625 nnodes=ntilei(ng)*ntilej(ng)-1
3626 IF (aspv.eq.0.0_r8) THEN
3627 DO i=1,npts
3628 a(i)=0.0_r8
3629 DO rank=0,nnodes
3630 a(i)=a(i)+arecv(i,rank)
3631 END DO
3632 END DO
3633 ELSE
3634 DO i=1,npts
3635 DO rank=0,nnodes
3636 IF (arecv(i,rank).ne.aspv) THEN
3637 a(i)=arecv(i,rank)
3638 END IF
3639 END DO
3640 END DO
3641 END IF
3642# elif defined COLLECT_ALLREDUCE
3643!
3644! Copy data to send.
3645!
3646 DO i=1,npts
3647 asend(i)=a(i)
3648 END DO
3649!
3650! Collect data from all nodes as a reduced sum.
3651!
3652 CALL mpi_allreduce (asend, a, npts, mp_float, mpi_sum, &
3653 & mycomm, myerror)
3654 IF (myerror.ne.mpi_success) THEN
3655 CALL mpi_error_string (myerror, string, lstr, serror)
3656 lstr=len_trim(string)
3657 WRITE (stdout,10) 'MPI_ALLREDUCE', myrank, myerror, &
3658 & string(1:lstr)
3659 exit_flag=2
3660 RETURN
3661 END IF
3662# else
3663!
3664 IF (myrank.eq.mymaster) THEN
3665!
3666! If master node, allocate and receive buffer.
3667!
3668 IF (.not.allocated(arecv)) THEN
3669 allocate (arecv(npts))
3670 END IF
3671!
3672! If master node, loop over other nodes to receive and accumulate the
3673! data.
3674!
3675 DO rank=1,ntilei(ng)*ntilej(ng)-1
3676 CALL mpi_irecv (arecv, npts, mp_float, rank, rank+5, &
3677 & mycomm, request, myerror)
3678 CALL mpi_wait (request, status, myerror)
3679 IF (myerror.ne.mpi_success) THEN
3680 CALL mpi_error_string (myerror, string, lstr, serror)
3681 lstr=len_trim(string)
3682 WRITE (stdout,10) 'MPI_IRECV', rank, myerror, string(1:lstr)
3683 exit_flag=2
3684 RETURN
3685 END IF
3686 DO i=1,npts
3687 a(i)=a(i)+arecv(i)
3688 END DO
3689 END DO
3690 deallocate (arecv)
3691!
3692! Otherwise, send data to master node.
3693!
3694 ELSE
3695 CALL mpi_isend (a, npts, mp_float, mymaster, myrank+5, &
3696 & mycomm, request, myerror)
3697 CALL mpi_wait (request, status, myerror)
3698 IF (myerror.ne.mpi_success) THEN
3699 CALL mpi_error_string (myerror, string, lstr, serror)
3700 lstr=len_trim(string)
3701 WRITE (stdout,10) 'MPI_ISEND', myrank, myerror, string(1:lstr)
3702 exit_flag=2
3703 RETURN
3704 END IF
3705 END IF
3706!
3707! Broadcast accumulated (full) data to all nodes.
3708!
3709 CALL mpi_bcast (a, npts, mp_float, mymaster, mycomm, myerror)
3710 IF (myerror.ne.mpi_success) THEN
3711 CALL mpi_error_string (myerror, string, lstr, serror)
3712 lstr=len_trim(string)
3713 WRITE (stdout,10) 'MPI_BCAST', myrank, myerror, string(1:lstr)
3714 exit_flag=2
3715 RETURN
3716 END IF
3717# endif
3718 10 FORMAT (/,' MP_COLLECT_F - error during ',a,' call, Task = ', &
3719 & i3.3,' Error = ',i3,/,14x,a)
3720
3721# ifdef PROFILE
3722!
3723!-----------------------------------------------------------------------
3724! Turn off time clocks.
3725!-----------------------------------------------------------------------
3726!
3727 CALL wclock_off (ng, model, 69, __LINE__, MyFile)
3728# endif
3729!
3730 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::mp_float, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().
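As the header comment explains, the typical use is extracting station data from tiled arrays: each rank fills only the entries its tile owns and flags the rest with the special value, so that after collection every rank holds the complete vector. A hedged caller sketch with Aspv = 0, the mode in which all three compile-time branches (MPI_ALLGATHER, MPI_ALLREDUCE, and the point-to-point default) behave identically as an additive collection; station_in_my_tile and local_value are hypothetical helpers, not part of distribute.F:

      real(r8) :: Svalue(Nstations)
!
      Svalue=0.0_r8                            ! zero marks "no data here"
      DO i=1,Nstations
        IF (station_in_my_tile(i)) THEN        ! hypothetical ownership test
          Svalue(i)=local_value(i)             ! tile-local station value
        END IF
      END DO
      CALL mp_collect (ng, iNLM, Nstations, 0.0_r8, Svalue)

Note from the listing that a nonzero Aspv is honored as "ignore the special value" only in the COLLECT_ALLGATHER branch; the COLLECT_ALLREDUCE and point-to-point branches always form a sum across ranks, so their unset entries must be zero for the result to be exact.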


◆ mp_collect_i()

subroutine distribute_mod::mp_collect::mp_collect_i ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
integer, intent(in) aspv,
integer, dimension(npts), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 3733 of file distribute.F.

3734!
3735!***********************************************************************
3736! !
3737! This routine collects a 1D integer array from all members in !
3738! the group. Then, it packs distributed data by removing the !
3739! special values. This routine is used when extracting station !
3740! data from tiled arrays. !
3741! !
3742! On Input: !
3743! !
3744! ng Nested grid number. !
3745! model Calling model identifier. !
3746! Npts Number of collected data points. !
3747! Aspv Special value indicating no data. This implies that !
3748! desired data is tile unbounded. !
3749! A Collected data. !
3750! InpComm Communicator handle (integer, OPTIONAL). !
3751! !
3752! On Output: !
3753! !
3754! A Collected data. !
3755! !
3756!***********************************************************************
3757!
3758! Imported variable declarations.
3759!
3760 integer, intent(in) :: ng, model, Npts
3761
3762 integer, intent(in) :: Aspv
3763
3764 integer, intent(in), optional :: InpComm
3765
3766 integer, intent(inout) :: A(Npts)
3767!
3768! Local variable declarations.
3769!
3770 integer :: Lstr, MyCOMM, MyError, Nnodes, Serror
3771 integer :: i, rank, request
3772
3773 integer, dimension(MPI_STATUS_SIZE) :: status
3774
3775# if defined COLLECT_ALLGATHER
3776 integer, dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
3777# elif defined COLLECT_ALLREDUCE
3778 integer, dimension(Npts) :: Asend
3779# else
3780 integer, allocatable :: Arecv(:)
3781# endif
3782!
3783 character (len=MPI_MAX_ERROR_STRING) :: string
3784
3785 character (len=*), parameter :: MyFile = &
3786 & __FILE__//", mp_collect_i"
3787
3788# ifdef PROFILE
3789!
3790!-----------------------------------------------------------------------
3791! Turn on time clocks.
3792!-----------------------------------------------------------------------
3793!
3794 CALL wclock_on (ng, model, 69, __LINE__, MyFile)
3795# endif
3796# ifdef MPI
3797!
3798!-----------------------------------------------------------------------
3799! Set distributed-memory communicator handle (context ID).
3800!-----------------------------------------------------------------------
3801!
3802 IF (PRESENT(inpcomm)) THEN
3803 mycomm=inpcomm
3804 ELSE
3805 mycomm=ocn_comm_world
3806 END IF
3807# endif
3808!
3809!-----------------------------------------------------------------------
3810! Collect data from all nodes.
3811!-----------------------------------------------------------------------
3812!
3813! Maximum automatic buffer memory size in bytes.
3814!
3815# if defined COLLECT_ALLGATHER
3816 bmemmax(ng)=max(bmemmax(ng), real(SIZE(arecv)*kind(a),r8))
3817# else
3818 bmemmax(ng)=max(bmemmax(ng), real(npts*kind(a),r8))
3819# endif
3820
3821# if defined COLLECT_ALLGATHER
3822!
3823 CALL mpi_allgather (a, npts, mpi_integer, arecv, npts, &
3824 & mpi_integer, mycomm, myerror)
3825 IF (myerror.ne.mpi_success) THEN
3826 CALL mpi_error_string (myerror, string, lstr, serror)
3827 lstr=len_trim(string)
3828 WRITE (stdout,10) 'MPI_ALLGATHER', myrank, myerror, &
3829 & string(1:lstr)
3830 exit_flag=2
3831 RETURN
3832 END IF
3833!
3834! Pack data according to special values: sum or ignore.
3835!
3836 nnodes=ntilei(ng)*ntilej(ng)-1
3837 IF (aspv.eq.0) THEN
3838 DO i=1,npts
3839 a(i)=0
3840 DO rank=0,nnodes
3841 a(i)=a(i)+arecv(i,rank)
3842 END DO
3843 END DO
3844 ELSE
3845 DO i=1,npts
3846 DO rank=0,nnodes
3847 IF (arecv(i,rank).ne.aspv) THEN
3848 a(i)=arecv(i,rank)
3849 END IF
3850 END DO
3851 END DO
3852 END IF
3853# elif defined COLLECT_ALLREDUCE
3854!
3855! Copy data to send.
3856!
3857 DO i=1,npts
3858 asend(i)=a(i)
3859 END DO
3860!
3861! Collect data from all nodes as a reduced sum.
3862!
3863 CALL mpi_allreduce (asend, a, npts, mpi_integer, mpi_sum, &
3864 & mycomm, myerror)
3865 IF (myerror.ne.mpi_success) THEN
3866 CALL mpi_error_string (myerror, string, lstr, serror)
3867 lstr=len_trim(string)
3868 WRITE (stdout,10) 'MPI_ALLREDUCE', myrank, myerror, &
3869 & string(1:lstr)
3870 exit_flag=2
3871 RETURN
3872 END IF
3873# else
3874!
3875 IF (myrank.eq.mymaster) THEN
3876!
3877! If master node, allocate and receive buffer.
3878!
3879 IF (.not.allocated(arecv)) THEN
3880 allocate (arecv(npts))
3881 END IF
3882!
3883! If master node, loop over other nodes to receive and accumulate the
3884! data.
3885!
3886 DO rank=1,ntilei(ng)*ntilej(ng)-1
3887 CALL mpi_irecv (arecv, npts, mpi_integer, rank, rank+5, &
3888 & mycomm, request, myerror)
3889 CALL mpi_wait (request, status, myerror)
3890 IF (myerror.ne.mpi_success) THEN
3891 CALL mpi_error_string (myerror, string, lstr, serror)
3892 lstr=len_trim(string)
3893 WRITE (stdout,10) 'MPI_IRECV', rank, myerror, string(1:lstr)
3894 exit_flag=2
3895 RETURN
3896 END IF
3897 DO i=1,npts
3898 a(i)=a(i)+arecv(i)
3899 END DO
3900 END DO
3901 deallocate (arecv)
3902!
3903! Otherwise, send data to master node.
3904!
3905 ELSE
3906 CALL mpi_isend (a, npts, mpi_integer, mymaster, myrank+5, &
3907 & mycomm, request, myerror)
3908 CALL mpi_wait (request, status, myerror)
3909 IF (myerror.ne.mpi_success) THEN
3910 CALL mpi_error_string (myerror, string, lstr, serror)
3911 lstr=len_trim(string)
3912 WRITE (stdout,10) 'MPI_ISEND', myrank, myerror, string(1:lstr)
3913 exit_flag=2
3914 RETURN
3915 END IF
3916 END IF
3917!
3918! Broadcast accumulated (full) data to all nodes.
3919!
3920 CALL mpi_bcast (a, npts, mpi_integer, mymaster, mycomm, myerror)
3921 IF (myerror.ne.mpi_success) THEN
3922 CALL mpi_error_string (myerror, string, lstr, serror)
3923 lstr=len_trim(string)
3924 WRITE (stdout,10) 'MPI_BCAST', myrank, myerror, string(1:lstr)
3925 exit_flag=2
3926 RETURN
3927 END IF
3928# endif
3929 10 FORMAT (/,' MP_COLLECT_I - error during ',a,' call, Task = ', &
3930 & i3.3,' Error = ',i3,/,14x,a)
3931
3932# ifdef PROFILE
3933!
3934!-----------------------------------------------------------------------
3935! Turn off time clocks.
3936!-----------------------------------------------------------------------
3937!
3938 CALL wclock_off (ng, model, 69, __LINE__, MyFile)
3939# endif
3940!
3941 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().
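The integer instance mirrors the floating-point one; the optional InpComm argument lets the collection run over a communicator other than ocn_comm_world. A hedged sketch of accumulating global integer counts over a hypothetical sub-communicator handle SubComm (idx and Nwork are also hypothetical):

      integer :: Ncount(Npts)
!
      Ncount=0                                 ! zero is the special value
      DO i=1,Nwork                             ! rank-local contributions
        Ncount(idx(i))=Ncount(idx(i))+1
      END DO
      CALL mp_collect (ng, iNLM, Npts, 0, Ncount, SubComm)

On return, every rank of SubComm holds the globally accumulated counts.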


The documentation for this interface was generated from the following file: distribute.F