ROMS
distribute_mod::mp_assemble Interface Reference

Public Member Functions

subroutine mp_assemblef_1d (ng, model, npts, aspv, a, inpcomm)
 
subroutine mp_assemblef_2d (ng, model, npts, aspv, a, inpcomm)
 
subroutine mp_assemblef_3d (ng, model, npts, aspv, a, inpcomm)
 
subroutine mp_assemblei_1d (ng, model, npts, aspv, a, inpcomm)
 
subroutine mp_assemblei_2d (ng, model, npts, aspv, a, inpcomm)
 

Detailed Description

Generic interface that assembles tile-partitioned 1D, 2D, and 3D arrays, floating-point or integer, from all members of the distributed-memory group; the collection of data from all nodes is achieved as a reduction sum. The generic name resolves to one of the specific routines listed above according to the type and rank of the array argument.

Definition at line 67 of file distribute.F.
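
A minimal usage sketch, assuming a standard ROMS build; the grid number, model identifier, and array shapes below are illustrative, not values taken from this page. The generic name dispatches on the type and rank of A:

    USE mod_kinds,      ONLY : r8
    USE distribute_mod, ONLY : mp_assemble

    integer  :: ng, model
    integer  :: icount(8)
    real(r8) :: f1(100), f2(10,10)

    ng=1                          ! assumed nested grid number
    model=1                       ! assumed calling model identifier
    CALL mp_assemble (ng, model, SIZE(f1), 0.0_r8, f1)       ! mp_assemblef_1d
    CALL mp_assemble (ng, model, SIZE(f2), 0.0_r8, f2)       ! mp_assemblef_2d
    CALL mp_assemble (ng, model, SIZE(icount), 0, icount)    ! mp_assemblei_1d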

Member Function/Subroutine Documentation

◆ mp_assemblef_1d()

subroutine distribute_mod::mp_assemble::mp_assemblef_1d ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
real(r8), intent(in) aspv,
real(r8), dimension(:), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 2278 of file distribute.F.

2279!
2280!***********************************************************************
2281! !
2282! This routine assembles a 1D floating-point array from all members !
2283! in the group. The collection of data from all nodes is achieved !
2284! as a reduction sum. !
2285! !
2286! On Input: !
2287! !
2288! ng Nested grid number. !
2289! model Calling model identifier. !
2290! Npts Number of collected data points, PROD(SIZE(A)). !
2291! Aspv Special value indicating that an array element is !
2292! not operated by the current parallel node. It must !
2293! be zero to collect data by a global reduction sum. !
2294! A 1D array to collect. !
2295! InpComm Communicator handle (integer, OPTIONAL). !
2296! !
2297! On Output: !
2298! !
2299! A Assembled 1D array. !
2300! !
2301!***********************************************************************
2302!
2303! Imported variable declarations.
2304!
2305 integer, intent(in) :: ng, model, Npts
2306
2307 integer, intent(in), optional :: InpComm
2308!
2309 real(r8), intent(in) :: Aspv
2310
2311 real(r8), intent(inout) :: A(:)
2312!
2313! Local variable declarations.
2314!
2315 integer :: Lstr, MyCOMM, MyError, MyNpts, Nnodes, Serror
2316 integer :: i, rank, request
2317
2318 integer, dimension(MPI_STATUS_SIZE) :: status
2319!
2320# if defined ASSEMBLE_ALLGATHER
2321 real(r8), dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
2322# elif defined ASSEMBLE_ALLREDUCE
2323 real(r8), dimension(Npts) :: Asend
2324# elif defined ASSEMBLE_SENDRECV
2325 real(r8), allocatable :: Arecv(:)
2326# endif
2327!
2328 character (len=MPI_MAX_ERROR_STRING) :: string
2329
2330 character (len=*), parameter :: MyFile = &
2331 & __FILE__//", mp_assemblef_1d"
2332
2333# ifdef PROFILE
2334!
2335!-----------------------------------------------------------------------
2336! Turn on time clocks.
2337!-----------------------------------------------------------------------
2338!
2339 CALL wclock_on (ng, model, 70, __LINE__, MyFile)
2340# endif
2341# ifdef MPI
2342!
2343!-----------------------------------------------------------------------
2344! Set distributed-memory communicator handle (context ID).
2345!-----------------------------------------------------------------------
2346!
2347 IF (PRESENT(inpcomm)) THEN
2348 mycomm=inpcomm
2349 ELSE
2350 mycomm=ocn_comm_world
2351 END IF
2352# endif
2353!
2354!-----------------------------------------------------------------------
2355! Check input parameters.
2356!-----------------------------------------------------------------------
2357!
2358! Maximum automatic buffer memory size in bytes.
2359!
2360# if defined ASSEMBLE_ALLGATHER
2361 bmemmax(ng)=max(bmemmax(ng), real(SIZE(arecv)*kind(a),r8))
2362# else
2363 bmemmax(ng)=max(bmemmax(ng), real(npts*kind(a),r8))
2364# endif
2365!
2366 mynpts=ubound(a, dim=1)
2367 IF (npts.ne.mynpts) THEN
2368 IF (master) THEN
2369 WRITE (stdout,10) npts, mynpts
2370 END IF
2371 exit_flag=7
2372 END IF
2373!
2374 IF (aspv.ne.0.0_r8) THEN
2375 IF (master) THEN
2376 WRITE (stdout,20) aspv
2377 END IF
2378 exit_flag=7
2379 END IF
2380!
2381!-----------------------------------------------------------------------
2382! Collect data from all nodes.
2383!-----------------------------------------------------------------------
2384!
2385# if defined ASSEMBLE_ALLGATHER
2386 CALL mpi_allgather (a, npts, mp_float, arecv, npts, mp_float, &
2387 & mycomm, myerror)
2388 IF (myerror.ne.mpi_success) THEN
2389 CALL mpi_error_string (myerror, string, lstr, serror)
2390 lstr=len_trim(string)
2391 WRITE (stdout,30) 'MPI_ALLGATHER', myrank, myerror, &
2392 & string(1:lstr)
2393 exit_flag=2
2394 RETURN
2395 END IF
2396!
2397! Pack data according to special values: sum or ignore.
2398!
2399 nnodes=ntilei(ng)*ntilej(ng)-1
2400 IF (aspv.eq.0.0_r8) THEN
2401 DO i=1,npts
2402 a(i)=0.0_r8
2403 DO rank=0,nnodes
2404 a(i)=a(i)+arecv(i,rank)
2405 END DO
2406 END DO
2407 ELSE
2408 DO i=1,npts
2409 DO rank=0,nnodes
2410 IF (arecv(i,rank).ne.aspv) THEN
2411 a(i)=arecv(i,rank)
2412 END IF
2413 END DO
2414 END DO
2415 END IF
2416
2417# elif defined ASSEMBLE_ALLREDUCE
2418!
2419! Copy data to send.
2420!
2421 DO i=1,npts
2422 asend(i)=a(i)
2423 END DO
2424!
2425! Collect data from all nodes as a reduced sum.
2426!
2427 CALL mpi_allreduce (asend, a, npts, mp_float, mpi_sum, &
2428 & mycomm, myerror)
2429 IF (myerror.ne.mpi_success) THEN
2430 CALL mpi_error_string (myerror, string, lstr, serror)
2431 lstr=len_trim(string)
2432 WRITE (stdout,30) 'MPI_ALLREDUCE', myrank, myerror, &
2433 & string(1:lstr)
2434 exit_flag=2
2435 RETURN
2436 END IF
2437
2438# elif defined ASSEMBLE_SENDRECV
2439
2440 IF (myrank.eq.mymaster) THEN
2441!
2442! If master node, allocate and receive buffer.
2443!
2444 IF (.not.allocated(arecv)) THEN
2445 allocate (arecv(npts))
2446 END IF
2447!
2448! If master node, loop over other nodes to receive and accumulate the
2449! data.
2450!
2451 DO rank=1,ntilei(ng)*ntilej(ng)-1
2452 CALL mpi_irecv (arecv, npts, mp_float, rank, rank+5, &
2453 & mycomm, request, myerror)
2454 CALL mpi_wait (request, status, myerror)
2455 IF (myerror.ne.mpi_success) THEN
2456 CALL mpi_error_string (myerror, string, lstr, serror)
2457 lstr=len_trim(string)
2458 WRITE (stdout,30) 'MPI_IRECV', rank, myerror, string(1:lstr)
2459 exit_flag=2
2460 RETURN
2461 END IF
2462 DO i=1,npts
2463 a(i)=a(i)+arecv(i)
2464 END DO
2465 END DO
2466 deallocate (arecv)
2467!
2468! Otherwise, send data to master node.
2469!
2470 ELSE
2471 CALL mpi_isend (a, npts, mp_float, mymaster, myrank+5, &
2472 & mycomm, request, myerror)
2473 CALL mpi_wait (request, status, myerror)
2474 IF (myerror.ne.mpi_success) THEN
2475 CALL mpi_error_string (myerror, string, lstr, serror)
2476 lstr=len_trim(string)
2477 WRITE (stdout,30) 'MPI_ISEND', myrank, myerror, string(1:lstr)
2478 exit_flag=2
2479 RETURN
2480 END IF
2481 END IF
2482!
2483! Broadcast accumulated (full) data to all nodes.
2484!
2485 CALL mpi_bcast (a, npts, mp_float, mymaster, mycomm, myerror)
2486 IF (myerror.ne.mpi_success) THEN
2487 CALL mpi_error_string (myerror, string, lstr, serror)
2488 lstr=len_trim(string)
2489 WRITE (stdout,30) 'MPI_BCAST', myrank, myerror, string(1:lstr)
2490 exit_flag=2
2491 RETURN
2492 END IF
2493# endif
2494
2495# ifdef PROFILE
2496!
2497!-----------------------------------------------------------------------
2498! Turn off time clocks.
2499!-----------------------------------------------------------------------
2500!
2501 CALL wclock_off (ng, model, 70, __LINE__, MyFile)
2502# endif
2503!
2504 10 FORMAT (/,' MP_ASSEMBLEF_1D - inconsistent array size, Npts = ', &
2505 & i10,2x,i10,/,19x,'number of addressed array elements ', &
2506 & 'is incorrect.')
2507 20 FORMAT (/,' MP_ASSEMBLEF_1D - illegal special value, Aspv = ', &
2508 & 1p,e17.10,/,19x,'a zero value is needed for global ', &
2509 & 'reduction.')
2510 30 FORMAT (/,' MP_ASSEMBLEF_1D - error during ',a,' call, Task = ', &
2511 & i3.3,' Error = ',i3,/,19x,a)
2512!
2513 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::master, mod_parallel::mp_float, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().

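Because the collection is a reduction sum, every element is added across all nodes, so callers preset the elements they do not own to the special value zero; owned values then pass through the sum unchanged. A hedged caller-side sketch (the owned index range and local_value are hypothetical):

    real(r8) :: f(Npts)             ! node-local copy of the full array

    f=0.0_r8                        ! unowned elements hold Aspv = zero
    DO i=istr,iend                  ! assumed range owned by this node
      f(i)=local_value(i)           ! hypothetical local computation
    END DO
    CALL mp_assemble (ng, model, SIZE(f), 0.0_r8, f)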

◆ mp_assemblef_2d()

subroutine distribute_mod::mp_assemble::mp_assemblef_2d ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
real(r8), intent(in) aspv,
real(r8), dimension(:,:), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 2516 of file distribute.F.

2517!
2518!***********************************************************************
2519! !
2520! This routine assembles a 2D floating-point array from all members !
2521! in the group. The collection of data from all nodes is achieved !
2522! as a reduction sum. !
2523! !
2524! On Input: !
2525! !
2526! ng Nested grid number. !
2527! model Calling model identifier. !
2528! Npts Number of collected data points, PROD(SIZE(A)). !
2529! Aspv Special value indicating that an array element is !
2530! not operated by the current parallel node. It must !
2531! be zero to collect data by a global reduction sum. !
2532! A 2D array to collect. !
2533! InpComm Communicator handle (integer, OPTIONAL). !
2534! !
2535! On Output: !
2536! !
2537! A Assembled 2D array. !
2538! !
2539!***********************************************************************
2540!
2541! Imported variable declarations.
2542!
2543 integer, intent(in) :: ng, model, Npts
2544
2545 integer, intent(in), optional :: InpComm
2546!
2547 real(r8), intent(in) :: Aspv
2548
2549 real(r8), intent(inout) :: A(:,:)
2550!
2551! Local variable declarations.
2552!
2553 integer :: Lstr, MyCOMM, MyError, MyNpts, Nnodes, Serror
2554 integer :: i, rank, request
2555
2556 integer :: Asize(2)
2557
2558 integer, dimension(MPI_STATUS_SIZE) :: status
2559!
2560# if defined ASSEMBLE_ALLGATHER
2561 real(r8), dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
2562 real(r8), dimension(Npts) :: Asend
2563# elif defined ASSEMBLE_ALLREDUCE
2564 real(r8), dimension(Npts) :: Arecv, Asend
2565# elif defined ASSEMBLE_SENDRECV
2566 real(r8), allocatable :: Arecv(:)
2567 real(r8), dimension(Npts) :: Asend
2568# endif
2569!
2570 character (len=MPI_MAX_ERROR_STRING) :: string
2571
2572 character (len=*), parameter :: MyFile = &
2573 & __FILE__//", mp_assemblef_2d"
2574
2575# ifdef PROFILE
2576!
2577!-----------------------------------------------------------------------
2578! Turn on time clocks.
2579!-----------------------------------------------------------------------
2580!
2581 CALL wclock_on (ng, model, 70, __LINE__, MyFile)
2582# endif
2583# ifdef MPI
2584!
2585!-----------------------------------------------------------------------
2586! Set distributed-memory communicator handle (context ID).
2587!-----------------------------------------------------------------------
2588!
2589 IF (PRESENT(inpcomm)) THEN
2590 mycomm=inpcomm
2591 ELSE
2592 mycomm=ocn_comm_world
2593 END IF
2594# endif
2595!
2596!-----------------------------------------------------------------------
2597! Check input parameters.
2598!-----------------------------------------------------------------------
2599!
2600! Maximum automatic buffer memory size in bytes.
2601!
2602# if defined ASSEMBLE_ALLGATHER
2603 bmemmax(ng)=max(bmemmax(ng), real((npts+SIZE(arecv))*kind(a),r8))
2604# else
2605 bmemmax(ng)=max(bmemmax(ng), real(2*npts*kind(a),r8))
2606# endif
2607!
2608 asize(1)=ubound(a, dim=1)
2609 asize(2)=ubound(a, dim=2)
2610 mynpts=asize(1)*asize(2)
2611 IF (npts.ne.mynpts) THEN
2612 IF (master) THEN
2613 WRITE (stdout,10) npts, mynpts
2614 END IF
2615 exit_flag=7
2616 END IF
2617!
2618 IF (aspv.ne.0.0_r8) THEN
2619 IF (master) THEN
2620 WRITE (stdout,20) aspv
2621 END IF
2622 exit_flag=7
2623 END IF
2624!
2625!-----------------------------------------------------------------------
2626! Collect data from all nodes.
2627!-----------------------------------------------------------------------
2628!
2629! Reshape input 2D data into 1D array to facilitate communications.
2630!
2631 asend=reshape(a, (/npts/))
2632
2633# if defined ASSEMBLE_ALLGATHER
2634!
2635! Collect data from all nodes.
2636!
2637 CALL mpi_allgather (asend, npts, mp_float, arecv, npts, mp_float, &
2638 & mycomm, myerror)
2639 IF (myerror.ne.mpi_success) THEN
2640 CALL mpi_error_string (myerror, string, lstr, serror)
2641 lstr=len_trim(string)
2642 WRITE (stdout,30) 'MPI_ALLGATHER', myrank, myerror, &
2643 & string(1:lstr)
2644 exit_flag=2
2645 RETURN
2646 END IF
2647!
2648! Pack data according to special values: sum or ignore.
2649!
2650 nnodes=ntilei(ng)*ntilej(ng)-1
2651 IF (aspv.eq.0.0_r8) THEN
2652 DO i=1,npts
2653 asend(i)=0.0_r8
2654 DO rank=0,nnodes
2655 asend(i)=asend(i)+arecv(i,rank)
2656 END DO
2657 END DO
2658 ELSE
2659 DO i=1,npts
2660 DO rank=0,nnodes
2661 IF (arecv(i,rank).ne.aspv) THEN
2662 asend(i)=arecv(i,rank)
2663 END IF
2664 END DO
2665 END DO
2666 END IF
2667!
2668! Load collected data in output 2D array.
2669!
2670 a=reshape(asend, asize)
2671
2672# elif defined ASSEMBLE_ALLREDUCE
2673!
2674! Collect data from all nodes as a reduced sum.
2675!
2676 CALL mpi_allreduce (asend, arecv, npts, mp_float, mpi_sum, &
2677 & mycomm, myerror)
2678 IF (myerror.ne.mpi_success) THEN
2679 CALL mpi_error_string (myerror, string, lstr, serror)
2680 lstr=len_trim(string)
2681 WRITE (stdout,30) 'MPI_ALLREDUCE', myrank, myerror, &
2682 & string(1:lstr)
2683 exit_flag=2
2684 RETURN
2685 END IF
2686!
2687! Load collected data into output 2D array.
2688!
2689 a=reshape(arecv, asize)
2690
2691# elif defined ASSEMBLE_SENDRECV
2692!
2693 IF (myrank.eq.mymaster) THEN
2694!
2695! If master node, allocate and receive buffer.
2696!
2697 IF (.not.allocated(arecv)) THEN
2698 allocate (arecv(npts))
2699 END IF
2700!
2701! If master node, loop over other nodes to receive and accumulate the
2702! data.
2703!
2704 DO rank=1,ntilei(ng)*ntilej(ng)-1
2705 CALL mpi_irecv (arecv, npts, mp_float, rank, rank+5, &
2706 & mycomm, request, myerror)
2707 CALL mpi_wait (request, status, myerror)
2708 IF (myerror.ne.mpi_success) THEN
2709 CALL mpi_error_string (myerror, string, lstr, serror)
2710 lstr=len_trim(string)
2711 WRITE (stdout,30) 'MPI_IRECV', rank, myerror, string(1:lstr)
2712 exit_flag=2
2713 RETURN
2714 END IF
2715 DO i=1,npts
2716 asend(i)=asend(i)+arecv(i)
2717 END DO
2718 END DO
2719 deallocate (arecv)
2720!
2721! Load collected data in output 2D array.
2722!
2723 a=reshape(asend, asize)
2724!
2725! Otherwise, send data to master node.
2726!
2727 ELSE
2728 CALL mpi_isend (asend, npts, mp_float, mymaster, myrank+5, &
2729 & mycomm, request, myerror)
2730 CALL mpi_wait (request, status, myerror)
2731 IF (myerror.ne.mpi_success) THEN
2732 CALL mpi_error_string (myerror, string, lstr, serror)
2733 lstr=len_trim(string)
2734 WRITE (stdout,30) 'MPI_ISEND', myrank, myerror, string(1:lstr)
2735 exit_flag=2
2736 RETURN
2737 END IF
2738 END IF
2739!
2740! Broadcast accumulated (full) data to all nodes.
2741!
2742 CALL mpi_bcast (a, npts, mp_float, mymaster, mycomm, myerror)
2743 IF (myerror.ne.mpi_success) THEN
2744 CALL mpi_error_string (myerror, string, lstr, serror)
2745 lstr=len_trim(string)
2746 WRITE (stdout,30) 'MPI_BCAST', myrank, myerror, string(1:lstr)
2747 exit_flag=2
2748 RETURN
2749 END IF
2750# endif
2751# ifdef PROFILE
2752!
2753!-----------------------------------------------------------------------
2754! Turn off time clocks.
2755!-----------------------------------------------------------------------
2756!
2757 CALL wclock_off (ng, model, 70, __LINE__, MyFile)
2758# endif
2759!
2760 10 FORMAT (/,' MP_ASSEMBLEF_2D - inconsistent array size, Npts = ', &
2761 & i10,2x,i10,/,19x,'number of addressed array elements ', &
2762 & 'is incorrect.')
2763 20 FORMAT (/,' MP_ASSEMBLEF_2D - illegal special value, Aspv = ', &
2764 & 1p,e17.10,/,19x,'a zero value is needed for global ', &
2765 & 'reduction.')
2766 30 FORMAT (/,' MP_ASSEMBLEF_2D - error during ',a,' call, Task = ', &
2767 & i3.3,' Error = ',i3,/,19x,a)
2768!
2769 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::master, mod_parallel::mp_float, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().

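The 2D variant flattens A with RESHAPE before communicating and restores the shape on return. A small sketch of that round trip, with an assumed shape:

    real(r8) :: a2(4,3), flat(12)

    flat=RESHAPE(a2, (/ SIZE(a2) /))    ! 2D -> 1D for the MPI call
    ! ... assemble the 1D buffer across nodes ...
    a2=RESHAPE(flat, SHAPE(a2))         ! restore the original 2D shape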

◆ mp_assemblef_3d()

subroutine distribute_mod::mp_assemble::mp_assemblef_3d ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
real(r8), intent(in) aspv,
real(r8), dimension(:,:,:), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 2772 of file distribute.F.

2773!
2774!***********************************************************************
2775! !
2776! This routine assembles a 3D floating-point array from all members !
2777! in the group. The collection of data from all nodes is achieved !
2778! as a reduction sum. !
2779! !
2780! On Input: !
2781! !
2782! ng Nested grid number. !
2783! model Calling model identifier. !
2784! Npts Number of collected data points, PROD(SIZE(A)). !
2785! Aspv Special value indicating that an array element is !
2786! not operated by the current parallel node. It must !
2787! be zero to collect data by a global reduction sum. !
2788! A 3D array to collect. !
2789! InpComm Communicator handle (integer, OPTIONAL). !
2790! !
2791! On Output: !
2792! !
2793! A Assembled 3D array. !
2794! !
2795!***********************************************************************
2796!
2797! Imported variable declarations.
2798!
2799 integer, intent(in) :: ng, model, Npts
2800
2801 integer, intent(in), optional :: InpComm
2802
2803 real(r8), intent(in) :: Aspv
2804
2805 real(r8), intent(inout) :: A(:,:,:)
2806!
2807! Local variable declarations.
2808!
2809 integer :: Lstr, MyCOMM, MyError, MyNpts, Nnodes, Serror
2810 integer :: i, rank, request
2811
2812 integer :: Asize(3)
2813
2814 integer, dimension(MPI_STATUS_SIZE) :: status
2815!
2816# if defined ASSEMBLE_ALLGATHER
2817 real(r8), dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
2818 real(r8), dimension(Npts) :: Asend
2819# elif defined ASSEMBLE_ALLREDUCE
2820 real(r8), dimension(Npts) :: Arecv, Asend
2821# elif defined ASSEMBLE_SENDRECV
2822 real(r8), allocatable :: Arecv(:)
2823 real(r8), dimension(Npts) :: Asend
2824# endif
2825!
2826 character (len=MPI_MAX_ERROR_STRING) :: string
2827
2828 character (len=*), parameter :: MyFile = &
2829 & __FILE__//", mp_assemblef_3d"
2830
2831# ifdef PROFILE
2832!
2833!-----------------------------------------------------------------------
2834! Turn on time clocks.
2835!-----------------------------------------------------------------------
2836!
2837 CALL wclock_on (ng, model, 70, __LINE__, MyFile)
2838# endif
2839# ifdef MPI
2840!
2841!-----------------------------------------------------------------------
2842! Set distributed-memory communicator handle (context ID).
2843!-----------------------------------------------------------------------
2844!
2845 IF (PRESENT(inpcomm)) THEN
2846 mycomm=inpcomm
2847 ELSE
2848 mycomm=ocn_comm_world
2849 END IF
2850# endif
2851!
2852!-----------------------------------------------------------------------
2853! Check input parameters.
2854!-----------------------------------------------------------------------
2855!
2856! Maximum automatic buffer memory size in bytes.
2857!
2858# if defined ASSEMBLE_ALLGATHER
2859 bmemmax(ng)=max(bmemmax(ng), real((npts+SIZE(arecv))*kind(a),r8))
2860# else
2861 bmemmax(ng)=max(bmemmax(ng), real(2*npts*kind(a),r8))
2862# endif
2863!
2864 asize(1)=ubound(a, dim=1)
2865 asize(2)=ubound(a, dim=2)
2866 asize(3)=ubound(a, dim=3)
2867 mynpts=asize(1)*asize(2)*asize(3)
2868 IF (npts.ne.mynpts) THEN
2869 IF (master) THEN
2870 WRITE (stdout,10) npts, mynpts
2871 END IF
2872 exit_flag=7
2873 END IF
2874!
2875 IF (aspv.ne.0.0_r8) THEN
2876 IF (master) THEN
2877 WRITE (stdout,20) aspv
2878 END IF
2879 exit_flag=7
2880 END IF
2881!
2882!-----------------------------------------------------------------------
2883! Collect data from all nodes.
2884!-----------------------------------------------------------------------
2885!
2886! Reshape input 3D data into 1D array to facilitate communications.
2887!
2888 asend=reshape(a, (/npts/))
2889
2890# if defined ASSEMBLE_ALLGATHER
2891!
2892! Collect data from all nodes.
2893!
2894 CALL mpi_allgather (asend, npts, mp_float, arecv, npts, mp_float, &
2895 & mycomm, myerror)
2896 IF (myerror.ne.mpi_success) THEN
2897 CALL mpi_error_string (myerror, string, lstr, serror)
2898 lstr=len_trim(string)
2899 WRITE (stdout,30) 'MPI_ALLGATHER', myrank, myerror, &
2900 & string(1:lstr)
2901 exit_flag=2
2902 RETURN
2903 END IF
2904!
2905! Pack data according to special values: sum or ignore.
2906!
2907 nnodes=ntilei(ng)*ntilej(ng)-1
2908 IF (aspv.eq.0.0_r8) THEN
2909 DO i=1,npts
2910 asend(i)=0.0_r8
2911 DO rank=0,nnodes
2912 asend(i)=asend(i)+arecv(i,rank)
2913 END DO
2914 END DO
2915 ELSE
2916 DO i=1,npts
2917 DO rank=0,nnodes
2918 IF (arecv(i,rank).ne.aspv) THEN
2919 asend(i)=arecv(i,rank)
2920 END IF
2921 END DO
2922 END DO
2923 END IF
2924!
2925! Load collected data into output 3D array.
2926!
2927 a=reshape(asend, asize)
2928
2929# elif defined ASSEMBLE_ALLREDUCE
2930!
2931! Collect data from all nodes as a reduced sum.
2932!
2933 CALL mpi_allreduce (asend, arecv, npts, mp_float, mpi_sum, &
2934 & mycomm, myerror)
2935 IF (myerror.ne.mpi_success) THEN
2936 CALL mpi_error_string (myerror, string, lstr, serror)
2937 lstr=len_trim(string)
2938 WRITE (stdout,30) 'MPI_ALLREDUCE', myrank, myerror, &
2939 & string(1:lstr)
2940 exit_flag=2
2941 RETURN
2942 END IF
2943!
2944! Load collected data into output 3D array.
2945!
2946 a=reshape(arecv, asize)
2947
2948# elif defined ASSEMBLE_SENDRECV
2949!
2950 IF (myrank.eq.mymaster) THEN
2951!
2952! If master node, allocate and receive buffer.
2953!
2954 IF (.not.allocated(arecv)) THEN
2955 allocate (arecv(npts))
2956 END IF
2957!
2958! If master node, loop over other nodes to receive and accumulate the
2959! data.
2960!
2961 DO rank=1,ntilei(ng)*ntilej(ng)-1
2962 CALL mpi_irecv (arecv, npts, mp_float, rank, rank+5, &
2963 & mycomm, request, myerror)
2964 CALL mpi_wait (request, status, myerror)
2965 IF (myerror.ne.mpi_success) THEN
2966 CALL mpi_error_string (myerror, string, lstr, serror)
2967 lstr=len_trim(string)
2968 WRITE (stdout,30) 'MPI_IRECV', rank, myerror, string(1:lstr)
2969 exit_flag=2
2970 RETURN
2971 END IF
2972 DO i=1,npts
2973 asend(i)=asend(i)+arecv(i)
2974 END DO
2975 END DO
2976 deallocate (arecv)
2977!
2978! Load collected data into output 3D array.
2979!
2980 a=reshape(asend, asize)
2981!
2982! Otherwise, send data to master node.
2983!
2984 ELSE
2985 CALL mpi_isend (asend, npts, mp_float, mymaster, myrank+5, &
2986 & mycomm, request, myerror)
2987 CALL mpi_wait (request, status, myerror)
2988 IF (myerror.ne.mpi_success) THEN
2989 CALL mpi_error_string (myerror, string, lstr, serror)
2990 lstr=len_trim(string)
2991 WRITE (stdout,30) 'MPI_ISEND', myrank, myerror, string(1:lstr)
2992 exit_flag=2
2993 RETURN
2994 END IF
2995 END IF
2996!
2997! Broadcast accumulated (full) data to all nodes.
2998!
2999 CALL mpi_bcast (a, npts, mp_float, mymaster, mycomm, myerror)
3000 IF (myerror.ne.mpi_success) THEN
3001 CALL mpi_error_string (myerror, string, lstr, serror)
3002 lstr=len_trim(string)
3003 WRITE (stdout,30) 'MPI_BCAST', myrank, myerror, string(1:lstr)
3004 exit_flag=2
3005 RETURN
3006 END IF
3007# endif
3008# ifdef PROFILE
3009!
3010!-----------------------------------------------------------------------
3011! Turn off time clocks.
3012!-----------------------------------------------------------------------
3013!
3014 CALL wclock_off (ng, model, 70, __LINE__, MyFile)
3015# endif
3016!
3017 10 FORMAT (/,' MP_ASSEMBLEF_3D - inconsistent array size, Npts = ', &
3018 & i10,2x,i10,/,19x,'number of addressed array elements ', &
3019 & 'is incorrect.')
3020 20 FORMAT (/,' MP_ASSEMBLEF_3D - illegal special value, Aspv = ', &
3021 & 1p,e17.10,/,19x,'a zero value is needed for global ', &
3022 & 'reduction.')
3023 30 FORMAT (/,' MP_ASSEMBLEF_3D - error during ',a,' call, Task = ', &
3024 & i3.3,' Error = ',i3,/,19x,a)
3025!
3026 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::master, mod_parallel::mp_float, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().

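The OPTIONAL InpComm argument substitutes a caller-supplied communicator handle for OCN_COMM_WORLD. A hedged sketch; split_comm is a hypothetical handle created elsewhere, for example with MPI_COMM_SPLIT:

    CALL mp_assemble (ng, model, SIZE(work3d), 0.0_r8, work3d,        &
   &                  inpcomm=split_comm)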

◆ mp_assemblei_1d()

subroutine distribute_mod::mp_assemble::mp_assemblei_1d ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
integer, intent(in) aspv,
integer, dimension(:), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 3029 of file distribute.F.

3030!
3031!***********************************************************************
3032! !
3033! This routine assembles a 1D integer array from all members in the !
3034! group. The collection of data from all nodes is achieved as a !
3035! reduction sum. !
3036! !
3037! On Input: !
3038! !
3039! ng Nested grid number. !
3040! model Calling model identifier. !
3041! Npts Number of collected data points, PROD(SIZE(A)). !
3042! Aspv Special value indicating that an array element is !
3043! not operated by the current parallel node. It must !
3044! be zero to collect data by a global reduction sum. !
3045! A 1D array to collect. !
3046! InpComm Communicator handle (integer, OPTIONAL). !
3047! !
3048! On Output: !
3049! !
3050! A Assembled 1D array. !
3051! !
3052!***********************************************************************
3053!
3054! Imported variable declarations.
3055!
3056 integer, intent(in) :: ng, model, Npts
3057
3058 integer, intent(in), optional :: InpComm
3059
3060 integer, intent(in) :: Aspv
3061
3062 integer, intent(inout) :: A(:)
3063!
3064! Local variable declarations.
3065!
3066 integer :: Lstr, MyCOMM, MyError, MyNpts, Nnodes, Serror
3067 integer :: i, rank, request
3068
3069 integer, dimension(MPI_STATUS_SIZE) :: status
3070
3071# if defined ASSEMBLE_ALLGATHER
3072 integer, dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
3073# elif defined ASSEMBLE_ALLREDUCE
3074 integer, dimension(Npts) :: Asend
3075# elif defined ASSEMBLE_SENDRECV
3076 integer, allocatable :: Arecv(:)
3077# endif
3078!
3079 character (len=MPI_MAX_ERROR_STRING) :: string
3080
3081 character (len=*), parameter :: MyFile = &
3082 & __FILE__//", mp_assemblei_1d"
3083
3084# ifdef PROFILE
3085!
3086!-----------------------------------------------------------------------
3087! Turn on time clocks.
3088!-----------------------------------------------------------------------
3089!
3090 CALL wclock_on (ng, model, 70, __LINE__, MyFile)
3091# endif
3092# ifdef MPI
3093!
3094!-----------------------------------------------------------------------
3095! Set distributed-memory communicator handle (context ID).
3096!-----------------------------------------------------------------------
3097!
3098 IF (PRESENT(inpcomm)) THEN
3099 mycomm=inpcomm
3100 ELSE
3101 mycomm=ocn_comm_world
3102 END IF
3103# endif
3104!
3105!-----------------------------------------------------------------------
3106! Check input parameters.
3107!-----------------------------------------------------------------------
3108!
3109! Maximum automatic buffer memory size in bytes.
3110!
3111# if defined ASSEMBLE_ALLGATHER
3112 bmemmax(ng)=max(bmemmax(ng), real(SIZE(arecv)*kind(a),r8))
3113# else
3114 bmemmax(ng)=max(bmemmax(ng), real(npts*kind(a),r8))
3115# endif
3116!
3117 mynpts=ubound(a, dim=1)
3118 IF (npts.ne.mynpts) THEN
3119 IF (master) THEN
3120 WRITE (stdout,10) npts, mynpts
3121 END IF
3122 exit_flag=7
3123 END IF
3124!
3125 IF (aspv.ne.0) THEN
3126 IF (master) THEN
3127 WRITE (stdout,20) aspv
3128 END IF
3129 exit_flag=7
3130 END IF
3131!
3132!-----------------------------------------------------------------------
3133! Collect data from all nodes.
3134!-----------------------------------------------------------------------
3135!
3136# if defined ASSEMBLE_ALLGATHER
3137 CALL mpi_allgather (a, npts, mpi_integer, &
3138 & arecv, npts, mpi_integer, &
3139 & mycomm, myerror)
3140 IF (myerror.ne.mpi_success) THEN
3141 CALL mpi_error_string (myerror, string, lstr, serror)
3142 lstr=len_trim(string)
3143 WRITE (stdout,30) 'MPI_ALLGATHER', myrank, myerror, &
3144 & string(1:lstr)
3145 exit_flag=2
3146 RETURN
3147 END IF
3148!
3149! Pack data according to special values: sum or ignore.
3150!
3151 nnodes=ntilei(ng)*ntilej(ng)-1
3152 IF (aspv.eq.0) THEN
3153 DO i=1,npts
3154 a(i)=0
3155 DO rank=0,nnodes
3156 a(i)=a(i)+arecv(i,rank)
3157 END DO
3158 END DO
3159 ELSE
3160 DO i=1,npts
3161 DO rank=0,nnodes
3162 IF (arecv(i,rank).ne.aspv) THEN
3163 a(i)=arecv(i,rank)
3164 END IF
3165 END DO
3166 END DO
3167 END IF
3168
3169# elif defined ASSEMBLE_ALLREDUCE
3170!
3171! Copy data to send.
3172!
3173 DO i=1,npts
3174 asend(i)=a(i)
3175 END DO
3176!
3177! Collect data from all nodes as a reduced sum.
3178!
3179 CALL mpi_allreduce (asend, a, npts, mpi_integer, mpi_sum, &
3180 & mycomm, myerror)
3181 IF (myerror.ne.mpi_success) THEN
3182 CALL mpi_error_string (myerror, string, lstr, serror)
3183 lstr=len_trim(string)
3184 WRITE (stdout,30) 'MPI_ALLREDUCE', myrank, myerror, &
3185 & string(1:lstr)
3186 exit_flag=2
3187 RETURN
3188 END IF
3189
3190# elif defined ASSEMBLE_SENDRECV
3191
3192 IF (myrank.eq.mymaster) THEN
3193!
3194! If master node, allocate and receive buffer.
3195!
3196 IF (.not.allocated(arecv)) THEN
3197 allocate (arecv(npts))
3198 END IF
3199!
3200! If master node, loop over other nodes to receive and accumulate the
3201! data.
3202!
3203 DO rank=1,ntilei(ng)*ntilej(ng)-1
3204 CALL mpi_irecv (arecv, npts, mpi_integer, rank, rank+5, &
3205 & mycomm, request, myerror)
3206 CALL mpi_wait (request, status, myerror)
3207 IF (myerror.ne.mpi_success) THEN
3208 CALL mpi_error_string (myerror, string, lstr, serror)
3209 lstr=len_trim(string)
3210 WRITE (stdout,30) 'MPI_IRECV', rank, myerror, string(1:lstr)
3211 exit_flag=2
3212 RETURN
3213 END IF
3214 DO i=1,npts
3215 a(i)=a(i)+arecv(i)
3216 END DO
3217 END DO
3218 deallocate (arecv)
3219!
3220! Otherwise, send data to master node.
3221!
3222 ELSE
3223 CALL mpi_isend (a, npts, mpi_integer, mymaster, myrank+5, &
3224 & mycomm, request, myerror)
3225 CALL mpi_wait (request, status, myerror)
3226 IF (myerror.ne.mpi_success) THEN
3227 CALL mpi_error_string (myerror, string, lstr, serror)
3228 lstr=len_trim(string)
3229 WRITE (stdout,30) 'MPI_ISEND', myrank, myerror, string(1:lstr)
3230 exit_flag=2
3231 RETURN
3232 END IF
3233 END IF
3234!
3235! Broadcast accumulated (full) data to all nodes.
3236!
3237 CALL mpi_bcast (a, npts, mpi_integer, mymaster, mycomm, myerror)
3238 IF (myerror.ne.mpi_success) THEN
3239 CALL mpi_error_string (myerror, string, lstr, serror)
3240 lstr=len_trim(string)
3241 WRITE (stdout,30) 'MPI_BCAST', myrank, myerror, string(1:lstr)
3242 exit_flag=2
3243 RETURN
3244 END IF
3245# endif
3246# ifdef PROFILE
3247!
3248!-----------------------------------------------------------------------
3249! Turn off time clocks.
3250!-----------------------------------------------------------------------
3251!
3252 CALL wclock_off (ng, model, 70, __LINE__, MyFile)
3253# endif
3254!
3255 10 FORMAT (/,' MP_ASSEMBLEI_1D - inconsistent array size, Npts = ', &
3256 & i10,2x,i10,/,19x,'number of addressed array elements ', &
3257 & 'is incorrect.')
3258 20 FORMAT (/,' MP_ASSEMBLEI_1D - illegal special value, Aspv = ',i4, &
3259 & /,19x,'a zero value is needed for global reduction.')
3260 30 FORMAT (/,' MP_ASSEMBLEI_1D - error during ',a,' call, Task = ', &
3261 & i3.3,' Error = ',i3,/,19x,a)
3262!
3263 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::master, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().

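Note that the Npts and Aspv checks set exit_flag=7 without returning early, so callers normally test the flag after assembling. A minimal sketch, assuming the usual mod_scalars flags:

    USE mod_scalars, ONLY : exit_flag, NoError

    CALL mp_assemble (ng, model, SIZE(icount), 0, icount)
    IF (exit_flag.ne.NoError) RETURN    ! size or special-value check failed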

◆ mp_assemblei_2d()

subroutine distribute_mod::mp_assemble::mp_assemblei_2d ( integer, intent(in) ng,
integer, intent(in) model,
integer, intent(in) npts,
integer, intent(in) aspv,
integer, dimension(:,:), intent(inout) a,
integer, intent(in), optional inpcomm )

Definition at line 3266 of file distribute.F.

3267!
3268!***********************************************************************
3269! !
3270! This routine assembles a 2D integer array from all members in the !
3271! group. The collection of data from all nodes is achieved as a !
3272! reduction sum. !
3273! !
3274! On Input: !
3275! !
3276! ng Nested grid number. !
3277! model Calling model identifier. !
3278! Npts Number of collected data points, PROD(SIZE(A)). !
3279! Aspv Special value indicating that an array element is !
3280! not operated by the current parallel node. It must !
3281! be zero to collect data by a global reduction sum. !
3282! A 2D array to collect. !
3283! InpComm Communicator handle (integer, OPTIONAL). !
3284! !
3285! On Output: !
3286! !
3287! A Assembled 2D array. !
3288! !
3289!***********************************************************************
3290!
3291! Imported variable declarations.
3292!
3293 integer, intent(in) :: ng, model, Npts
3294
3295 integer, intent(in), optional :: InpComm
3296
3297 integer, intent(in) :: Aspv
3298
3299 integer, intent(inout) :: A(:,:)
3300!
3301! Local variable declarations.
3302!
3303 integer :: Lstr, MyCOMM, MyError, MyNpts, Nnodes, Serror
3304 integer :: i, rank, request
3305
3306 integer :: Asize(2)
3307
3308 integer, dimension(MPI_STATUS_SIZE) :: status
3309
3310# if defined ASSEMBLE_ALLGATHER
3311 integer, dimension(Npts,0:NtileI(ng)*NtileJ(ng)-1) :: Arecv
3312 integer, dimension(Npts) :: Asend
3313# elif defined ASSEMBLE_ALLREDUCE
3314 integer, dimension(Npts) :: Arecv, Asend
3315# elif defined ASSEMBLE_SENDRECV
3316 integer, allocatable :: Arecv(:)
3317 integer, dimension(Npts) :: Asend
3318# endif
3319!
3320 character (len=MPI_MAX_ERROR_STRING) :: string
3321
3322 character (len=*), parameter :: MyFile = &
3323 & __FILE__//", mp_assemblei_2d"
3324
3325# ifdef PROFILE
3326!
3327!-----------------------------------------------------------------------
3328! Turn on time clocks.
3329!-----------------------------------------------------------------------
3330!
3331 CALL wclock_on (ng, model, 70, __LINE__, MyFile)
3332# endif
3333# ifdef MPI
3334!
3335!-----------------------------------------------------------------------
3336! Set distributed-memory communicator handle (context ID).
3337!-----------------------------------------------------------------------
3338!
3339 IF (PRESENT(inpcomm)) THEN
3340 mycomm=inpcomm
3341 ELSE
3342 mycomm=ocn_comm_world
3343 END IF
3344# endif
3345!
3346!-----------------------------------------------------------------------
3347! Check input parameters.
3348!-----------------------------------------------------------------------
3349!
3350! Maximum automatic buffer memory size in bytes.
3351!
3352# if defined ASSEMBLE_ALLGATHER
3353 bmemmax(ng)=max(bmemmax(ng), real((npts+SIZE(arecv))*kind(a),r8))
3354# else
3355 bmemmax(ng)=max(bmemmax(ng), real(2*npts*kind(a),r8))
3356# endif
3357!
3358 asize(1)=ubound(a, dim=1)
3359 asize(2)=ubound(a, dim=2)
3360 mynpts=asize(1)*asize(2)
3361 IF (npts.ne.mynpts) THEN
3362 IF (master) THEN
3363 WRITE (stdout,10) npts, mynpts
3364 END IF
3365 exit_flag=7
3366 END IF
3367!
3368 IF (aspv.ne.0) THEN
3369 IF (master) THEN
3370 WRITE (stdout,20) aspv
3371 END IF
3372 exit_flag=7
3373 END IF
3374!
3375!-----------------------------------------------------------------------
3376! Collect data from all nodes.
3377!-----------------------------------------------------------------------
3378!
3379! Reshape input 2D data into 1D array to facilitate communications.
3380!
3381 asend=reshape(a, (/npts/))
3382
3383# if defined ASSEMBLE_ALLGATHER
3384!
3385! Collect data from all nodes.
3386!
3387 CALL mpi_allgather (asend, npts, mpi_integer, &
3388 & arecv, npts, mpi_integer, &
3389 & mycomm, myerror)
3390 IF (myerror.ne.mpi_success) THEN
3391 CALL mpi_error_string (myerror, string, lstr, serror)
3392 lstr=len_trim(string)
3393 WRITE (stdout,30) 'MPI_ALLGATHER', myrank, myerror, &
3394 & string(1:lstr)
3395 exit_flag=2
3396 RETURN
3397 END IF
3398!
3399! Pack data according to special values: sum or ignore.
3400!
3401 nnodes=ntilei(ng)*ntilej(ng)-1
3402 IF (aspv.eq.0) THEN
3403 DO i=1,npts
3404 asend(i)=0
3405 DO rank=0,nnodes
3406 asend(i)=asend(i)+arecv(i,rank)
3407 END DO
3408 END DO
3409 ELSE
3410 DO i=1,npts
3411 DO rank=0,nnodes
3412 IF (arecv(i,rank).ne.aspv) THEN
3413 asend(i)=arecv(i,rank)
3414 END IF
3415 END DO
3416 END DO
3417 END IF
3418!
3419! Load collected data in output 2D array.
3420!
3421 a=reshape(asend, asize)
3422
3423# elif defined ASSEMBLE_ALLREDUCE
3424!
3425! Collect data from all nodes as a reduced sum.
3426!
3427 CALL mpi_allreduce (asend, arecv, npts, mpi_integer, mpi_sum, &
3428 & mycomm, myerror)
3429 IF (myerror.ne.mpi_success) THEN
3430 CALL mpi_error_string (myerror, string, lstr, serror)
3431 lstr=len_trim(string)
3432 WRITE (stdout,30) 'MPI_ALLREDUCE', myrank, myerror, &
3433 & string(1:lstr)
3434 exit_flag=2
3435 RETURN
3436 END IF
3437!
3438! Load collected data.
3439!
3440 a=reshape(arecv, asize)
3441
3442# elif defined ASSEMBLE_SENDRECV
3443!
3444 IF (myrank.eq.mymaster) THEN
3445!
3446! If master node, allocate and receive buffer.
3447!
3448 IF (.not.allocated(arecv)) THEN
3449 allocate (arecv(npts))
3450 END IF
3451!
3452! If master node, loop over other nodes to receive and accumulate the
3453! data.
3454!
3455 DO rank=1,ntilei(ng)*ntilej(ng)-1
3456 CALL mpi_irecv (arecv, npts, mpi_integer, rank, rank+5, &
3457 & mycomm, request, myerror)
3458 CALL mpi_wait (request, status, myerror)
3459 IF (myerror.ne.mpi_success) THEN
3460 CALL mpi_error_string (myerror, string, lstr, serror)
3461 lstr=len_trim(string)
3462 WRITE (stdout,30) 'MPI_IRECV', rank, myerror, string(1:lstr)
3463 exit_flag=2
3464 RETURN
3465 END IF
3466 DO i=1,npts
3467 asend(i)=asend(i)+arecv(i)
3468 END DO
3469 END DO
3470 deallocate (arecv)
3471!
3472! Load collected data in output 2D array.
3473!
3474 a=reshape(asend, asize)
3475!
3476! Otherwise, send data to master node.
3477!
3478 ELSE
3479 CALL mpi_isend (asend, npts, mpi_integer, mymaster, myrank+5, &
3480 & mycomm, request, myerror)
3481 CALL mpi_wait (request, status, myerror)
3482 IF (myerror.ne.mpi_success) THEN
3483 CALL mpi_error_string (myerror, string, lstr, serror)
3484 lstr=len_trim(string)
3485 WRITE (stdout,30) 'MPI_ISEND', myrank, myerror, string(1:lstr)
3486 exit_flag=2
3487 RETURN
3488 END IF
3489 END IF
3490!
3491! Broadcast accumulated (full) data to all nodes.
3492!
3493 CALL mpi_bcast (a, npts, mpi_integer, mymaster, mycomm, myerror)
3494 IF (myerror.ne.mpi_success) THEN
3495 CALL mpi_error_string (myerror, string, lstr, serror)
3496 lstr=len_trim(string)
3497 WRITE (stdout,30) 'MPI_BCAST', myrank, myerror, string(1:lstr)
3498 exit_flag=2
3499 RETURN
3500 END IF
3501# endif
3502# ifdef PROFILE
3503!
3504!-----------------------------------------------------------------------
3505! Turn off time clocks.
3506!-----------------------------------------------------------------------
3507!
3508 CALL wclock_off (ng, model, 70, __LINE__, MyFile)
3509# endif
3510!
3511 10 FORMAT (/,' MP_ASSEMBLEI_2D - inconsistent array size, Npts = ', &
3512 & i10,2x,i10,/,19x,'number of addressed array elements ', &
3513 & 'is incorrect.')
3514 20 FORMAT (/,' MP_ASSEMBLEI_2D - illegal special value, Aspv = ',i4, &
3515 & /,19x,'a zero value is needed for global reduction.')
3516 30 FORMAT (/,' MP_ASSEMBLEI_2D - error during ',a,' call, Task = ', &
3517 & i3.3,' Error = ',i3,/,19x,a)
3518!
3519 RETURN

References mod_param::bmemmax, mod_scalars::exit_flag, mod_parallel::master, mod_parallel::mymaster, mod_parallel::myrank, mod_param::ntilei, mod_param::ntilej, mod_parallel::ocn_comm_world, mod_iounits::stdout, wclock_off(), and wclock_on().

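For reference, the ASSEMBLE_SENDRECV branch reduces to the classic accumulate-at-master, then broadcast pattern. A hedged plain-MPI sketch with assumed names (buf, n, tag, comm, nprocs, ierr):

    IF (myrank.eq.0) THEN
      DO rank=1,nprocs-1
        CALL mpi_recv (buf, n, mpi_integer, rank, tag, comm,          &
   &                   status, ierr)
        a(1:n)=a(1:n)+buf(1:n)          ! accumulate partial arrays
      END DO
    ELSE
      CALL mpi_send (a, n, mpi_integer, 0, tag, comm, ierr)
    END IF
    CALL mpi_bcast (a, n, mpi_integer, 0, comm, ierr)   ! share full result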

The documentation for this interface was generated from the following file:

distribute.F