diff --git a/src/CPL/LIS_cpl/module_lis_HYDRO.F b/src/CPL/LIS_cpl/module_lis_HYDRO.F index e30839204..de493c55c 100644 --- a/src/CPL/LIS_cpl/module_lis_HYDRO.F +++ b/src/CPL/LIS_cpl/module_lis_HYDRO.F @@ -72,7 +72,7 @@ subroutine lis_cpl_HYDRO(n) #endif if(nlst(did)%nsoil < 1) then write(6,*) "FATAL ERROR: nsoil is less than 1" - call hydro_stop("In module_lis_HYDRO.F module_lis_HYDRO() - nsoil is less than 1") + call hydro_stop("In module_lis_HYDRO.F module_lis_HYDRO() - nsoil is less than 1") endif allocate(nlst(did)%zsoil8(nlst(did)%nsoil)) nlst(did)%zsoil8(1) = -noah271_struc(n)%lyrthk(1) @@ -85,15 +85,15 @@ subroutine lis_cpl_HYDRO(n) #endif - CALL mpi_initialized( mpi_inited, ierr ) + call MPI_Initialized( mpi_inited, ierr ) if ( .NOT. mpi_inited ) then - call MPI_INIT( ierr ) ! stand alone land model. + call MPI_Init( ierr ) ! stand alone land model. if (ierr /= MPI_SUCCESS) stop "MPI_INIT" - call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) + call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) if (ierr /= MPI_SUCCESS) stop "MPI_COMM_DUP" endif - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) endif if(nlst(did)%rtFlag .eq. 0) return @@ -204,7 +204,7 @@ subroutine lis_cpl_HYDRO(n) enddo #ifdef HYDRO_D - write(6,*) "NDHMS lis date ", LIS_rc%yr, LIS_rc%mo, LIS_rc%da, LIS_rc%hr, LIS_rc%mn, LIS_rc%ss + write(6,*) "NDHMS lis date ", LIS_rc%yr, LIS_rc%mo, LIS_rc%da, LIS_rc%hr, LIS_rc%mn, LIS_rc%ss #endif ! write(11,*) "RT_DOMAIN(did)%stc",RT_DOMAIN(did)%stc(:,:,1) ! write(12,*) "noah271_struc(n)%noah%stc(1)",noah271_struc(n)%noah%stc(1) diff --git a/src/CPL/WRF_cpl/module_wrf_HYDRO.F b/src/CPL/WRF_cpl/module_wrf_HYDRO.F index e6882f74e..fb1b1220d 100644 --- a/src/CPL/WRF_cpl/module_wrf_HYDRO.F +++ b/src/CPL/WRF_cpl/module_wrf_HYDRO.F @@ -106,7 +106,7 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte) #ifdef MPP_LAND - call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) + call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) call MPP_LAND_INIT(grid%e_we - grid%s_we - 1, grid%e_sn - grid%s_sn - 1) call mpp_land_bcast_int1 (nlst(did)%nsoil) @@ -214,9 +214,9 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte) #endif else do k = 1, nlst(did)%nsoil - RT_DOMAIN(did)%STC(:,:,k) = grid%TSLB(its:ite,k,jts:jte) - RT_DOMAIN(did)%smc(:,:,k) = grid%smois(its:ite,k,jts:jte) - RT_DOMAIN(did)%sh2ox(:,:,k) = grid%sh2o(its:ite,k,jts:jte) + RT_DOMAIN(did)%STC(:,:,k) = grid%TSLB(its:ite,k,jts:jte) + RT_DOMAIN(did)%smc(:,:,k) = grid%smois(its:ite,k,jts:jte) + RT_DOMAIN(did)%sh2ox(:,:,k) = grid%sh2o(its:ite,k,jts:jte) end do rt_domain(did)%infxsrt = grid%infxsrt(its:ite,jts:jte) rt_domain(did)%soldrain = grid%soldrain(its:ite,jts:jte) @@ -235,7 +235,7 @@ subroutine wrf_cpl_HYDRO(HYDRO_dt,grid,its,ite,jts,jte) ! update WRF variable after running routing model. grid%sfcheadrt(its:ite,jts:jte) = rt_domain(did)%overland%control%surface_water_head_lsm -! provide groundwater soil flux to WRF for fully coupled simulations (FERSCH 09/2014) +! provide groundwater soil flux to WRF for fully coupled simulations (FERSCH 09/2014) if(nlst(did)%GWBASESWCRT .eq. 3 ) then !Wei Yu: comment the following two lines. 
Not ready for WRF3.7 release !yw grid%qsgw(its:ite,jts:jte) = gw2d(did)%qsgw @@ -269,7 +269,7 @@ subroutine wrf2lsm (z1,v1,kk1,z,vout,ix,jx,kk,vegtyp) do j = 1, jx do i = 1, ix do k = 1, kk - call interpLayer(Z1,v1(i,1:kk1,j),kk1,Z(k),vout(i,j,k)) + call interpLayer(Z1,v1(i,1:kk1,j),kk1,Z(k),vout(i,j,k)) end do end do end do @@ -291,7 +291,7 @@ subroutine lsm2wrf (z1,v1,kk1,z,vout,ix,jx,kk,vegtyp) do j = 1, jx do i = 1, ix do k = 1, kk - call interpLayer(Z1,v1(i,j,1:kk1),kk1,Z(k),vout(i,k,j)) + call interpLayer(Z1,v1(i,j,1:kk1),kk1,Z(k),vout(i,k,j)) end do end do end do diff --git a/src/HYDRO_drv/module_HYDRO_drv.F b/src/HYDRO_drv/module_HYDRO_drv.F index 63959f0cd..a9e9e8e80 100644 --- a/src/HYDRO_drv/module_HYDRO_drv.F +++ b/src/HYDRO_drv/module_HYDRO_drv.F @@ -1841,7 +1841,7 @@ subroutine HYDRO_finish() close(78) #endif call mpp_land_sync() - call MPI_finalize(ierr) + call MPI_Finalize(ierr) stop #else diff --git a/src/IO/netcdf_layer.f90 b/src/IO/netcdf_layer.f90 index 850f2e826..286e9122e 100644 --- a/src/IO/netcdf_layer.f90 +++ b/src/IO/netcdf_layer.f90 @@ -43,7 +43,7 @@ end function create_file_signature end type NetCDF_serial_ type, extends(NetCDF_layer_) :: NetCDF_parallel_ - integer :: MPI_communicator + integer :: MPI_Communicator integer :: default_info = MPI_INFO_NULL contains procedure, pass(object) :: create_file => create_file_parallel diff --git a/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F b/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F index 1e70b1a6b..7066cd3a8 100644 --- a/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F +++ b/src/Land_models/NoahMP/IO_code/module_hrldas_netcdf_io.F @@ -437,9 +437,9 @@ subroutine read_hrldas_hdrinfo(wrfinput_flnm, ix, jx, & integer :: rank #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// & - "read_hrldas_hdrinfo() - MPI_COMM_RANK" + "read_hrldas_hdrinfo() - MPI_Comm_rank" #else rank = 0 #endif @@ -619,9 +619,9 @@ subroutine readland_hrldas(wrfinput_flnm, & crocus_opt = local_crocus_opt ! setting module scope variable #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F readland_hrldas()"// & - " - MPI_COMM_RANK" + " - MPI_Comm_rank" #else rank = 0 #endif @@ -641,8 +641,8 @@ subroutine readland_hrldas(wrfinput_flnm, & if (ierr /= 0) then write(*,'("READLAND_HRLDAS: Problem opening wrfinput file: ''", A, "''")') trim(wrfinput_flnm) #ifdef _PARALLEL_ - call mpi_finalize(ierr) - if (ierr /= 0) write(*, '("Problem with MPI_finalize.")') + call MPI_Finalize(ierr) + if (ierr /= 0) write(*, '("Problem with MPI_Finalize.")') #endif stop "FATAL ERROR: In module_hrldas_netcdf_io.F readland_hrldas()"// & " - Problem opening wrfinput file." 
@@ -758,9 +758,9 @@ subroutine read_mmf_runoff(wrfinput_flnm, & integer :: rank #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F read_mmf_runoff()"// & - " - MPI_COMM_RANK" + " - MPI_Comm_rank" #else rank = 0 #endif @@ -780,8 +780,8 @@ subroutine read_mmf_runoff(wrfinput_flnm, & if (ierr /= 0) then write(*,'("read_mmf_runoff: Problem opening wrfinput file: ''", A, "''")') trim(wrfinput_flnm) #ifdef _PARALLEL_ - call mpi_finalize(ierr) - if (ierr /= 0) write(*, '("Problem with MPI_finalize.")') + call MPI_Finalize(ierr) + if (ierr /= 0) write(*, '("Problem with MPI_Finalize.")') #endif stop "FATAL ERROR: In module_hrldas_netcdf_io.F read_mmf_runoff()"// & " - Problem opening wrfinput file." @@ -1531,9 +1531,9 @@ subroutine readinit_hrldas(netcdf_flnm, xstart, xend, ystart, yend, nsoil, sldpt #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// & - " readinit_hrldas() - MPI_COMM_RANK" + " readinit_hrldas() - MPI_Comm_rank" ierr = nf90_open_par(netcdf_flnm, NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid) #else @@ -1552,7 +1552,7 @@ subroutine readinit_hrldas(netcdf_flnm, xstart, xend, ystart, yend, nsoil, sldpt #endif endif #ifdef _PARALLEL_ - call mpi_finalize(ierr) + call MPI_Finalize(ierr) #endif stop "FATAL ERROR: In module_hrldas_netcdf_io.F readinit_hrldas()"// & " - Problem opening netcdf file." @@ -1676,9 +1676,9 @@ subroutine init_interp(xstart, xend, ystart, yend, nsoil, sldpth, var, nvar, src integer :: rank #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F init_interp()"// & - " - MPI_COMM_RANK." + " - MPI_Comm_rank." 
#else rank = 0 #endif @@ -1982,15 +1982,15 @@ subroutine READFORC_HRLDAS(flnm_template, forcing_timestep, target_date, xstart, #endif if (ierr /= 0) then #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// & - " READFORC_HRLDAS() - MPI_COMM_RANK" + " READFORC_HRLDAS() - MPI_Comm_rank" if (rank == 0) then #endif write(*,'("A) Problem opening netcdf file: ''", A, "''")') trim(flnm) #ifdef _PARALLEL_ endif - call mpi_finalize(ierr) + call MPI_Finalize(ierr) #endif stop "FATAL ERROR: In module_hrldas_netcdf_io.F READFORC_HRLDAS()"// & " - Problem opening netcdf file" @@ -3117,9 +3117,9 @@ subroutine prepare_restart_file_seq(outdir, version, igrid, llanduse, olddate, s #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F"// & - " prepare_restart_file_seq() - MPI_COMM_RANK problem" + " prepare_restart_file_seq() - MPI_Comm_rank problem" #else @@ -3451,9 +3451,9 @@ subroutine read_restart(restart_flnm, & restart_filename_remember = restart_flnm #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// & - "read_restart() - MPI_COMM_RANK" + "read_restart() - MPI_Comm_rank" ierr = nf90_open_par(trim(restart_flnm), NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid) #else @@ -3633,9 +3633,9 @@ subroutine get_from_restart_2d_float(parallel_xstart, parallel_xend, subwindow_x #ifdef _PARALLEL_ - call MPI_COMM_RANK(HYDRO_COMM_WORLD, rank, ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD, rank, ierr) if (ierr /= MPI_SUCCESS) stop "FATAL ERROR: In module_hrldas_netcdf_io.F "// & - "get_from_restart_2d_float() - MPI_COMM_RANK" + "get_from_restart_2d_float() - MPI_Comm_rank" ierr = nf90_open_par(trim(restart_filename_remember), NF90_NOWRITE, HYDRO_COMM_WORLD, MPI_INFO_NULL, ncid) diff --git a/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F b/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F index 09ad0c0a4..86f8dcf5a 100644 --- a/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F +++ b/src/Land_models/NoahMP/Utility_routines/module_wrf_utilities.F @@ -47,8 +47,8 @@ END SUBROUTINE wrf_error_fatal SUBROUTINE wrf_abort use module_cpl_land integer ierr - CALL MPI_ABORT(HYDRO_COMM_WORLD,1,ierr) - call MPI_finalize(ierr) + call MPI_Abort(HYDRO_COMM_WORLD,1,ierr) + call MPI_Finalize(ierr) STOP 'wrf_abort' END SUBROUTINE wrf_abort diff --git a/src/MPP/CPL_WRF.F b/src/MPP/CPL_WRF.F index 7b52a58c4..8e619708c 100644 --- a/src/MPP/CPL_WRF.F +++ b/src/MPP/CPL_WRF.F @@ -67,17 +67,17 @@ subroutine CPL_LAND_INIT(istart,iend,jstart,jend) data cyclic/.false.,.false./ ! not cyclic data reorder/.false./ - CALL mpi_initialized( mpi_inited, ierr ) + call MPI_Initialized( mpi_inited, ierr ) if ( .NOT. 
mpi_inited ) then - call mpi_init(ierr) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_INIT failed") - call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_DUP failed") + call MPI_Init(ierr) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Init failed") + call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_dup failed") endif - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_global_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, total_pe_num, ierr ) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_RANK and/or MPI_COMM_SIZE failed") + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_global_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, total_pe_num, ierr ) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_rank and/or MPI_Comm_size failed") allocate(node_info(9,total_pe_num)) @@ -118,7 +118,7 @@ subroutine CPL_LAND_INIT(istart,iend,jstart,jend) call MPI_Cart_create(HYDRO_COMM_WORLD, ndim, dims, & cyclic, reorder, cartGridComm, ierr) - call MPI_CART_GET(cartGridComm, 2, dims, cyclic, coords, ierr) + call MPI_Cart_get(cartGridComm, 2, dims, cyclic, coords, ierr) p_up_down = coords(0) p_left_right = coords(1) @@ -137,21 +137,21 @@ subroutine send_info() if(my_global_id .eq. 0) then do i = 1, total_pe_num-1 - call mpi_recv(node_info(:,i+1),size,MPI_INTEGER, & + call MPI_Recv(node_info(:,i+1),size,MPI_INTEGER, & i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) enddo else - call mpi_send(node_info(:,my_global_id+1),size, & + call MPI_Send(node_info(:,my_global_id+1),size, & MPI_INTEGER,0,tag,HYDRO_COMM_WORLD,ierr) endif - call MPI_barrier( HYDRO_COMM_WORLD ,ierr) + call MPI_Barrier( HYDRO_COMM_WORLD ,ierr) size = 9 * total_pe_num - call mpi_bcast(node_info,size,MPI_INTEGER, & + call MPI_Bcast(node_info,size,MPI_INTEGER, & 0,HYDRO_COMM_WORLD,ierr) - call MPI_barrier( HYDRO_COMM_WORLD ,ierr) + call MPI_Barrier( HYDRO_COMM_WORLD ,ierr) return end subroutine send_info diff --git a/src/MPP/module_mpp_GWBUCKET.F b/src/MPP/module_mpp_GWBUCKET.F index 0b121dcf8..f8408d042 100644 --- a/src/MPP/module_mpp_GWBUCKET.F +++ b/src/MPP/module_mpp_GWBUCKET.F @@ -57,7 +57,7 @@ subroutine collectSizeInd(numbasns) if(my_id .ne. IO_id) then tag = 66 - call mpi_send(numbasns,1,MPI_INTEGER, IO_id, & + call MPI_Send(numbasns,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 @@ -65,7 +65,7 @@ subroutine collectSizeInd(numbasns) sizeInd(i+1) = numbasns else tag = 66 - call mpi_recv(rcv,1,& + call MPI_Recv(rcv,1,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) sizeInd(i+1) = rcv @@ -101,10 +101,10 @@ subroutine gw_write_io_real(numbasns,inV,ind,outV) if(my_id .ne. IO_id) then if(numbasns .gt. 0) then tag = 62 - call mpi_send(inV,numbasns,MPI_REAL, IO_id, & + call MPI_Send(inV,numbasns,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag2 = 63 - call mpi_send(ind,numbasns,MPI_INTEGER8, IO_id, & + call MPI_Send(ind,numbasns,MPI_INTEGER8, IO_id, & tag2,HYDRO_COMM_WORLD,ierr) endif else @@ -117,10 +117,10 @@ subroutine gw_write_io_real(numbasns,inV,ind,outV) if(i .ne. IO_id) then if(sizeInd(i+1) .gt. 
0) then tag = 62 - call mpi_recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),& + call MPI_Recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag2 = 63 - call mpi_recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),& + call MPI_Recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),& MPI_INTEGER8,i,tag2,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, sizeInd(i+1) outV(ibuff(k)) = vbuff(k) @@ -159,10 +159,10 @@ subroutine gw_write_io_int(numbasns,inV,ind,outV) if(my_id .ne. IO_id) then if(numbasns .gt. 0) then tag = 62 - call mpi_send(inV,numbasns,MPI_INTEGER8, IO_id, & + call MPI_Send(inV,numbasns,MPI_INTEGER8, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag2 = 63 - call mpi_send(ind,numbasns,MPI_INTEGER8, IO_id, & + call MPI_Send(ind,numbasns,MPI_INTEGER8, IO_id, & tag2,HYDRO_COMM_WORLD,ierr) endif else @@ -175,10 +175,10 @@ subroutine gw_write_io_int(numbasns,inV,ind,outV) if(i .ne. IO_id) then if(sizeInd(i+1) .gt. 0) then tag = 62 - call mpi_recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),& + call MPI_Recv(vbuff(1:sizeInd(i+1)),sizeInd(i+1),& MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag2 = 63 - call mpi_recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),& + call MPI_Recv(ibuff(1:sizeInd(i+1)),sizeInd(i+1),& MPI_INTEGER8,i,tag2,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, sizeInd(i+1) outV(ibuff(k)) = vbuff(k) diff --git a/src/MPP/module_mpp_ReachLS.F b/src/MPP/module_mpp_ReachLS.F index ef027c1c3..3ce9b05d7 100644 --- a/src/MPP/module_mpp_ReachLS.F +++ b/src/MPP/module_mpp_ReachLS.F @@ -102,30 +102,30 @@ subroutine updateLinkV8_mem(LinkV, outV) if(my_id .ne. IO_id) then tag = 101 - call mpi_send(LLINKLEN,1,MPI_INTEGER, IO_id, & + call MPI_Send(LLINKLEN,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) if(LLINKLEN .gt. 0) then tag = 102 - call mpi_send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, & + call MPI_Send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 103 - call mpi_send(LinkV,LLINKLEN,MPI_DOUBLE_PRECISION, IO_id, & + call MPI_Send(LinkV,LLINKLEN,MPI_DOUBLE_PRECISION, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif else do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 101 - call mpi_recv(lsize,1,MPI_INTEGER, i, & + call MPI_Recv(lsize,1,MPI_INTEGER, i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(lsize .gt. 0) then allocate(lindex(lsize) ) allocate(tmpBuf(lsize) ) tag = 102 - call mpi_recv(lindex,lsize,MPI_INTEGER, i, & + call MPI_Recv(lindex,lsize,MPI_INTEGER, i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 103 - call mpi_recv(tmpBuf,lsize,& + call MPI_Recv(tmpBuf,lsize,& MPI_DOUBLE_PRECISION,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, lsize gLinkV_r8(lindex(k)) = gLinkV_r8(lindex(k)) + tmpBuf(k) @@ -166,30 +166,30 @@ subroutine updateLinkV4_mem(LinkV, outV) if(my_id .ne. IO_id) then tag = 101 - call mpi_send(LLINKLEN,1,MPI_INTEGER, IO_id, & + call MPI_Send(LLINKLEN,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) if(LLINKLEN .gt. 0) then tag = 102 - call mpi_send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, & + call MPI_Send(LLINKIDINDX,LLINKLEN,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 103 - call mpi_send(LinkV,LLINKLEN,MPI_REAL, IO_id, & + call MPI_Send(LinkV,LLINKLEN,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif else do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 101 - call mpi_recv(lsize,1,MPI_INTEGER, i, & + call MPI_Recv(lsize,1,MPI_INTEGER, i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(lsize .gt. 
0) then allocate(lindex(lsize) ) allocate(tmpBuf(lsize) ) tag = 102 - call mpi_recv(lindex,lsize,MPI_INTEGER, i, & + call MPI_Recv(lindex,lsize,MPI_INTEGER, i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 103 - call mpi_recv(tmpBuf,lsize,& + call MPI_Recv(tmpBuf,lsize,& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, lsize gLinkV_r4(lindex(k)) = gLinkV_r4(lindex(k)) + tmpBuf(k) @@ -224,14 +224,14 @@ subroutine updateLinkV8(LinkV, outV) if(my_id .ne. IO_id) then tag = 102 - call mpi_send(gLinkV,gnlinksl,MPI_DOUBLE_PRECISION, IO_id, & + call MPI_Send(gLinkV,gnlinksl,MPI_DOUBLE_PRECISION, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else gLinkV_r = gLinkV do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 102 - call mpi_recv(gLinkV,gnlinksl,& + call MPI_Recv(gLinkV,gnlinksl,& MPI_DOUBLE_PRECISION,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) gLinkV_r = gLinkV_r + gLinkV end if @@ -257,14 +257,14 @@ subroutine updateLinkV4(LinkV, outV) if(my_id .ne. IO_id) then tag = 102 - call mpi_send(gLinkV,gnlinksl,MPI_REAL, IO_id, & + call MPI_Send(gLinkV,gnlinksl,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else gLinkV_r = gLinkV do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 102 - call mpi_recv(gLinkV,gnlinksl,& + call MPI_Recv(gLinkV,gnlinksl,& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) gLinkV_r = gLinkV_r + gLinkV end if @@ -280,7 +280,7 @@ subroutine gbcastReal(inV, outV) real, dimension(:) :: inV integer :: ierr call ReachLS_write_io(inV,outV) - call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_REAL, & + call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine gbcastReal @@ -297,7 +297,7 @@ subroutine gbcastReal2_old(index,size1,inV, insize, outV) bsize = linkls_e(i+1) - linkls_s(i+1) + 1 if(linkls_e(i+1) .gt. 0) then if(my_id .eq. i) tmpV(1:bsize) = inV(1:bsize) - call mpi_bcast(tmpV(1:bsize),bsize,MPI_REAL, & + call MPI_Bcast(tmpV(1:bsize),bsize,MPI_REAL, & i,HYDRO_COMM_WORLD,ierr) do j = 1, size1 do k = 1, bsize @@ -324,7 +324,7 @@ subroutine gbcastReal2(index,size1,inV, insize, outV) integer :: ierr, k, i, m, j, bsize outV = 0 call ReachLS_write_io(inV,gbuf) - call mpi_bcast(gbuf,gnlinksl,MPI_REAL, & + call MPI_Bcast(gbuf,gnlinksl,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) do j = 1, size1 outV(j) = gbuf(index(j)) @@ -340,7 +340,7 @@ subroutine gbcastInt(inV, outV) integer, dimension(:) :: inV integer :: ierr call ReachLS_write_io(inV,outV) - call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER, & + call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine gbcastInt @@ -350,7 +350,7 @@ subroutine gbcastInt8(inV, outV) integer(kind=int64), dimension(:) :: inV integer :: ierr call ReachLS_write_io(inV,outV) - call mpi_bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER8, & + call MPI_Bcast(outV(1:gnlinksl),gnlinksl,MPI_INTEGER8, & IO_id,HYDRO_COMM_WORLD,ierr) end subroutine gbcastInt8 @@ -367,7 +367,7 @@ subroutine getLocalIndx(glinksl,LINKID, LLINKID) call ReachLS_write_io(LINKID,gLinkId) - call mpi_bcast(gLinkId(1:glinksl),glinksl,MPI_INTEGER8, & + call MPI_Bcast(gLinkId(1:glinksl),glinksl,MPI_INTEGER8, & IO_id,HYDRO_COMM_WORLD,ierr) ! The following loops are replaced by a hashtable-based algorithm @@ -406,8 +406,8 @@ subroutine ReachLS_ini(glinksl,nlinksl,linklsS, linklsE) integer :: i, ii, ierr ! 
get my_id and numprocs - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) nlinksl = glinksl / numprocs @@ -473,7 +473,7 @@ subroutine MapGrid2ReachIni(in2d) if(my_id .eq. n-1) then tmpS = sDataRec endif - call mpi_bcast(tmpS,numprocs,MPI_INTEGER, & + call MPI_Bcast(tmpS,numprocs,MPI_INTEGER, & n-1,HYDRO_COMM_WORLD,ierr) rDataRec(n) = tmpS(n) enddo @@ -495,7 +495,7 @@ subroutine ReachLS_decompReal(inV,outV) endif else if(aLinksl(i) .gt. 0) then - call mpi_send(inV(linkls_s(i):linkls_e(i)), & + call MPI_Send(inV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_REAL, i-1 ,tag,HYDRO_COMM_WORLD,ierr) endif @@ -503,7 +503,7 @@ subroutine ReachLS_decompReal(inV,outV) end do else if(aLinksl(my_id+1) .gt. 0) then - call mpi_recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !! this one has +1! + call MPI_Recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !! this one has +1! aLinksl(my_id+1), & MPI_REAL, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -525,7 +525,7 @@ subroutine ReachLS_decompReal8(inV,outV) endif else if(aLinksl(i) .gt. 0) then - call mpi_send(inV(linkls_s(i):linkls_e(i)), & + call MPI_Send(inV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_REAL8, i-1 ,tag,HYDRO_COMM_WORLD,ierr) endif @@ -533,7 +533,7 @@ subroutine ReachLS_decompReal8(inV,outV) end do else if(aLinksl(my_id+1) .gt. 0) then - call mpi_recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !! this one has +1! + call MPI_Recv(outV(1:(linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !! this one has +1! aLinksl(my_id+1), & MPI_REAL8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -555,7 +555,7 @@ subroutine ReachLS_decompInt(inV,outV) endif else if(aLinksl(i) .gt. 0) then - call mpi_send(inV(linkls_s(i):linkls_e(i)), & + call MPI_Send(inV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,ierr) endif @@ -563,7 +563,7 @@ subroutine ReachLS_decompInt(inV,outV) end do else if(aLinksl(my_id+1) .gt. 0) then - call mpi_recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), & + call MPI_Recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), & alinksl(my_id+1), & MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -587,7 +587,7 @@ subroutine ReachLS_decompInt8(inV,outV) endif else if(aLinksl(i) .gt. 0) then - call mpi_send(inV(linkls_s(i):linkls_e(i)), & + call MPI_Send(inV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,ierr) endif @@ -595,7 +595,7 @@ subroutine ReachLS_decompInt8(inV,outV) end do else if(aLinksl(my_id+1) .gt. 0) then - call mpi_recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), & + call MPI_Recv(outV(1:linkls_e(my_id+1)-linkls_s(my_id+1)+1), & alinksl(my_id+1), & MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -622,8 +622,8 @@ subroutine ReachLS_decompChar(inV,outV) endif else if(aLinksl(i) .gt. 0) then - ! The mpi_send takes what you give it and THEN treats each caracter as an array element. - call mpi_send(inV(linkls_s(i):linkls_e(i)), & + ! The MPI_Send takes what you give it and THEN treats each caracter as an array element. + call MPI_Send(inV(linkls_s(i):linkls_e(i)), & strLen*aLinksl(i), & MPI_CHARACTER, i-1, tag, HYDRO_COMM_WORLD, ierr) endif @@ -631,8 +631,8 @@ subroutine ReachLS_decompChar(inV,outV) end do else if(aLinksl(my_id+1) .gt. 0) then - ! The mpi_recv treats each caracter as an array element. 
- call mpi_recv(outV(1 : (linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !jlm should have +1 + ! The MPI_Recv treats each caracter as an array element. + call MPI_Recv(outV(1 : (linkls_e(my_id+1)-linkls_s(my_id+1)+1) ), & !jlm should have +1 strLen*alinksl(my_id+1), & MPI_CHARACTER, io_id, tag, HYDRO_COMM_WORLD, mpp_status,ierr ) endif @@ -657,7 +657,7 @@ subroutine ReachLS_wReal(inV,outV) else if(aLinksl(i) .gt. 0) then - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_REAL,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -667,7 +667,7 @@ subroutine ReachLS_wReal(inV,outV) if(aLinksl(my_id+1) .gt. 0) then tag = 12 ss = size(inv,1) - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_REAL,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -691,7 +691,7 @@ subroutine ReachLS_wReal8(inV,outV) else if(aLinksl(i) .gt. 0) then - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_REAL8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -701,7 +701,7 @@ subroutine ReachLS_wReal8(inV,outV) if(aLinksl(my_id+1) .gt. 0) then tag = 12 ss = size(inv,1) - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_REAL8,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -725,7 +725,7 @@ subroutine ReachLS_wInt(inV,outV) else if(aLinksl(i) .gt. 0) then tag = 12 - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -734,7 +734,7 @@ subroutine ReachLS_wInt(inV,outV) else if(aLinksl(my_id+1) .gt. 0) then tag = 12 - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -757,7 +757,7 @@ subroutine ReachLS_wInt8(inV,outV) else if(aLinksl(i) .gt. 0) then tag = 12 - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_INTEGER8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -766,7 +766,7 @@ subroutine ReachLS_wInt8(inV,outV) else if(aLinksl(my_id+1) .gt. 0) then tag = 12 - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -790,7 +790,7 @@ subroutine ReachLS_wInt2(inV,outV,len,glen) else if(aLinksl(i) .gt. 0) then tag = 12 - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -799,7 +799,7 @@ subroutine ReachLS_wInt2(inV,outV,len,glen) else if(aLinksl(my_id+1) .gt. 0) then tag = 12 - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -823,7 +823,7 @@ subroutine ReachLS_wReal2(inV,outV,len,glen) else if(aLinksl(i) .gt. 0) then tag = 12 - call mpi_recv(outV(linkls_s(i):linkls_e(i)), & + call MPI_Recv(outV(linkls_s(i):linkls_e(i)), & aLinksl(i), & MPI_REAL,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -832,7 +832,7 @@ subroutine ReachLS_wReal2(inV,outV,len,glen) else if(aLinksl(my_id+1) .gt. 
0) then tag = 12 - call mpi_send(inV(1:aLinksl(my_id+1) ), & + call MPI_Send(inV(1:aLinksl(my_id+1) ), & aLinksl(my_id+1), & MPI_REAL,io_id,tag,HYDRO_COMM_WORLD,ierr) endif @@ -857,8 +857,8 @@ subroutine ReachLS_wChar(inV,outV) if(aLinksl(i) .gt. 0) then tag = 12 ! ? seems asymmetric with ReachLS_decompChar - call mpi_recv(outV( linkls_s(i) : linkls_e(i) ), & -! call mpi_recv(outV( ((linkls_s(i)-1)+1) : (linkls_e(i)) ), & + call MPI_Recv(outV( linkls_s(i) : linkls_e(i) ), & +! call MPI_Recv(outV( ((linkls_s(i)-1)+1) : (linkls_e(i)) ), & aLinksl(i), & MPI_CHARACTER, i-1, tag, HYDRO_COMM_WORLD, mpp_status, ierr ) endif @@ -867,8 +867,8 @@ subroutine ReachLS_wChar(inV,outV) else if(aLinksl(my_id+1) .gt. 0) then tag = 12 - ! The mpi_send takes what you give it and THEN treats each caracter as an array element. - call mpi_send(inV(1:aLinksl(my_id+1)), & + ! The MPI_Send takes what you give it and THEN treats each caracter as an array element. + call MPI_Send(inV(1:aLinksl(my_id+1)), & aLinksl(my_id+1), & MPI_CHARACTER, io_id, tag, HYDRO_COMM_WORLD, ierr) endif @@ -1004,7 +1004,7 @@ subroutine getToInd(from,to,ind,indLen,gToNodeOut) ToInd(my_id+1) = kk do i = 0, numprocs - 1 - call mpi_bcast(ToInd(i+1),1,MPI_INTEGER8, & + call MPI_Bcast(ToInd(i+1),1,MPI_INTEGER8, & i,HYDRO_COMM_WORLD,ierr) end do @@ -1045,7 +1045,7 @@ subroutine com_decomp1dInt(inV,gsize,outV,lsize) endif else if(ssize .gt. 0 ) then - call mpi_send(inV(start:start+ssize-1), ssize, & + call MPI_Send(inV(start:start+ssize-1), ssize, & MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -1058,7 +1058,7 @@ subroutine com_decomp1dInt(inV,gsize,outV,lsize) endif if( lsize .gt. 0) then allocate(outV(lsize) ) - call mpi_recv(outV,lsize, & + call MPI_Recv(outV,lsize, & MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif @@ -1102,7 +1102,7 @@ subroutine com_decomp1dInt8(inV,gsize,outV,lsize) endif else if(ssize .gt. 0 ) then - call mpi_send(inV(start:start+ssize-1), ssize, & + call MPI_Send(inV(start:start+ssize-1), ssize, & MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -1115,7 +1115,7 @@ subroutine com_decomp1dInt8(inV,gsize,outV,lsize) endif if( lsize .gt. 0) then allocate(outV(lsize) ) - call mpi_recv(outV,lsize, & + call MPI_Recv(outV,lsize, & MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif @@ -1154,7 +1154,7 @@ subroutine com_write1dInt(inV,lsize,outV,gsize) endif else if(rsize .gt. 0 ) then - call mpi_recv(outV(start:start+rsize-1), rsize, & + call MPI_Recv(outV(start:start+rsize-1), rsize, & MPI_INTEGER, i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif @@ -1166,7 +1166,7 @@ subroutine com_write1dInt(inV,lsize,outV,gsize) lsize = ncomsize endif if( lsize .gt. 0) then - call mpi_send(inV, lsize, & + call MPI_Send(inV, lsize, & MPI_INTEGER, io_id,tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -1205,7 +1205,7 @@ subroutine com_write1dInt8(inV,lsize,outV,gsize) endif else if(rsize .gt. 0 ) then - call mpi_recv(outV(start:start+rsize-1), rsize, & + call MPI_Recv(outV(start:start+rsize-1), rsize, & MPI_INTEGER8, i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif @@ -1217,7 +1217,7 @@ subroutine com_write1dInt8(inV,lsize,outV,gsize) lsize = ncomsize endif if( lsize .gt. 0) then - call mpi_send(inV, lsize, & + call MPI_Send(inV, lsize, & MPI_INTEGER8, io_id,tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -1259,7 +1259,7 @@ subroutine pack_decomp_int(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(my_id .ne. IO_id) then tag = 72 if(lnsizes(my_id + 1) .gt. 
0) then - call mpi_recv(bufid,lnsizes(my_id + 1),& + call MPI_Recv(bufid,lnsizes(my_id + 1),& MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif else @@ -1267,7 +1267,7 @@ subroutine pack_decomp_int(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(i .ne. my_id) then tag = 72 if(lnsizes(i+1) .gt. 0) then - call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & + call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & lnsizes(i+1),MPI_INTEGER,i, tag,HYDRO_COMM_WORLD,ierr) endif else @@ -1315,7 +1315,7 @@ subroutine pack_decomp_int8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(my_id .ne. IO_id) then tag = 72 if(lnsizes(my_id + 1) .gt. 0) then - call mpi_recv(bufid,lnsizes(my_id + 1),& + call MPI_Recv(bufid,lnsizes(my_id + 1),& MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif else @@ -1323,7 +1323,7 @@ subroutine pack_decomp_int8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(i .ne. my_id) then tag = 72 if(lnsizes(i+1) .gt. 0) then - call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & + call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & lnsizes(i+1),MPI_INTEGER8,i, tag,HYDRO_COMM_WORLD,ierr) endif else @@ -1363,7 +1363,7 @@ subroutine pack_decomp_real8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(my_id .ne. IO_id) then tag = 72 if(lnsizes(my_id + 1) .gt. 0) then - call mpi_recv(bufid,lnsizes(my_id + 1),& + call MPI_Recv(bufid,lnsizes(my_id + 1),& MPI_DOUBLE_PRECISION,io_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif else @@ -1371,7 +1371,7 @@ subroutine pack_decomp_real8(g1bufid, ndata, nprocs_map, lnsizes, istart,bufid) if(i .ne. my_id) then tag = 72 if(lnsizes(i+1) .gt. 0) then - call mpi_send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & + call MPI_Send(buf(istart(i+1):istart(i+1)+lnsizes(i+1)-1), & lnsizes(i+1),MPI_DOUBLE_PRECISION,i, tag,HYDRO_COMM_WORLD,ierr) endif else @@ -1415,15 +1415,15 @@ subroutine TONODE2RSL (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag) end do else tag = 82 - call mpi_recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) + call MPI_Recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(tmpSize .gt. 0) then allocate(buf(tmpSize)) allocate(tmpInd(tmpSize)) tag = 83 - call mpi_recv(tmpInd, tmpSize , & + call MPI_Recv(tmpInd, tmpSize , & MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 84 - call mpi_recv(buf, tmpSize , & + call MPI_Recv(buf, tmpSize , & MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, tmpSize if(buf(k) .ne. flag) then @@ -1437,13 +1437,13 @@ subroutine TONODE2RSL (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag) end do else tag = 82 - call mpi_send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) + call MPI_Send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) if(size .gt. 0) then tag = 83 - call mpi_send(ind(1:size),size, & + call MPI_Send(ind(1:size),size, & MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) tag = 84 - call mpi_send(inVar(1:size),size, & + call MPI_Send(inVar(1:size),size, & MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -1480,15 +1480,15 @@ subroutine TONODE2RSL8 (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag) end do else tag = 82 - call mpi_recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) + call MPI_Recv(tmpSize,1,MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(tmpSize .gt. 
0) then allocate(buf(tmpSize)) allocate(tmpInd(tmpSize)) tag = 83 - call mpi_recv(tmpInd, tmpSize , & + call MPI_Recv(tmpInd, tmpSize , & MPI_INTEGER,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 84 - call mpi_recv(buf, tmpSize , & + call MPI_Recv(buf, tmpSize , & MPI_INTEGER8,i-1,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, tmpSize if(buf(k) .ne. flag) then @@ -1502,13 +1502,13 @@ subroutine TONODE2RSL8 (ind,inVar,size,gNLINKSL,NLINKSL,ioVar,flag) end do else tag = 82 - call mpi_send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) + call MPI_Send(size,1,MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) if(size .gt. 0) then tag = 83 - call mpi_send(ind(1:size),size, & + call MPI_Send(ind(1:size),size, & MPI_INTEGER,io_id,tag,HYDRO_COMM_WORLD,ierr) tag = 84 - call mpi_send(inVar(1:size),size, & + call MPI_Send(inVar(1:size),size, & MPI_INTEGER8,io_id,tag,HYDRO_COMM_WORLD,ierr) endif endif diff --git a/src/MPP/mpp_land.F b/src/MPP/mpp_land.F index 0084a2d16..6447fc30f 100644 --- a/src/MPP/mpp_land.F +++ b/src/MPP/mpp_land.F @@ -34,7 +34,7 @@ MODULE MODULE_MPP_LAND integer, public :: global_nx, global_ny, local_nx,local_ny integer, public :: global_rt_nx, global_rt_ny integer, public :: local_rt_nx,local_rt_ny,rt_AGGFACTRT - integer, public :: numprocs ! total process, get by mpi initialization. + integer, public :: numprocs ! total process, get by MPI initialization. integer :: local_startx, local_starty integer :: local_startx_rt, local_starty_rt, local_endx_rt, local_endy_rt @@ -86,8 +86,8 @@ subroutine LOG_MAP2d() data cyclic/.false.,.false./ ! not cyclic data reorder/.false./ - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) call getNX_NY(numprocs, left_right_np,up_down_np) if(my_id.eq.IO_id) then @@ -131,7 +131,7 @@ subroutine LOG_MAP2d() call MPI_Cart_create(HYDRO_COMM_WORLD, ndim, dims, & cyclic, reorder, cartGridComm, ierr) - call MPI_CART_GET(cartGridComm, 2, dims, cyclic, coords, ierr) + call MPI_Cart_get(cartGridComm, 2, dims, cyclic, coords, ierr) p_up_down = coords(0) p_left_right = coords(1) @@ -154,17 +154,17 @@ subroutine MPP_LAND_INIT(in_global_nx,in_global_ny) global_ny = in_global_ny end if - call mpi_initialized( mpi_inited, ierr ) + call MPI_Initialized( mpi_inited, ierr ) if ( .not. mpi_inited ) then - call MPI_INIT_THREAD( MPI_THREAD_FUNNELED, provided, ierr ) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_INIT failed") - call MPI_COMM_DUP(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_DUP failed") + call MPI_Init_thread( MPI_THREAD_FUNNELED, provided, ierr ) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Init failed") + call MPI_Comm_dup(MPI_COMM_WORLD, HYDRO_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_dup failed") endif - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) - if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_COMM_RANK and/or MPI_COMM_SIZE failed") + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) + if (ierr /= MPI_SUCCESS) call fatal_error_stop("MPI Error: MPI_Comm_rank and/or MPI_Comm_size failed") ! create 2d logical mapping of the CPU. 
call log_map2d() @@ -247,26 +247,26 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag) if(right_id .ge. 0) then ! ### send to right first. tag = 11 size = ny - call mpi_send(in_out_data(nx-1,:),size,MPI_REAL, & + call MPI_Send(in_out_data(nx-1,:),size,MPI_REAL, & right_id,tag,HYDRO_COMM_WORLD,ierr) end if if(left_id .ge. 0) then ! receive from left tag = 11 size = ny - call mpi_recv(in_out_data(1,:),size,MPI_REAL, & + call MPI_Recv(in_out_data(1,:),size,MPI_REAL, & left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif if(left_id .ge. 0 ) then ! ### send to left second. size = ny tag = 21 - call mpi_send(in_out_data(2,:),size,MPI_REAL, & + call MPI_Send(in_out_data(2,:),size,MPI_REAL, & left_id,tag,HYDRO_COMM_WORLD,ierr) endif if(right_id .ge. 0) then ! receive from right tag = 21 size = ny - call mpi_recv(in_out_data(nx,:),size,MPI_REAL,& + call MPI_Recv(in_out_data(nx,:),size,MPI_REAL,& right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif @@ -275,13 +275,13 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag) if(right_id .ge. 0) then ! ### send to right first. tag = 11 size = 2*ny - call mpi_send(in_out_data(nx-1:nx,:),size,MPI_REAL, & + call MPI_Send(in_out_data(nx-1:nx,:),size,MPI_REAL, & right_id,tag,HYDRO_COMM_WORLD,ierr) end if if(left_id .ge. 0) then ! receive from left tag = 11 size = 2*ny - call mpi_recv(data_r,size,MPI_REAL,left_id,tag, & + call MPI_Recv(data_r,size,MPI_REAL,left_id,tag, & HYDRO_COMM_WORLD,mpp_status,ierr) in_out_data(1,:) = in_out_data(1,:) + data_r(1,:) in_out_data(2,:) = in_out_data(2,:) + data_r(2,:) @@ -290,13 +290,13 @@ subroutine MPP_LAND_LR_COM(in_out_data,NX,NY,flag) if(left_id .ge. 0 ) then ! ### send to left second. size = 2*ny tag = 21 - call mpi_send(in_out_data(1:2,:),size,MPI_REAL, & + call MPI_Send(in_out_data(1:2,:),size,MPI_REAL, & left_id,tag,HYDRO_COMM_WORLD,ierr) endif if(right_id .ge. 0) then ! receive from right tag = 21 size = 2*ny - call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_REAL,& + call MPI_Recv(in_out_data(nx-1:nx,:),size,MPI_REAL,& right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif endif ! end if black for flag. @@ -315,26 +315,26 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag) if(right_id .ge. 0) then ! ### send to right first. tag = 11 size = ny - call mpi_send(in_out_data(nx-1,:),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(nx-1,:),size,MPI_DOUBLE_PRECISION, & right_id,tag,HYDRO_COMM_WORLD,ierr) end if if(left_id .ge. 0) then ! receive from left tag = 11 size = ny - call mpi_recv(in_out_data(1,:),size,MPI_DOUBLE_PRECISION, & + call MPI_Recv(in_out_data(1,:),size,MPI_DOUBLE_PRECISION, & left_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif if(left_id .ge. 0 ) then ! ### send to left second. size = ny tag = 21 - call mpi_send(in_out_data(2,:),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(2,:),size,MPI_DOUBLE_PRECISION, & left_id,tag,HYDRO_COMM_WORLD,ierr) endif if(right_id .ge. 0) then ! receive from right tag = 21 size = ny - call mpi_recv(in_out_data(nx,:),size,MPI_DOUBLE_PRECISION,& + call MPI_Recv(in_out_data(nx,:),size,MPI_DOUBLE_PRECISION,& right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif @@ -343,13 +343,13 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag) if(right_id .ge. 0) then ! ### send to right first. tag = 11 size = 2*ny - call mpi_send(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION, & right_id,tag,HYDRO_COMM_WORLD,ierr) end if if(left_id .ge. 0) then ! 
receive from left tag = 11 size = 2*ny - call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION,left_id,tag, & + call MPI_Recv(data_r,size,MPI_DOUBLE_PRECISION,left_id,tag, & HYDRO_COMM_WORLD,mpp_status,ierr) in_out_data(1,:) = in_out_data(1,:) + data_r(1,:) in_out_data(2,:) = in_out_data(2,:) + data_r(2,:) @@ -358,13 +358,13 @@ subroutine MPP_LAND_LR_COM8(in_out_data,NX,NY,flag) if(left_id .ge. 0 ) then ! ### send to left second. size = 2*ny tag = 21 - call mpi_send(in_out_data(1:2,:),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(1:2,:),size,MPI_DOUBLE_PRECISION, & left_id,tag,HYDRO_COMM_WORLD,ierr) endif if(right_id .ge. 0) then ! receive from right tag = 21 size = 2*ny - call mpi_recv(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION,& + call MPI_Recv(in_out_data(nx-1:nx,:),size,MPI_DOUBLE_PRECISION,& right_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif endif ! end if black for flag. @@ -393,7 +393,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny) if(i .ne. my_id) then !block receive from other node. tag = 1 - call mpi_recv(s_r,2,MPI_INTEGER,i, & + call MPI_Recv(s_r,2,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) local_nx_size(i+1) = s_r(1) local_ny_size(i+1) = s_r(2) @@ -406,7 +406,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny) tag = 1 s_r(1) = local_nx s_r(2) = local_ny - call mpi_send(s_r,2,MPI_INTEGER, IO_id, & + call MPI_Send(s_r,2,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -416,7 +416,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny) if(i .ne. my_id) then !block receive from other node. tag = 2 - call mpi_recv(s_r,2,MPI_INTEGER,i, & + call MPI_Recv(s_r,2,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) local_rt_nx_size(i+1) = s_r(1) local_rt_ny_size(i+1) = s_r(2) @@ -429,7 +429,7 @@ subroutine get_local_size(local_nx, local_ny,rt_nx,rt_ny) tag = 2 s_r(1) = rt_nx s_r(2) = rt_ny - call mpi_send(s_r,2,MPI_INTEGER, IO_id, & + call MPI_Send(s_r,2,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -450,26 +450,26 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag) if(up_id .ge. 0 ) then ! ### send to up first. tag = 31 size = nx - call mpi_send(in_out_data(:,ny-1),size,MPI_REAL, & + call MPI_Send(in_out_data(:,ny-1),size,MPI_REAL, & up_id,tag,HYDRO_COMM_WORLD,ierr) endif if(down_id .ge. 0 ) then ! receive from down tag = 31 size = nx - call mpi_recv(in_out_data(:,1),size,MPI_REAL, & + call MPI_Recv(in_out_data(:,1),size,MPI_REAL, & down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif if(down_id .ge. 0 ) then ! send down. tag = 41 size = nx - call mpi_send(in_out_data(:,2),size,MPI_REAL, & + call MPI_Send(in_out_data(:,2),size,MPI_REAL, & down_id,tag,HYDRO_COMM_WORLD,ierr) endif if(up_id .ge. 0 ) then ! receive from upper tag = 41 size = nx - call mpi_recv(in_out_data(:,ny),size,MPI_REAL, & + call MPI_Recv(in_out_data(:,ny),size,MPI_REAL, & up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -478,13 +478,13 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag) if(up_id .ge. 0 ) then ! ### send to up first. tag = 31 size = nx*2 - call mpi_send(in_out_data(:,ny-1:ny),size,MPI_REAL, & + call MPI_Send(in_out_data(:,ny-1:ny),size,MPI_REAL, & up_id,tag,HYDRO_COMM_WORLD,ierr) endif if(down_id .ge. 0 ) then ! 
receive from down tag = 31 size = nx*2 - call mpi_recv(data_r,size,MPI_REAL, & + call MPI_Recv(data_r,size,MPI_REAL, & down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) in_out_data(:,1) = in_out_data(:,1) + data_r(:,1) in_out_data(:,2) = in_out_data(:,2) + data_r(:,2) @@ -493,13 +493,13 @@ subroutine MPP_LAND_UB_COM(in_out_data,NX,NY,flag) if(down_id .ge. 0 ) then ! send down. tag = 41 size = nx*2 - call mpi_send(in_out_data(:,1:2),size,MPI_REAL, & + call MPI_Send(in_out_data(:,1:2),size,MPI_REAL, & down_id,tag,HYDRO_COMM_WORLD,ierr) endif if(up_id .ge. 0 ) then ! receive from upper tag = 41 size = nx * 2 - call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_REAL, & + call MPI_Recv(in_out_data(:,ny-1:ny),size,MPI_REAL, & up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif ! end of block flag @@ -519,26 +519,26 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag) if(up_id .ge. 0 ) then ! ### send to up first. tag = 31 size = nx - call mpi_send(in_out_data(:,ny-1),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(:,ny-1),size,MPI_DOUBLE_PRECISION, & up_id,tag,HYDRO_COMM_WORLD,ierr) endif if(down_id .ge. 0 ) then ! receive from down tag = 31 size = nx - call mpi_recv(in_out_data(:,1),size,MPI_DOUBLE_PRECISION, & + call MPI_Recv(in_out_data(:,1),size,MPI_DOUBLE_PRECISION, & down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) endif if(down_id .ge. 0 ) then ! send down. tag = 41 size = nx - call mpi_send(in_out_data(:,2),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(:,2),size,MPI_DOUBLE_PRECISION, & down_id,tag,HYDRO_COMM_WORLD,ierr) endif if(up_id .ge. 0 ) then ! receive from upper tag = 41 size = nx - call mpi_recv(in_out_data(:,ny),size,MPI_DOUBLE_PRECISION, & + call MPI_Recv(in_out_data(:,ny),size,MPI_DOUBLE_PRECISION, & up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif @@ -547,13 +547,13 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag) if(up_id .ge. 0 ) then ! ### send to up first. tag = 31 size = nx*2 - call mpi_send(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, & up_id,tag,HYDRO_COMM_WORLD,ierr) endif if(down_id .ge. 0 ) then ! receive from down tag = 31 size = nx*2 - call mpi_recv(data_r,size,MPI_DOUBLE_PRECISION, & + call MPI_Recv(data_r,size,MPI_DOUBLE_PRECISION, & down_id,tag,HYDRO_COMM_WORLD, mpp_status,ierr) in_out_data(:,1) = in_out_data(:,1) + data_r(:,1) in_out_data(:,2) = in_out_data(:,2) + data_r(:,2) @@ -562,13 +562,13 @@ subroutine MPP_LAND_UB_COM8(in_out_data,NX,NY,flag) if(down_id .ge. 0 ) then ! send down. tag = 41 size = nx*2 - call mpi_send(in_out_data(:,1:2),size,MPI_DOUBLE_PRECISION, & + call MPI_Send(in_out_data(:,1:2),size,MPI_DOUBLE_PRECISION, & down_id,tag,HYDRO_COMM_WORLD,ierr) endif if(up_id .ge. 0 ) then ! receive from upper tag = 41 size = nx * 2 - call mpi_recv(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, & + call MPI_Recv(in_out_data(:,ny-1:ny),size,MPI_DOUBLE_PRECISION, & up_id,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif endif ! end of block flag @@ -622,7 +622,7 @@ subroutine calculate_start_p() ! block receive from other node. 
if(i.ne.my_id) then tag = 1 - call mpi_recv(r_s,2,MPI_INTEGER,i, & + call MPI_Recv(r_s,2,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) startx(i+1) = r_s(1) starty(i+1) = r_s(2) @@ -630,7 +630,7 @@ subroutine calculate_start_p() end do else tag = 1 - call mpi_send(r_s,2,MPI_INTEGER, IO_id, & + call MPI_Send(r_s,2,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -695,8 +695,8 @@ subroutine decompose_data_real (in_buff,out_buff) if(my_id .eq. IO_id) then - ! allocate the buffer to hold data as required by mpi_scatterv - ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors) + ! allocate the buffer to hold data as required by MPI_Scatterv + ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors) allocate(send_buff(0: (global_nx*global_ny) -1),stat = ierr) ! for each sub region in the global buffer linearize the data and place it in the @@ -725,15 +725,15 @@ subroutine decompose_data_real (in_buff,out_buff) ! send the to each process size_vector(mpi_rank+1) data elements ! and store the results in out_buff - call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, & + call MPI_Scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, & out_buff, size_vectors(my_id+1), MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) ! remove the send buffer deallocate(send_buff) else - ! other processes only need to make mpi_scatterv call - call mpi_scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, & + ! other processes only need to make MPI_Scatterv call + call MPI_Scatterv(send_buff, size_vectors, offset_vectors, MPI_REAL, & out_buff, local_nx*local_ny, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) end if @@ -760,13 +760,13 @@ subroutine decompose_data_int (in_buff,out_buff) else ! send data to the rest process. size = local_nx_size(i+1)*local_ny_size(i+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = local_nx*local_ny - call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, & + call MPI_Recv(out_buff,size,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if return @@ -780,7 +780,7 @@ subroutine write_IO_int(in_buff,out_buff) if(my_id .ne. IO_id) then size = local_nx*local_ny tag = 2 - call mpi_send(in_buff,size,MPI_INTEGER, IO_id, & + call MPI_Send(in_buff,size,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 @@ -793,7 +793,7 @@ subroutine write_IO_int(in_buff,out_buff) else size = local_nx_size(i+1)*local_ny_size(i+1) tag = 2 - call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end do @@ -819,7 +819,7 @@ subroutine write_IO_char_head(in, out, imageHead) if(my_id .ne. IO_id) then lenSize = imageHead(my_id+1)*len(in(1)) !! some times necessary for character arrays? if(lenSize .eq. 0) return - call mpi_send(in,lenSize,MPI_CHARACTER,IO_id,tag,HYDRO_COMM_WORLD,ierr) + call MPI_Send(in,lenSize,MPI_CHARACTER,IO_id,tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs-1 lenSize = imageHead(i+1)*len(in(1)) !! necessary? @@ -833,7 +833,7 @@ subroutine write_IO_char_head(in, out, imageHead) if(i .eq. 
IO_id) then out(theStart:theEnd) = in(1:imageHead(i+1)) else - call mpi_recv(out(theStart:theEnd),lenSize,& + call MPI_Recv(out(theStart:theEnd),lenSize,& MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end do @@ -859,7 +859,7 @@ subroutine write_IO_real(in_buff,out_buff) if(my_id .ne. IO_id) then size = local_nx*local_ny tag = 2 - call mpi_send(in_buff,size,MPI_REAL, IO_id, & + call MPI_Send(in_buff,size,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 @@ -872,7 +872,7 @@ subroutine write_IO_real(in_buff,out_buff) else size = local_nx_size(i+1)*local_ny_size(i+1) tag = 2 - call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end do @@ -888,7 +888,7 @@ end subroutine write_IO_real ! if(my_id .ne. IO_id) then ! size = local_rt_nx*local_rt_ny ! tag = 2 -! call mpi_send(in_buff,size,MPI_REAL, IO_id, & +! call MPI_Send(in_buff,size,MPI_REAL, IO_id, & ! tag,HYDRO_COMM_WORLD,ierr) ! else ! do i = 0, numprocs - 1 @@ -903,7 +903,7 @@ end subroutine write_IO_real ! else ! size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1) ! tag = 2 -! call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,& +! call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,& ! MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) ! end if ! end do @@ -922,14 +922,14 @@ subroutine write_IO_RT_real (in_buff,out_buff) if(my_id .eq. IO_id) then - ! allocate the buffer to hold data as required by mpi_scatterv + ! allocate the buffer to hold data as required by MPI_Scatterv ! (this will be larger than out_buff due to halo cell overlap) - ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors) + ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors) allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr) ! recieve from each process size_vector(mpi_rank+1) data elements ! and store the results in recv_buffer - call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, & + call MPI_Gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, & recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, & IO_id, HYDRO_COMM_WORLD, ierr) @@ -953,8 +953,8 @@ subroutine write_IO_RT_real (in_buff,out_buff) deallocate(recv_buff) else - ! other processes only need to make mpi_gatherv call - call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_REAL, & + ! other processes only need to make MPI_Gatherv call + call MPI_Gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_REAL, & recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, & IO_id, HYDRO_COMM_WORLD, ierr) end if @@ -972,14 +972,14 @@ subroutine write_IO_RT_int (in_buff,out_buff) if(my_id .eq. IO_id) then - ! allocate the buffer to hold data as required by mpi_scatterv + ! allocate the buffer to hold data as required by MPI_Scatterv ! (this will be larger than out_buff due to halo cell overlap) - ! be careful with the index range if using array prepared for mpi in fortran (offset_vectors) + ! be careful with the index range if using array prepared for MPI in fortran (offset_vectors) allocate(recv_buff(0: sum(size_vectors_rt) -1),stat = ierr) ! recieve from each process size_vector(mpi_rank+1) data elements ! 
and store the results in recv_buffer - call mpi_gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, & + call MPI_Gatherv(in_buff, size_vectors_rt(my_id+1), MPI_REAL, & recv_buff, size_vectors_rt, offset_vectors_rt, MPI_REAL, & IO_id, HYDRO_COMM_WORLD, ierr) @@ -1003,8 +1003,8 @@ subroutine write_IO_RT_int (in_buff,out_buff) deallocate(recv_buff) else - ! other processes only need to make mpi_gatherv call - call mpi_gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_INTEGER, & + ! other processes only need to make MPI_Gatherv call + call MPI_Gatherv(in_buff, local_rt_nx*local_rt_ny, MPI_INTEGER, & recv_buff, size_vectors_rt, offset_vectors_rt, MPI_INTEGER, & IO_id, HYDRO_COMM_WORLD, ierr) end if @@ -1020,7 +1020,7 @@ end subroutine write_IO_RT_int ! if(my_id .ne. IO_id) then ! size = local_rt_nx*local_rt_ny ! tag = 2 -! call mpi_send(in_buff,size,MPI_INTEGER, IO_id, & +! call MPI_Send(in_buff,size,MPI_INTEGER, IO_id, & ! tag,HYDRO_COMM_WORLD,ierr) ! else ! do i = 0, numprocs - 1 @@ -1035,7 +1035,7 @@ end subroutine write_IO_RT_int ! else ! size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1) ! tag = 2 -! call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,& +! call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,& ! MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) ! end if ! end do @@ -1052,7 +1052,7 @@ subroutine write_IO_RT_int8(in_buff,out_buff) if(my_id .ne. IO_id) then size = local_rt_nx*local_rt_ny tag = 2 - call mpi_send(in_buff,size,MPI_INTEGER8, IO_id, & + call MPI_Send(in_buff,size,MPI_INTEGER8, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 @@ -1067,7 +1067,7 @@ subroutine write_IO_RT_int8(in_buff,out_buff) else size = local_rt_nx_size(i+1)*local_rt_ny_size(i+1) tag = 2 - call mpi_recv(out_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Recv(out_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if end do @@ -1078,7 +1078,7 @@ end subroutine write_IO_RT_int8 subroutine mpp_land_bcast_log1(inout) logical inout integer ierr - call mpi_bcast(inout,1,MPI_LOGICAL, & + call MPI_Bcast(inout,1,MPI_LOGICAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_log1 @@ -1088,7 +1088,7 @@ subroutine mpp_land_bcast_int(size,inout) integer size integer inout(size) integer ierr - call mpi_bcast(inout,size,MPI_INTEGER, & + call MPI_Bcast(inout,size,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int @@ -1097,7 +1097,7 @@ subroutine mpp_land_bcast_int8(size,inout) integer size integer(kind=int64) inout(size) integer ierr - call mpi_bcast(inout,size,MPI_INTEGER8, & + call MPI_Bcast(inout,size,MPI_INTEGER8, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int8 @@ -1107,7 +1107,7 @@ subroutine mpp_land_bcast_int8_1d(inout) integer(kind=int64) inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_INTEGER8, & + call MPI_Bcast(inout,len,MPI_INTEGER8, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int8_1d @@ -1117,7 +1117,7 @@ subroutine mpp_land_bcast_int1d(inout) integer inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_INTEGER, & + call MPI_Bcast(inout,len,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int1d @@ -1128,14 +1128,14 @@ subroutine mpp_land_bcast_int1d_root(inout, rootId) integer, intent(in) :: rootId integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,len,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) 
return end subroutine mpp_land_bcast_int1d_root subroutine mpp_land_bcast_int1(inout) integer inout integer ierr - call mpi_bcast(inout,1,MPI_INTEGER, & + call MPI_Bcast(inout,1,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int1 @@ -1144,14 +1144,14 @@ subroutine mpp_land_bcast_int1_root(inout, rootId) integer inout integer ierr integer, intent(in) :: rootId - call mpi_bcast(inout,1,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,1,MPI_INTEGER,rootId,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_int1_root subroutine mpp_land_bcast_logical(inout) logical :: inout integer ierr - call mpi_bcast(inout,1,MPI_LOGICAL, & + call MPI_Bcast(inout,1,MPI_LOGICAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_logical @@ -1160,14 +1160,14 @@ subroutine mpp_land_bcast_logical_root(inout, rootId) logical :: inout integer, intent(in) :: rootId integer ierr - call mpi_bcast(inout,1,MPI_LOGICAL,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,1,MPI_LOGICAL,rootId,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_logical_root subroutine mpp_land_bcast_real1(inout) real inout integer ierr - call mpi_bcast(inout,1,MPI_REAL, & + call MPI_Bcast(inout,1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real1 @@ -1175,7 +1175,7 @@ end subroutine mpp_land_bcast_real1 subroutine mpp_land_bcast_real1_double(inout) real*8 inout integer ierr - call mpi_bcast(inout,1,MPI_REAL8, & + call MPI_Bcast(inout,1,MPI_REAL8, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real1_double @@ -1185,7 +1185,7 @@ subroutine mpp_land_bcast_real_1d(inout) real inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_real, & + call MPI_Bcast(inout,len,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real_1d @@ -1196,7 +1196,7 @@ subroutine mpp_land_bcast_real_1d_root(inout, rootId) integer, intent(in) :: rootId integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_real,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,len,MPI_REAL,rootId,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real_1d_root @@ -1205,7 +1205,7 @@ subroutine mpp_land_bcast_real8_1d(inout) real*8 inout(:) integer ierr len = size(inout,1) - call mpi_bcast(inout,len,MPI_double, & + call MPI_Bcast(inout,len,MPI_DOUBLE, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real8_1d @@ -1215,7 +1215,7 @@ subroutine mpp_land_bcast_real(size1,inout) ! 
real inout(size1) real , dimension(:) :: inout integer ierr, len - call mpi_bcast(inout,size1,MPI_real, & + call MPI_Bcast(inout,size1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_real @@ -1227,7 +1227,7 @@ subroutine mpp_land_bcast_int2d(inout) length1 = size(inout,1) length2 = size(inout,2) do k = 1, length2 - call mpi_bcast(inout(:,k),length1,MPI_INTEGER, & + call MPI_Bcast(inout(:,k),length1,MPI_INTEGER, & IO_id,HYDRO_COMM_WORLD,ierr) end do return @@ -1240,7 +1240,7 @@ subroutine mpp_land_bcast_real2(inout) length1 = size(inout,1) length2 = size(inout,2) do k = 1, length2 - call mpi_bcast(inout(:,k),length1,MPI_real, & + call MPI_Bcast(inout(:,k),length1,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) end do return @@ -1255,7 +1255,7 @@ subroutine mpp_land_bcast_real3d(inout) length3 = size(inout,3) do k = 1, length3 do j = 1, length2 - call mpi_bcast(inout(:,j,k), length1, MPI_real, & + call MPI_Bcast(inout(:,j,k), length1, MPI_REAL, & IO_id, HYDRO_COMM_WORLD, ierr) end do end do @@ -1266,7 +1266,7 @@ subroutine mpp_land_bcast_rd(size,inout) integer size real*8 inout(size) integer ierr - call mpi_bcast(inout,size,MPI_REAL8, & + call MPI_Bcast(inout,size,MPI_REAL8, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_rd @@ -1275,7 +1275,7 @@ subroutine mpp_land_bcast_char(size,inout) integer size character inout(*) integer ierr - call mpi_bcast(inout,size,MPI_CHARACTER, & + call MPI_Bcast(inout,size,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_char @@ -1285,7 +1285,7 @@ subroutine mpp_land_bcast_char_root(size,inout,rootId) character inout(*) integer, intent(in) :: rootId integer ierr - call mpi_bcast(inout,size,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,size,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_char_root @@ -1294,7 +1294,7 @@ subroutine mpp_land_bcast_char1d(inout) integer :: lenSize integer :: ierr lenSize = size(inout,1)*len(inout) - call mpi_bcast(inout,lenSize,MPI_CHARACTER, & + call MPI_Bcast(inout,lenSize,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_char1d @@ -1305,7 +1305,7 @@ subroutine mpp_land_bcast_char1d_root(inout,rootId) integer :: lenSize integer :: ierr lenSize = size(inout,1)*len(inout) - call mpi_bcast(inout,lenSize,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) + call MPI_Bcast(inout,lenSize,MPI_CHARACTER,rootId,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_char1d_root @@ -1314,7 +1314,7 @@ subroutine mpp_land_bcast_char1(inout) character(len=*) inout integer ierr len = LEN_TRIM(inout) - call mpi_bcast(inout,len,MPI_CHARACTER, & + call MPI_Bcast(inout,len,MPI_CHARACTER, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine mpp_land_bcast_char1 @@ -1496,13 +1496,13 @@ subroutine decompose_RT_real (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_REAL, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_REAL,IO_id, & + call MPI_Recv(out_buff,size,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if return @@ -1532,13 +1532,13 @@ subroutine decompose_RT_int (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. 
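
The mpp_land_bcast_* wrappers recased above all have the same shape: derive a count from the argument and call MPI_Bcast with the fixed I/O rank as root, so every process leaves the call holding the root's copy. A minimal sketch of that wrapper shape, with MPI_COMM_WORLD standing in for HYDRO_COMM_WORLD and illustrative names:

  program bcast_wrapper_sketch
    use mpi
    implicit none
    integer, parameter :: io_id = 0
    integer :: ierr, my_id
    real    :: vals(3)

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, my_id, ierr)

    if (my_id == io_id) vals = (/ 1.0, 2.0, 3.0 /)   ! only the I/O rank has the data

    call bcast_real_1d(vals)                          ! every rank leaves with a copy
    print *, 'rank', my_id, 'vals', vals

    call MPI_Finalize(ierr)

  contains

    ! same shape as the 1-d real wrapper above: take the array, derive the
    ! count, and broadcast from the fixed I/O rank
    subroutine bcast_real_1d(inout)
      real, intent(inout) :: inout(:)
      integer :: ierr
      call MPI_Bcast(inout, size(inout), MPI_REAL, io_id, MPI_COMM_WORLD, ierr)
    end subroutine bcast_real_1d

  end program bcast_wrapper_sketch
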
size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_INTEGER,IO_id, & + call MPI_Recv(out_buff,size,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if return @@ -1568,13 +1568,13 @@ subroutine decompose_RT_int8 (in_buff,out_buff,g_nx,g_ny,nx,ny) else ! send data to the rest process. size = (iend-ibegin+1)*(jend-jbegin+1) - call mpi_send(in_buff(ibegin:iend,jbegin:jend),size,& + call MPI_Send(in_buff(ibegin:iend,jbegin:jend),size,& MPI_INTEGER8, i,tag,HYDRO_COMM_WORLD,ierr) end if end do else size = nx*ny - call mpi_recv(out_buff,size,MPI_INTEGER8,IO_id, & + call MPI_Recv(out_buff,size,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) end if return @@ -1627,8 +1627,8 @@ subroutine wrf_LAND_set_INIT(info,total_pe,AGGFACTRT) integer :: ierr, status integer i - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) - call MPI_COMM_SIZE( HYDRO_COMM_WORLD, numprocs, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_size( HYDRO_COMM_WORLD, numprocs, ierr ) if(numprocs .ne. total_pe) then write(6,*) "FATAL ERROR: In wrf_LAND_set_INIT() - numprocs .ne. total_pe ",numprocs, total_pe @@ -1692,7 +1692,7 @@ end subroutine wrf_LAND_set_INIT subroutine getMy_global_id() integer ierr - call MPI_COMM_RANK( HYDRO_COMM_WORLD, my_id, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, my_id, ierr ) return end subroutine getMy_global_id @@ -1919,14 +1919,14 @@ subroutine mpp_land_max_int1(v) if(i .ne. my_id) then !block receive from other node. tag = 101 - call mpi_recv(r1,1,MPI_INTEGER,i, & + call MPI_Recv(r1,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(max <= r1) max = r1 end if end do else tag = 101 - call mpi_send(v,1,MPI_INTEGER, IO_id, & + call MPI_Send(v,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_int1(max) @@ -1944,14 +1944,14 @@ subroutine mpp_land_max_real1(v) if(i .ne. my_id) then !block receive from other node. tag = 101 - call mpi_recv(r1,1,MPI_REAL,i, & + call MPI_Recv(r1,1,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(max <= r1) max = r1 end if end do else tag = 101 - call mpi_send(v,1,MPI_REAL, IO_id, & + call MPI_Send(v,1,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_real1(max) @@ -1968,14 +1968,14 @@ subroutine mpp_same_int1(v) if(i .ne. my_id) then !block receive from other node. tag = 109 - call mpi_recv(r1,1,MPI_INTEGER,i, & + call MPI_Recv(r1,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(v .ne. r1) v = -99 end if end do else tag = 109 - call mpi_send(v,1,MPI_INTEGER, IO_id, & + call MPI_Send(v,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if call mpp_land_bcast_int1(v) @@ -2014,11 +2014,11 @@ subroutine write_chanel_real(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. 
tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_REAL,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -2047,10 +2047,10 @@ subroutine write_chanel_real(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_REAL,IO_id, & + call MPI_Send(v,nlinks,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -2088,11 +2088,11 @@ subroutine write_chanel_int(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -2121,10 +2121,10 @@ subroutine write_chanel_int(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_INTEGER,IO_id, & + call MPI_Send(v,nlinks,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if if(allocated(tmp_map)) deallocate(tmp_map) @@ -2162,10 +2162,10 @@ subroutine write_chanel_int8(v,map_l2g,gnlinks,nlinks,g_v) !block receive from other node. tag = 109 - call mpi_recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & + call MPI_Recv(tmp_map(1:message_len),message_len,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 119 - call mpi_recv(tmp_v(1:message_len),message_len,MPI_INTEGER8,i, & + call MPI_Recv(tmp_v(1:message_len),message_len,MPI_INTEGER8,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,message_len @@ -2194,10 +2194,10 @@ subroutine write_chanel_int8(v,map_l2g,gnlinks,nlinks,g_v) end do else tag = 109 - call mpi_send(map_l2g,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(map_l2g,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 119 - call mpi_send(v,nlinks,MPI_INTEGER8,IO_id, & + call MPI_Send(v,nlinks,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if if(allocated(tmp_map)) deallocate(tmp_map) @@ -2218,10 +2218,10 @@ subroutine write_lake_real(v,nodelist_in,nlakes) if(i .ne. my_id) then !block receive from other node. tag = 129 - call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, & + call MPI_Recv(nodelist,nlakes,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 139 - call mpi_recv(recv(:),nlakes,MPI_REAL,i, & + call MPI_Recv(recv(:),nlakes,MPI_REAL,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,nlakes @@ -2234,10 +2234,10 @@ subroutine write_lake_real(v,nodelist_in,nlakes) end do else tag = 129 - call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, & + call MPI_Send(nodelist,nlakes,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 139 - call mpi_send(v,nlakes,MPI_REAL,IO_id, & + call MPI_Send(v,nlakes,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if end subroutine write_lake_real @@ -2258,10 +2258,10 @@ subroutine write_lake_char(v,nodelist_in,nlakes) if(i .ne. my_id) then !block receive from other node. 
tag = 129 - call mpi_recv(nodelist,nlakes,MPI_INTEGER,i, & + call MPI_Recv(nodelist,nlakes,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 139 - call mpi_recv(recv(:),in_len,MPI_CHARACTER,i, & + call MPI_Recv(recv(:),in_len,MPI_CHARACTER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1,nlakes @@ -2274,10 +2274,10 @@ subroutine write_lake_char(v,nodelist_in,nlakes) end do else tag = 129 - call mpi_send(nodelist,nlakes,MPI_INTEGER, IO_id, & + call MPI_Send(nodelist,nlakes,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 139 - call mpi_send(v,in_len,MPI_CHARACTER,IO_id, & + call MPI_Send(v,in_len,MPI_CHARACTER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if end subroutine write_lake_char @@ -2294,7 +2294,7 @@ subroutine read_rst_crt_r(unit,out,size) 99 continue call mpp_land_bcast_int1(ierr2) if(ierr2 .ne. 0) return - call mpi_bcast(out,size,MPI_REAL, & + call MPI_Bcast(out,size,MPI_REAL, & IO_id,HYDRO_COMM_WORLD,ierr) return end subroutine read_rst_crt_r @@ -2317,13 +2317,13 @@ subroutine sum_int1d(vin,nsize) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vin(:) = vin(:) + recv(:) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2339,7 +2339,7 @@ subroutine combine_int1d(vin,nsize, flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .ne. flag) then @@ -2349,7 +2349,7 @@ subroutine combine_int1d(vin,nsize, flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2365,7 +2365,7 @@ subroutine combine_int8_1d(vin,nsize, flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER8,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER8,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .ne. flag) then @@ -2375,7 +2375,7 @@ subroutine combine_int8_1d(vin,nsize, flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER8,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER8,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int8_1d(vin) @@ -2401,14 +2401,14 @@ subroutine sum_real8(vin,nsize) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_DOUBLE_PRECISION,i, & + call MPI_Recv(recv,nsize,MPI_DOUBLE_PRECISION,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vin(:) = vin(:) + recv(:) endif end do v = vin else - call mpi_send(vin,nsize,MPI_DOUBLE_PRECISION,IO_id, & + call MPI_Send(vin,nsize,MPI_DOUBLE_PRECISION,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_real(nsize,v) @@ -2422,10 +2422,10 @@ end subroutine sum_real8 ! ! if ( my_id .eq. IO_id ) then ! g_ix = ix -! call mpi_reduce( MPI_IN_PLACE, g_ix, 4, MPI_INTEGER, & +! call MPI_Reduce( MPI_IN_PLACE, g_ix, 4, MPI_INTEGER, & ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr ) ! else -! call mpi_reduce( ix, 0, 4, MPI_INTEGER, & +! call MPI_Reduce( ix, 0, 4, MPI_INTEGER, & ! MPI_SUM, 0, HYDRO_COMM_WORLD, ierr ) ! endif ! call mpp_land_bcast_int1(g_ix) @@ -2456,24 +2456,24 @@ subroutine gather_1d_real_tmp(vl,s_in,e_in,vg,sg) if(i .ne. 
my_id) then !block receive from other node. tag = 202 - call mpi_recv(index_s,2,MPI_INTEGER,i, & + call MPI_Recv(index_s,2,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 203 e = index_s(2) s = index_s(1) size = e - s + 1 - call mpi_recv(vg(s:e),size,MPI_REAL, & + call MPI_Recv(vg(s:e),size,MPI_REAL, & i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) endif end do else tag = 202 - call mpi_send(index_s,2,MPI_INTEGER, IO_id, & + call MPI_Send(index_s,2,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 203 - call mpi_send(vl,size,MPI_REAL,IO_id, & + call MPI_Send(vl,size,MPI_REAL,IO_id, & tag,HYDRO_COMM_WORLD,ierr) end if @@ -2485,7 +2485,7 @@ subroutine sum_real1(inout) real:: inout, send integer :: ierr send = inout - CALL MPI_ALLREDUCE(send,inout,1,MPI_REAL,MPI_SUM,HYDRO_COMM_WORLD,ierr) + call MPI_Allreduce(send,inout,1,MPI_REAL,MPI_SUM,HYDRO_COMM_WORLD,ierr) end subroutine sum_real1 subroutine sum_double(inout) @@ -2493,8 +2493,8 @@ subroutine sum_double(inout) real*8:: inout, send integer :: ierr send = inout - !yw CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE,MPI_SUM,HYDRO_COMM_WORLD,ierr) - CALL MPI_ALLREDUCE(send,inout,1,MPI_DOUBLE_PRECISION,MPI_SUM,HYDRO_COMM_WORLD,ierr) + !yw call MPI_Allreduce(send,inout,1,MPI_DOUBLE,MPI_SUM,HYDRO_COMM_WORLD,ierr) + call MPI_Allreduce(send,inout,1,MPI_DOUBLE_PRECISION,MPI_SUM,HYDRO_COMM_WORLD,ierr) end subroutine sum_double subroutine mpp_chrt_nlinks_collect(nlinks) @@ -2508,14 +2508,14 @@ subroutine mpp_chrt_nlinks_collect(nlinks) if(my_id .eq. IO_id) then do i = 0,numprocs -1 if(i .ne. my_id) then - call mpi_recv(mpp_nlinks(i+1),1,MPI_INTEGER,i, & + call MPI_Recv(mpp_nlinks(i+1),1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) else mpp_nlinks(i+1) = 0 end if end do else - call mpi_send(nlinks,1,MPI_INTEGER, IO_id, & + call MPI_Send(nlinks,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif @@ -2589,13 +2589,13 @@ subroutine mpp_collect_1d_int(nlinks,vinout) if(my_id .eq. IO_id) then do i = 0,numprocs -1 if(i .ne. my_id) then - call mpi_recv(buf,nlinks,MPI_INTEGER,i, & + call MPI_Recv(buf,nlinks,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) vinout = vinout + buf end if end do else - call mpi_send(vinout,nlinks,MPI_INTEGER, IO_id, & + call MPI_Send(vinout,nlinks,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vinout) @@ -2618,11 +2618,11 @@ subroutine mpp_collect_1d_int_mem(nlinks,vinout) do i = 0,numprocs -1 if(i .ne. my_id) then tag = 120 - call mpi_recv(lsize,1,MPI_INTEGER,i, & + call MPI_Recv(lsize,1,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) if(lsize .gt. 0) then tag = 121 - call mpi_recv(tmpBuf(1:lsize),lsize,MPI_INTEGER,i, & + call MPI_Recv(tmpBuf(1:lsize),lsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, lsize m = tmpBuf(k) @@ -2641,11 +2641,11 @@ subroutine mpp_collect_1d_int_mem(nlinks,vinout) end if end do tag = 120 - call mpi_send(lsize,1,MPI_INTEGER, IO_id, & + call MPI_Send(lsize,1,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) if(lsize .gt. 0) then tag = 121 - call mpi_send(tmpIn(1:lsize),lsize,MPI_INTEGER, IO_id, & + call MPI_Send(tmpIn(1:lsize),lsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif endif @@ -2664,12 +2664,12 @@ subroutine updateLake_seqInt(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,nsize,MPI_INTEGER, IO_id, & + call MPI_Send(in,nsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. 
IO_id) then - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2692,12 +2692,12 @@ subroutine updateLake_seqInt8(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,nsize,MPI_INTEGER8, IO_id, & + call MPI_Send(in,nsize,MPI_INTEGER8, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_INTEGER8,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2729,7 +2729,7 @@ subroutine updateLake_seq(in,nsize,in0) allocate(prev(nsize)) if (my_id == IO_id) prev = in0 - call mpi_bcast(prev, nsize, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Bcast(prev, nsize, MPI_REAL, IO_id, HYDRO_COMM_WORLD, ierr) if (my_id == IO_id) then adjustment = in @@ -2737,7 +2737,7 @@ subroutine updateLake_seq(in,nsize,in0) adjustment = in - prev end if - call mpi_allreduce(adjustment, in, nsize, MPI_REAL, MPI_SUM, HYDRO_COMM_WORLD, ierr) ! TODO: check ierr! + call MPI_Allreduce(adjustment, in, nsize, MPI_REAL, MPI_SUM, HYDRO_COMM_WORLD, ierr) ! TODO: check ierr! deallocate(adjustment) deallocate(prev) @@ -2758,12 +2758,12 @@ subroutine updateLake_seq_char(in,nsize,in0) tag = 29 if(my_id .ne. IO_id) then - call mpi_send(in,in_len,MPI_CHARACTER, IO_id, & + call MPI_Send(in,in_len,MPI_CHARACTER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then - call mpi_recv(tmp,in_len,& + call MPI_Recv(tmp,in_len,& MPI_CHARACTER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(in0(k) .ne. tmp(k)) in(k) = tmp(k) @@ -2787,19 +2787,19 @@ subroutine updateLake_grid(in,nsize,lake_index) if(my_id .ne. IO_id) then tag = 29 - call mpi_send(in,nsize,MPI_REAL, IO_id, & + call MPI_Send(in,nsize,MPI_REAL, IO_id, & tag,HYDRO_COMM_WORLD,ierr) tag = 30 - call mpi_send(lake_index,nsize,MPI_INTEGER, IO_id, & + call MPI_Send(lake_index,nsize,MPI_INTEGER, IO_id, & tag,HYDRO_COMM_WORLD,ierr) else do i = 0, numprocs - 1 if(i .ne. IO_id) then tag = 29 - call mpi_recv(tmp,nsize,& + call MPI_Recv(tmp,nsize,& MPI_REAL,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) tag = 30 - call mpi_recv(lake_index,nsize,& + call MPI_Recv(lake_index,nsize,& MPI_INTEGER,i,tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(lake_index(k) .gt. 0) in(k) = tmp(k) @@ -2824,7 +2824,7 @@ subroutine match1dLake(vin,nsize,flag) if(my_id .eq. IO_id) then do i = 0, numprocs - 1 if(i .ne. my_id) then - call mpi_recv(recv,nsize,MPI_INTEGER,i, & + call MPI_Recv(recv,nsize,MPI_INTEGER,i, & tag,HYDRO_COMM_WORLD,mpp_status,ierr) do k = 1, nsize if(recv(k) .eq. flag) vin(k) = flag @@ -2839,7 +2839,7 @@ subroutine match1dLake(vin,nsize,flag) endif end do else - call mpi_send(vin,nsize,MPI_INTEGER,IO_id, & + call MPI_Send(vin,nsize,MPI_INTEGER,IO_id, & tag,HYDRO_COMM_WORLD,ierr) endif call mpp_land_bcast_int1d(vin) @@ -2849,13 +2849,13 @@ end subroutine match1dLake subroutine mpp_land_abort() implicit none integer ierr - CALL MPI_ABORT(HYDRO_COMM_WORLD,1,IERR) + call MPI_Abort(HYDRO_COMM_WORLD,1,ierr) end subroutine mpp_land_abort ! mpp_land_abort subroutine mpp_land_sync() implicit none integer ierr - call MPI_barrier( HYDRO_COMM_WORLD ,ierr) + call MPI_Barrier( HYDRO_COMM_WORLD ,ierr) if(ierr .ne. 0) call mpp_land_abort() return end subroutine mpp_land_sync ! 
mpp_land_sync @@ -2867,10 +2867,10 @@ subroutine mpp_comm_scalar_real(scalar, fromImage, toImage) integer:: ierr, tag tag=2 if(my_id .eq. fromImage) & - call mpi_send(scalar, 1, MPI_REAL, & + call MPI_Send(scalar, 1, MPI_REAL, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(scalar, 1, MPI_REAL, & + call MPI_Recv(scalar, 1, MPI_REAL, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) end subroutine mpp_comm_scalar_real @@ -2882,10 +2882,10 @@ subroutine mpp_comm_scalar_char(scalar, fromImage, toImage) tag=2 length=len(scalar) if(my_id .eq. fromImage) & - call mpi_send(scalar, length, MPI_CHARACTER, & + call MPI_Send(scalar, length, MPI_CHARACTER, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(scalar, length, MPI_CHARACTER, & + call MPI_Recv(scalar, length, MPI_CHARACTER, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) end subroutine mpp_comm_scalar_char @@ -2897,14 +2897,14 @@ subroutine mpp_comm_1d_real(vector, fromImage, toImage) integer:: ierr, tag integer:: my_id, numprocs tag=2 - call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr) - call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD,my_id,ierr) + call MPI_Comm_size(HYDRO_COMM_WORLD,numprocs,ierr) if(numprocs > 1) then if(my_id .eq. fromImage) & - call mpi_send(vector, size(vector), MPI_REAL, & + call MPI_Send(vector, size(vector), MPI_REAL, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(vector, size(vector), MPI_REAL, & + call MPI_Recv(vector, size(vector), MPI_REAL, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) endif end subroutine mpp_comm_1d_real @@ -2917,15 +2917,15 @@ subroutine mpp_comm_1d_char(vector, fromImage, toImage) integer:: ierr, tag, totalLength integer:: my_id,numprocs tag=2 - call MPI_COMM_RANK(HYDRO_COMM_WORLD,my_id,ierr) - call MPI_COMM_SIZE(HYDRO_COMM_WORLD,numprocs,ierr) + call MPI_Comm_rank(HYDRO_COMM_WORLD,my_id,ierr) + call MPI_Comm_size(HYDRO_COMM_WORLD,numprocs,ierr) totalLength=len(vector(1))*size(vector,1) if(numprocs > 1) then if(my_id .eq. fromImage) & - call mpi_send(vector, totalLength, MPI_CHARACTER, & + call MPI_Send(vector, totalLength, MPI_CHARACTER, & toImage, tag, HYDRO_COMM_WORLD, ierr) if(my_id .eq. toImage) & - call mpi_recv(vector, totalLength, MPI_CHARACTER, & + call MPI_Recv(vector, totalLength, MPI_CHARACTER, & fromImage, tag, HYDRO_COMM_WORLD, mpp_status, ierr) endif end subroutine mpp_comm_1d_char diff --git a/src/Routing/module_NWM_io.F b/src/Routing/module_NWM_io.F index efaa6c74a..43e7da435 100644 --- a/src/Routing/module_NWM_io.F +++ b/src/Routing/module_NWM_io.F @@ -172,7 +172,7 @@ subroutine output_chrt_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -1144,7 +1144,7 @@ subroutine output_NoahMP_NWM(outDir,iGrid,output_timestep,itime,startdate,date,i ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -1857,7 +1857,7 @@ subroutine output_rt_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 
1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -2433,7 +2433,7 @@ subroutine output_lakes_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -3125,7 +3125,7 @@ subroutine output_chrtout_grd_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -3613,7 +3613,7 @@ subroutine output_lsmOut_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -4067,7 +4067,7 @@ subroutine output_frxstPts(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -4375,7 +4375,7 @@ subroutine output_chanObs_NWM(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -5090,7 +5090,7 @@ subroutine output_gw_NWM(domainId,iGrid) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else diff --git a/src/Routing/module_gw_gw2d.F b/src/Routing/module_gw_gw2d.F index 7d663e723..5243b7a36 100644 --- a/src/Routing/module_gw_gw2d.F +++ b/src/Routing/module_gw_gw2d.F @@ -822,12 +822,12 @@ subroutine gwstep(ix, jx, dx, & #ifdef MPP_LAND -call mpi_reduce(delcur, mpiDelcur, 1, MPI_REAL, MPI_SUM, 0, HYDRO_COMM_WORLD, ierr) -call MPI_COMM_SIZE( HYDRO_COMM_WORLD, mpiSize, ierr ) +call MPI_Reduce(delcur, mpiDelcur, 1, MPI_REAL, MPI_SUM, 0, HYDRO_COMM_WORLD, ierr) +call MPI_Comm_size( HYDRO_COMM_WORLD, mpiSize, ierr ) if(my_id .eq. 
IO_id) delcur = mpiDelcur/mpiSize -call mpi_bcast(delcur, 1, mpi_real, 0, HYDRO_COMM_WORLD, ierr) +call MPI_Bcast(delcur, 1, MPI_REAL, 0, HYDRO_COMM_WORLD, ierr) #endif @@ -907,10 +907,10 @@ subroutine gwstep(ix, jx, dx, & #ifdef HYDRO_D #ifdef MPP_LAND - call MPI_REDUCE(dtot,gdtot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(dtoa,gdtoa,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(eocn,geocn,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) - call MPI_REDUCE(ebot,gebot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(dtot,gdtot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(dtoa,gdtoa,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(eocn,geocn,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) + call MPI_Reduce(ebot,gebot,1, MPI_REAL, MPI_SUM, IO_id, HYDRO_COMM_WORLD, ierr) if(my_id .eq. IO_id) then write (*,900) & @@ -1262,11 +1262,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (ZSPS,j)th equations. ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1294,9 +1294,9 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1328,11 +1328,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (ZSPS,j)th equations. ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1362,8 +1362,8 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & call add_dt(ct,tf,ti,dt) #endif - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) do 60 j = 1, XSPS ! Backward elimination in (0,j)th equations. 
@@ -1375,7 +1375,7 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & 70 continue 60 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else if (z_pid .lt. ZDNS) then @@ -1385,9 +1385,9 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Receive (ZSPS+1,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1420,11 +1420,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (1,j)th equations. ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1450,8 +1450,8 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & #endif ! ! Send (ZSPS,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, 1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) #ifdef TIMING tf = click() @@ -1468,7 +1468,7 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & 110 continue 100 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else @@ -1484,11 +1484,11 @@ subroutine parysolv1(c,b,r,ct,pid,z_pid, & ! ! Send (1,j)th equations. ! ! Receive (0,j)th equations. - call mpi_cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) - call MPI_ISEND(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, rowshift, -1, source, dest, ierr) + call MPI_Isend(zntmp, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( zn, cnt, MPI_REAL, dest, ZN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1574,11 +1574,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,XSPS)th equations. ! ! Receive (i,(XSPS + 1))th equations. 
- call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1609,9 +1609,9 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Receive (i,0)th equations. - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1642,11 +1642,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,XSPS)th equations. ! ! Receive (i,(XSPS + 1))th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1675,8 +1675,8 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & tf = click() call add_dt(ct,tf,ti,dt) #endif - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) do 60 i = 1, ZSPS ! Backward elimination in (i,0)th equations. @@ -1690,7 +1690,7 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & r(i,j) = r(i,j) - b(i,j)*r(i,XSPS) - c(i,j)*r(i,1) 70 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else if (x_pid .lt. XDNS) then @@ -1700,9 +1700,9 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Receive (i,XSPS+1)th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1732,11 +1732,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Send (i,1)th equations. ! ! Receive (i,0)th equations. 
- call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() @@ -1762,8 +1762,8 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & #endif ! ! Send (i,XSPS)th equations. - call mpi_cart_shift(cartGridComm, colshift, 1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Cart_shift(cartGridComm, colshift, 1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) #ifdef TIMING tf = click() call add_dt(ct,tf,ti,dt) @@ -1781,7 +1781,7 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & r(i,j) = r(i,j) - c(i,j)*r(i,1) - b(i,j)*r(i,XSPS) 110 continue - call mpi_wait(sendReq, mpp_status, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) else @@ -1798,11 +1798,11 @@ subroutine parxsolv1(c,b,r,ct,pid,x_pid, & ! ! Send (i,1)th equations. ! ! Receive (i,0)th equations. - call mpi_cart_shift(cartGridComm, colshift, -1, source, dest, ierr) - call MPI_ISEND(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) - call MPI_IRECV( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) - call mpi_wait(sendReq, mpp_status, ierr) - call mpi_wait(recvReq, mpp_status, ierr) + call MPI_Cart_shift(cartGridComm, colshift, -1, source, dest, ierr) + call MPI_Isend(xntmp, cnt, MPI_REAL, dest, XN_REC, cartGridComm, sendReq, ierr) + call MPI_Irecv( xn, cnt, MPI_REAL, dest, XN_REC, cartGridComm, recvReq, ierr) + call MPI_Wait(sendReq, mpp_status, ierr) + call MPI_Wait(recvReq, mpp_status, ierr) #ifdef TIMING tf = click() diff --git a/src/Routing/module_reservoir_routing.F b/src/Routing/module_reservoir_routing.F index b2b20b459..bc157b2ba 100644 --- a/src/Routing/module_reservoir_routing.F +++ b/src/Routing/module_reservoir_routing.F @@ -1,5 +1,5 @@ ! Intended purpose is to provide a module for all subroutines related to -! reservoir routing, including active management, level pool, and integrating live +! reservoir routing, including active management, level pool, and integrating live ! data feeds. As of NWMv2.0, this module stub can read in a timeslice file ! to incorporate data from external sources, should a data service become available. @@ -83,7 +83,7 @@ subroutine read_reservoir_obs(domainId) ! If not MPI, then default to 0, which is the I/O ID. if(mppFlag .eq. 1) then #ifdef MPP_LAND - call MPI_COMM_RANK( HYDRO_COMM_WORLD, myId, ierr ) + call MPI_Comm_rank( HYDRO_COMM_WORLD, myId, ierr ) call nwmCheck(diagFlag,ierr,'ERROR: Unable to determine MPI process ID.') #endif else @@ -92,7 +92,7 @@ subroutine read_reservoir_obs(domainId) ! Open up and read in the NetCDF file containing disharge data. if(myId .eq. 0) then - ! Initialize our missing flag to 0. If at any point we don't find a file, + ! Initialize our missing flag to 0. If at any point we don't find a file, ! the flag value will go to 1 to indicate no files were found. 
missingFlag = 0 diff --git a/src/utils/module_hydro_stop.F b/src/utils/module_hydro_stop.F index 724d61dce..47fe9c182 100644 --- a/src/utils/module_hydro_stop.F +++ b/src/utils/module_hydro_stop.F @@ -35,7 +35,7 @@ subroutine HYDRO_stop(msg) ! call flush(my_id+90) call mpp_land_abort() - call MPI_finalize(ierr) + call MPI_Finalize(ierr) #else stop "FATAL ERROR: Program stopped. Recompile with environment variable HYDRO_D set to 1 for enhanced debug information." #endif
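
read_reservoir_obs above, like the output_*_NWM routines earlier in the diff, guards its file access with the same pattern: look up the MPI rank (defaulting to the I/O rank when MPI is not compiled in), let only that rank touch the file, then broadcast a status flag so every rank takes the same branch afterwards. A minimal sketch under those assumptions; the filename and the plain inquire below are placeholders for the NetCDF timeslice read and are not taken from the source.

  program io_rank_guard_sketch
    use mpi
    implicit none
    integer, parameter :: io_id = 0
    integer :: ierr, my_id, missing_flag
    logical :: found

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, my_id, ierr)

    missing_flag = 0
    if (my_id == io_id) then
       ! only the I/O rank touches the filesystem; a simple existence check
       ! stands in for opening and reading the observation file here
       inquire(file='reservoir_timeslice.nc', exist=found)   ! hypothetical filename
       if (.not. found) missing_flag = 1
    end if

    ! every rank needs the outcome to take the same branch afterwards
    call MPI_Bcast(missing_flag, 1, MPI_INTEGER, io_id, MPI_COMM_WORLD, ierr)

    if (missing_flag == 1 .and. my_id == io_id) then
       print *, 'no observation file found; skipping update'
    end if

    call MPI_Finalize(ierr)
  end program io_rank_guard_sketch
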