no_mpi.F90
!>
!! \brief This module contains data and routines for MPI parallelization
!!
!! Module for C2Ray / Capreole (3D)
!!
!! \b Author: Garrelt Mellema
!!
!! \b Date: 2010-03-04
!!
!! \b Version: This is a dummy module for systems where there is no MPI for F90.
!!
!! This module is also accepted by the F compiler (Dec 9, 2003)\n
module my_mpi

  use file_admin, only: logf, results_dir

#ifdef XLF
  USE XLFUTILITY, only: hostnm => hostnm_ , flush => flush_
#endif

#ifdef IFORT
  USE IFPORT, only: hostnm, flush
#endif

#ifdef MY_OPENMP
  USE OMP_LIB, only: omp_get_num_threads, omp_get_thread_num
#endif

  implicit none

  integer,parameter,public :: NPDIM=3 !< dimension of problem

  ! All of these are set to be consistent with the MPI version
  integer,public :: rank         !< rank of the processor
  integer,public :: npr          !< number of processors
  integer,public :: nthreads     !< number of threads (per processor)
  integer,public :: MPI_COMM_NEW !< the (new) communicator (dummy)

  integer,dimension(NPDIM),public :: dims        !< number of processors in each dimension (dummy)
  integer,dimension(NPDIM),public :: grid_struct !< coordinates of the processors in the grid (dummy)

  integer,public :: nbrleft,nbrright  !< left and right neighbours
  integer,public :: nbrdown,nbrup     !< down and up neighbours
  integer,public :: nbrabove,nbrbelow !< above and below neighbours

  integer,parameter,public :: MPI_PROC_NULL=-1

#ifdef SUN
  integer :: hostnm
#endif

  public :: mpi_setup,mpi_end
  private :: mpi_basic,mpi_topology,fnd2dnbrs

contains
  !----------------------------------------------------------------------------

  !> Sets up MPI; this is normally the routine to call.\n
  !! It opens the log file, reports the machine name, and calls
  !! mpi_basic and mpi_topology to set up the MPI communicator.
  subroutine mpi_setup ( )

    character(len=512) :: filename ! name of the log file
    integer :: ierror
#ifdef MY_OPENMP
    integer :: tn
#endif
    character(len=100) :: hostname

    call mpi_basic ()

    ! Open processor dependent log file
    if (logf /= 6) then
      filename=trim(adjustl(trim(adjustl(results_dir))//"C2Ray.log"))
      open(unit=logf,file=filename,status="unknown",action="write", &
           position="append")
    endif

    write(unit=logf,fmt="(A)") "Log file for C2-Ray run"

    nthreads=1

    ! Figure out hostname
    ! NOTE: compiler dependent!!!
    ierror=hostnm(hostname)
    if (ierror == 0) then
      write(logf,*) "Running on processor named ",hostname
    else
      write(logf,*) "Error establishing identity of processor."
    endif

    ! Report number of OpenMP threads
    !$omp parallel default(shared)
#ifdef MY_OPENMP
    nthreads=omp_get_num_threads()
#endif
    !$omp end parallel

#ifdef MY_OPENMP
    write(logf,*) ' The code was compiled for OpenMP'
    write(logf,*) ' Number of OpenMP threads used is ',nthreads
#endif

    ! Let OpenMP threads report
    !$omp parallel default(shared) private(tn)
#ifdef MY_OPENMP
    tn=omp_get_thread_num()+1
    write(logf,*) 'Thread number ',tn,' reporting'
#endif
    !$omp end parallel

    flush(logf)

    call mpi_topology ()

  end subroutine mpi_setup
  !----------------------------------------------------------------------------

  !> Sets up basic MPI. Here it just sets the rank and npr variables
  subroutine mpi_basic ( )

    rank=0 ! Find processor rank

    ! Find total number of processors (npr)
    npr=1

  end subroutine mpi_basic
  !----------------------------------------------------------------------------

  !> Creates a new topology (for domain decomposition). Here (no MPI) it just
  !! defines the communicator as 0.
  subroutine mpi_topology ( )

    ! Make a new topology
    dims=1

    ! makes MPI_COMM_NEW
    MPI_COMM_NEW=0

    ! makes grid_struct
    grid_struct=0

    ! Find the neighbours.
    ! The neighbours are rank +/- 1; the grid boundaries are handled
    ! by using MPI_PROC_NULL.
    call fnd2dnbrs ( )

  end subroutine mpi_topology
  !----------------------------------------------------------------------------

  !> Ends MPI. Here it just closes the log file.
  subroutine mpi_end ( )

    ! Close log file
    close(logf)

  end subroutine mpi_end
  !----------------------------------------------------------------------------

  !> This routine finds the neighbouring processors in a 3-d decomposition of
  !! the grid. Here (no MPI) they are all set to MPI_PROC_NULL.
  subroutine fnd2dnbrs ( )

    ! This routine determines the neighbours in a 3-d decomposition of
    ! the grid. In the MPI version this assumes that MPI_Cart_create has
    ! already been called.

    ! Single processor version
    nbrleft=MPI_PROC_NULL
    nbrright=MPI_PROC_NULL
    nbrup=MPI_PROC_NULL
    nbrdown=MPI_PROC_NULL
    nbrabove=MPI_PROC_NULL
    nbrbelow=MPI_PROC_NULL

  end subroutine fnd2dnbrs

end module my_mpi
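
!----------------------------------------------------------------------------
! Usage sketch: a minimal driver illustrating how a code might call this
! dummy module, mirroring the calling sequence of the real MPI version.
! The program name "serial_driver" is illustrative only, and the sketch
! assumes that the file_admin module supplies usable values for logf and
! results_dir.
program serial_driver

  use my_mpi, only: mpi_setup, mpi_end, rank, npr

  implicit none

  ! Sets rank=0 and npr=1, and opens the run log (if logf is not stdout)
  call mpi_setup ()

  if (rank == 0) write(*,*) "Running serially on ",npr," processor(s)"

  ! Closes the log file
  call mpi_end ()

end program serial_driver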