!=== COPYRIGHT AND LICENSE STATEMENT ===
!
! This file is part of the TensorProductMultigrid code.
!
! (c) The copyright relating to this work is owned jointly by the
! Crown, Met Office and NERC [2014]. However, it has been created
! with the help of the GungHo Consortium, whose members are identified
! at https://puma.nerc.ac.uk/trac/GungHo/wiki .
!
! Main Developer: Eike Mueller
!
! TensorProductMultigrid is free software: you can redistribute it and/or
! modify it under the terms of the GNU Lesser General Public License as
! published by the Free Software Foundation, either version 3 of the
! License, or (at your option) any later version.
!
! TensorProductMultigrid is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public License
! along with TensorProductMultigrid (see files COPYING and COPYING.LESSER).
! If not, see <http://www.gnu.org/licenses/>.
!
!=== COPYRIGHT AND LICENSE STATEMENT ===
!==================================================================
!
! MPI communication routines for multigrid code
!
! Eike Mueller, University of Bath, Feb 2012
!
!==================================================================
module communication
use messages
use datatypes
use parameters
!use mpi
use modd_mpif
use timer
implicit none
public::comm_preinitialise
public::comm_initialise
public::comm_finalise
public::scalarprod

public::boundary_mnh
public::haloswap
public::ihaloswap
public::collect
public::distribute
public::i_am_master_mpi
public::master_rank
public::pproc
public::MPI_COMM_HORIZ
public::comm_parameters
public::comm_measuretime
! Number of processors
! n_proc = 2^(2*pproc), with integer pproc
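! (e.g. pproc = 0, 1, 2, 3 corresponds to n_proc = 1, 4, 16, 64)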
integer :: pproc
! Rank of master process
integer, parameter :: master_rank = 0
! Am I the master process?
logical :: i_am_master_mpi
integer, parameter :: dim = 3 ! Dimension
integer, parameter :: dim_horiz = 2 ! Horizontal dimension
integer :: MPI_COMM_HORIZ ! Communicator with horizontal partitioning
private
! Data types for halo exchange in both x- and y-direction
integer, dimension(:,:,:), allocatable :: halo_type
! MPI vector data types
! Halo for data exchange in north-south direction
integer, allocatable, dimension(:,:) :: halo_ns

! Additional halos for data exchange in the north-south and
! west-east direction (presumably for the MNH-specific exchange
! routines such as boundary_mnh)
integer, allocatable, dimension(:,:) :: halo_nst
integer, allocatable, dimension(:,:) :: halo_wet
! Vector data type for interior of field a(level,m)
integer, allocatable, dimension(:,:) :: interior
! Vector data type for one quarter of the interior of the field
! at level a(level,m). This has the same size as (and can be used
! for communication with) the interior of a(level,m+1)
integer, allocatable, dimension(:,:) :: sub_interior
! Timer for halo swaps
type(time), allocatable, dimension(:,:) :: t_haloswap
! Timer for collect and distribute
type(time), allocatable, dimension(:) :: t_collect
type(time), allocatable, dimension(:) :: t_distribute
! Parallelisation parameters
! Measure communication times?
logical :: comm_measuretime
! Parallel communication parameters
type comm_parameters
! Size of halos
integer :: halo_size
end type comm_parameters
type(comm_parameters) :: comm_param
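! Usage sketch (hypothetical caller; the halo width of 1 is an
! assumption for illustration only):
!
!   type(comm_parameters) :: cp
!   cp%halo_size = 1
!   call comm_preinitialise()
!   call comm_initialise(n_lev, lev_split, grid_param, cp)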
! Data layout
! ===========
!
! The number of processes has to be of the form nproc = 2^(2*pproc) to
! ensure that data can be distributed between processes.
! The processes are arranged in a (2^pproc) x (2^pproc) cartesian grid
! in the horizontal plane (i.e. vertical columns are always local to one
! process), which is implemented via the communicator MPI_COMM_HORIZ.
! MPI_cart_rank() and MPI_cart_shift() can then be used to
! identify neighbouring processes easily.
! The number of data grid cells in each direction has to be a multiple
! of 2**(L-1) where L is the number of levels (including the coarse
! and fine level), with the coarse level corresponding to level=1.
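! (e.g. for L = 4 levels the number of cells in each direction has
! to be a multiple of 2^3 = 8, so that the grid can be coarsened
! three times)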
! We also define L_split as the level at which we start to pull
! together data. For levels > L_split every position in the cartesian
! grid takes part in the work; below this level only a subset of the
! processes is used.
!
! Each grid a(level,m) is identified by two numbers:
! (1) The multigrid level it belongs to (level)
! (2) The number of active processes that operate on it (2^(2*m)).
!
! For level > L_split, m=pproc. At L_split we store a(L_split,pproc) and
! a(L_split,pproc-1), and only processes with even coordinates in both
! horizontal directions use the latter grid.
! Below that level we store a(L_split-1,pproc-1) and a(L_split-1,pproc-2),
! where only processes for which both horizontal coordinates are
! multiples of four use the latter. This is continued until only one
! process is left.
!
!
! level
!   L          a(L,pproc)
!   L-1        a(L-1,pproc)
!   ...
!   L_split    a(L_split,pproc)   a(L_split,pproc-1)
!   L_split-1                     a(L_split-1,pproc-1)  a(L_split-1,pproc-2)
!
!   ...                                                 a(3,1)
!                                                       a(2,1)
!                                                       a(1,1)
!
! When moving from left to right in the above graph the total number of
! grid cells does not change, but the number of data points per process
! increases by a factor of 4.
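! (illustration for pproc = 2, i.e. 16 processes and an n x n
! horizontal grid: at m = 2 each process owns (n/4) x (n/4) columns;
! after one collect, at m = 1, each of the 4 active processes owns
! (n/2) x (n/2) columns -- four times the data, same global grid)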
!
! Parallel operations
! ===================
!
! (*) Halo exchange. Update halo with data from neighbouring
! processors in cartesian grid on current (level,m)
! (*) Collect data from all processes at (level,m) onto those
! processes that are still active at (level,m-1).
! (*) Distribute data at (level,m-1) and duplicate on all processes
! that are active at (level,m).
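!
! A coarsening step at level L_split might thus look like this
! (sketch only -- the argument lists here are illustrative, not the
! actual interfaces):
!
!   call haloswap(L_split, pproc, a)    ! update halo at (L_split,pproc)
!   call collect(L_split, pproc, a, b)  ! gather a(L_split,pproc)
!                                       ! into b = a(L_split,pproc-1)
!
! and the matching refinement step uses distribute() to copy
! b = a(L_split,pproc-1) back onto all processes at (L_split,pproc).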
!
! Note that in the cartesian processor grid the first coordinate
! is the North-South (y-) direction, the second coordinate is the
! East-West (x-) direction, i.e. the layout is this:
!
! p_0 (0,0) p_1 (0,1) p_2 (0,2) p_3 (0,3)
!
! p_4 (1,0) p_5 (1,1) p_6 (1,2) p_7 (1,3)
!
! p_8 (2,0) p_9 (2,1) p_10 (2,2) p_11 (2,3)
!
! [...]
!
!
! Normal multigrid restriction and prolongation are used to
! move between levels with fixed m.
!
!
contains
!==================================================================
! Pre-initialise communication routines
!==================================================================
subroutine comm_preinitialise()
implicit none
integer :: nproc, ierr, rank
call mpi_comm_size(MPI_COMM_WORLD, nproc, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
i_am_master_mpi = (rank == master_rank)
! Check that nproc = 2^(2*pproc)
pproc = floor(log(1.0d0*nproc)/log(4.0d0))
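! (worked example: nproc = 16 gives pproc = floor(log(16)/log(4)) = 2;
! nproc = 12 yields pproc = 1, and the consistency check below then
! aborts because 12 /= 4**1)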
if ( (nproc - 4**pproc) .ne. 0) then
call fatalerror("Number of processors has to be 2^(2*pproc) with integer pproc.")
end if
if (i_am_master_mpi) then
write(STDOUT,'("PARALLEL RUN")')
write(STDOUT,'("Number of processors : 2^(2*pproc) = ",I10," with pproc = ",I6)') &
nproc, pproc
end if
! Create halo data types
end subroutine comm_preinitialise
!==================================================================
! Initialise communication routines
!==================================================================
subroutine comm_initialise(n_lev,         & !} multigrid parameters
                           lev_split,     & !}
                           grid_param,    & ! Grid parameters
                           comm_param_in)   ! Parallel communication
                                            ! parameters
implicit none
integer, intent(in) :: n_lev
integer, intent(in) :: lev_split
type(grid_parameters), intent(inout) :: grid_param
type(comm_parameters), intent(in) :: comm_param_in
integer :: n
integer :: nz
integer :: rank, nproc, ierr
integer :: count, blocklength, stride
integer, dimension(2) :: p_horiz
integer :: m, level, nlocal
logical :: reduced_m
integer :: halo_size
character(len=32) :: t_label
n = grid_param%n
nz = grid_param%nz
comm_param = comm_param_in
halo_size = comm_param%halo_size
call mpi_comm_size(MPI_COMM_WORLD, nproc, ierr)
! Create cartesian topology
call mpi_cart_create(MPI_COMM_WORLD,        & ! Old communicator name
                     dim_horiz,             & ! horizontal dimension
                     (/2**pproc,2**pproc/), & ! extent in each horizontal direction
                     (/.false.,.false./),   & ! periodic?
                     .true.,                & ! reorder?
                     MPI_COMM_HORIZ,        & ! Name of new communicator
                     ierr)
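! (for example, pproc = 2 gives a non-periodic 4 x 4 process grid;
! reorder = .true. permits MPI to renumber ranks in the new
! communicator)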
! calculate and display rank and coordinates in cartesian grid
call mpi_comm_rank(MPI_COMM_HORIZ, rank, ierr)
call mpi_cart_coords(MPI_COMM_HORIZ,rank,dim_horiz,p_horiz,ierr)