!=== COPYRIGHT AND LICENSE STATEMENT ===
!
!  This file is part of the TensorProductMultigrid code.
!  
!  (c) The copyright relating to this work is owned jointly by the
!  Crown, Met Office and NERC [2014]. However, it has been created
!  with the help of the GungHo Consortium, whose members are identified
!  at https://puma.nerc.ac.uk/trac/GungHo/wiki .
!  
!  Main Developer: Eike Mueller
!  
!  TensorProductMultigrid is free software: you can redistribute it and/or
!  modify it under the terms of the GNU Lesser General Public License as
!  published by the Free Software Foundation, either version 3 of the
!  License, or (at your option) any later version.
!  
!  TensorProductMultigrid is distributed in the hope that it will be useful,
!  but WITHOUT ANY WARRANTY; without even the implied warranty of
!  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!  GNU Lesser General Public License for more details.
!  
!  You should have received a copy of the GNU Lesser General Public License
!  along with TensorProductMultigrid (see files COPYING and COPYING.LESSER).
!  If not, see <http://www.gnu.org/licenses/>.
!
!=== COPYRIGHT AND LICENSE STATEMENT ===


!==================================================================
!
!  MPI communication routines for multigrid code
!
!    Eike Mueller, University of Bath, Feb 2012
!
!==================================================================

module communication
  use messages
  use datatypes
  use parameters
  use timer

  implicit none

public::comm_preinitialise
public::comm_initialise
public::comm_finalise
public::scalarprod_mnh
public::haloswap_mnh
public::ihaloswap_mnh
public::ihaloswap
public::collect
public::distribute
public::i_am_master_mpi
public::master_rank
public::pproc
public::MPI_COMM_HORIZ
public::comm_parameters
public::comm_measuretime


  ! Number of processors
  ! n_proc = 2^(2*pproc), with integer pproc
  integer :: pproc

! Rank of master process
  integer, parameter :: master_rank = 0
! Am I the master process?
  logical :: i_am_master_mpi

  integer, parameter :: dim = 3  ! Dimension
  integer, parameter :: dim_horiz = 2  ! Horizontal dimension
  integer :: MPI_COMM_HORIZ ! Communicator with horizontal partitioning

private

! Data types for halo exchange in both x- and y-direction
  integer, dimension(:,:,:), allocatable :: halo_type

! MPI vector data types
  ! Halo for data exchange in north-south direction
  integer, allocatable, dimension(:,:) :: halo_ns
  integer, allocatable, dimension(:,:) :: halo_nst
  integer, allocatable, dimension(:,:) :: halo_wet
  ! Vector data type for interior of field a(level,m)
  integer, allocatable, dimension(:,:) :: interior
  ! Vector data type for one quarter of the interior of a field
  ! at level a(level,m). This has the same size as (and can be
  ! used for communications with) the interior of a(level,m+1)
  integer, allocatable, dimension(:,:) :: sub_interior
  ! Timer for halo swaps
  type(time), allocatable, dimension(:,:) :: t_haloswap
  ! Timer for collect and distribute
  type(time), allocatable, dimension(:) :: t_collect
  type(time), allocatable, dimension(:) :: t_distribute
  ! Parallelisation parameters
  ! Measure communication times?
  logical :: comm_measuretime

  ! Parallel communication parameters
  type comm_parameters
    ! Size of halos
    integer :: halo_size
  end type comm_parameters

  type(comm_parameters) :: comm_param
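  ! Example (sketch): a driver would typically fill in a comm_parameters
  ! structure and pass it to comm_initialise(); the halo size of 1 below
  ! is purely illustrative.
  !
  !   type(comm_parameters) :: cp
  !   cp%halo_size = 1
  !   call comm_initialise(n_lev, lev_split, grid_param, cp)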

! Data layout
! ===========
!
!  The number of processes has to be of the form nproc = 2^(2*pproc) to
!  ensure that data can be distributed between processes.
!  The processes are arranged in a (2^pproc) x (2^pproc) cartesian grid
!  in the horizontal plane (i.e. vertical columns are always local to one
!  process), which is implemented via the communicator MPI_COMM_HORIZ.
!  MPI_cart_rank() and MPI_cart_shift() can then be used to
!  easily identify neighbouring processes.
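!
!  For example, a process can obtain the ranks of its two neighbours in
!  the first (North-South) coordinate direction with a call like the
!  following (sketch; the neighbour variable names are illustrative):
!
!    call mpi_cart_shift(MPI_COMM_HORIZ, 0, 1, nbr_prev, nbr_next, ierr)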

!  The number of data grid cells in each direction has to be a multiple
!  of 2**(L-1) where L is the number of levels (including the coarse
!  and fine level), with the coarse level corresponding to level=1.
!  We also define L_split as the level at which data starts to be pulled
!  together. For levels > L_split every position in the cartesian grid
!  takes part in the work; below this level only a subset of the
!  processes is used.
!
!  Each grid a(level,m) is identified by two numbers:
!  (1) The multigrid level it belongs to (level)
!  (2) The number of active processes that operate on it (2^(2*m)).
!
!  For level > L_split, m = pproc. At L_split we store a(L_split,pproc) and
!  a(L_split,pproc-1), and only processes with even coordinates in both
!  horizontal directions use the latter grid.
!  Below that level we store a(L_split-1,pproc-1) and a(L_split-1,pproc-2),
!  where only processes for which both horizontal coordinates are
!  multiples of four use the latter. This is continued until only one
!  process is left.
!
!
!  level
!    L          a(L,pproc)
!    L-1        a(L-1,pproc)
!    ...
!    L_split    a(L_split,pproc)    a(L_split  ,pproc-1)
!    L_split-1                      a(L_split-1,pproc-1)  a(L_split-1,pproc-2)
!
!                                                                  ... a(3,1)
!                                                                      a(2,1)
!                                                                      a(1,1)
!
!  When moving from left to right in the above graph the total number of
!  grid cells does not change, but the number of data points per process
!  increases by a factor of 4.
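!
!  Example: for pproc = 2 (i.e. 16 processes) the grid a(L_split,pproc)
!  is spread over all 16 processes, whereas a(L_split,pproc-1) covers the
!  same global grid but lives only on the 4 processes with even
!  coordinates in both horizontal directions, each of which therefore
!  holds four times as many data points.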
!
! Parallel operations
! ===================
!
!   (*)  Halo exchange. Update halo with data from neighbouring
!        processors in cartesian grid on current (level,m)
!   (*)  Collect data on all processes at (level,m) on those
!        processes that are still active on (level,m-1).
!   (*)  Distribute data at (level,m-1) and duplicate on all processes
!        that are active at (level,m).
!
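!   (These operations are provided by the public routines haloswap_mnh /
!   ihaloswap_mnh / ihaloswap, collect and distribute declared above.)
!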
!   Note that in the cartesian processor grid the first coordinate
!   is the North-South (y-) direction, the second coordinate is the
!   East-West (x-) direction, i.e. the layout is this:
!
!   p_0 (0,0)   p_1 (0,1)   p_2 (0,2)   p_3 (0,3)
!
!   p_4 (1,0)   p_5 (1,1)   p_6 (1,2)   p_7 (1,3)
!
!   p_8 (2,0)   p_9 (2,1)   p_10 (2,2)  p_11 (2,3)
!
!                       [...]
!
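!   For example (sketch), the rank of the process at coordinates (1,2)
!   in the layout above can be obtained with
!
!     call mpi_cart_rank(MPI_COMM_HORIZ, (/1,2/), rank, ierr)
!
!   which returns rank = 6.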
!
!   Normal multigrid restriction and prolongation are used to
!   move between levels with fixed m.
!
!

contains

!==================================================================
! Pre-initialise communication routines
!==================================================================
  subroutine comm_preinitialise()
    implicit none
    integer :: nproc, ierr, rank
    call mpi_comm_size(MPI_COMM_WORLD, nproc, ierr)
    call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
    i_am_master_mpi = (rank == master_rank)
    ! Check that nproc = 2^(2*p)
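    ! (pproc is the largest integer with 4**pproc <= nproc; the run is
    !  aborted below unless nproc is exactly 4**pproc)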
    pproc = floor(log(1.0d0*nproc)/log(4.0d0))
    if ( (nproc - 4**pproc) .ne. 0) then
      call fatalerror("Number of processors has to be 2^(2*pproc) with integer pproc.")
    end if
    if (i_am_master_mpi) then
      write(STDOUT,'("PARALLEL RUN")')
      write(STDOUT,'("Number of processors : 2^(2*pproc) = ",I10," with pproc = ",I6)') &
        nproc, pproc
    end if
    ! Create halo data types

  end subroutine comm_preinitialise

!==================================================================
! Initialise communication routines
!==================================================================
  subroutine comm_initialise(n_lev,        & !} multigrid parameters
                            lev_split,     & !}
                            grid_param,    & ! Grid parameters
                            comm_param_in)   ! Parallel communication
                                             ! parameters
    implicit none
    integer, intent(in) :: n_lev
    integer, intent(in) :: lev_split
    type(grid_parameters), intent(inout) :: grid_param
    type(comm_parameters), intent(in)    :: comm_param_in
    integer :: n
    integer :: nz
    integer :: rank, nproc, ierr
    integer :: count, blocklength, stride
    integer, dimension(2) :: p_horiz
    integer :: m, level, nlocal
    logical :: reduced_m
    integer :: halo_size
    character(len=32) :: t_label

    n = grid_param%n
    nz = grid_param%nz

    comm_param = comm_param_in
    halo_size = comm_param%halo_size

    call mpi_comm_size(MPI_COMM_WORLD, nproc, ierr)

    ! Create cartesian topology
    call mpi_cart_create(MPI_COMM_WORLD,        & ! Old communicator name
                         dim_horiz,             & ! horizontal dimension
                         (/2**pproc,2**pproc/), & ! extent in each horizontal direction
                         (/.false.,.false./),   & ! periodic?
                         .true.,                & ! reorder?
                         MPI_COMM_HORIZ,        & ! Name of new communicator
                         ierr)
    ! Calculate and display rank and coordinates in the cartesian grid
    call mpi_comm_rank(MPI_COMM_HORIZ, rank, ierr)
    call mpi_cart_coords(MPI_COMM_HORIZ,rank,dim_horiz,p_horiz,ierr)