        ! Send to NW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)-stepsize,p_horiz(2)/), &
                               dest_rank, &
                               ierr)
            send_tag = 1
            call mpi_send(a%s(0,1,1),1,interior(level,m),dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Collect: send from SW failed in mpi_send().")
    #endif
          end if
          if ( corner_se ) then
        ! Send to NW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)-stepsize,p_horiz(2)-stepsize/), &
                               dest_rank, &
                               ierr)
            send_tag = 2
            call mpi_send(a%s(0,1,1),1,interior(level,m),dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Collect: send from SE failed in mpi_send().")
    #endif
          end if
    
        end if
        call finish_timer(t_collect(m))
    
      end subroutine collect
    
    !==================================================================
    !  Distribute data in a(level,m-1) and store in b(level,m)
    !
!  Example for pproc-m = 1, i.e. stepsize = 2:
!
!   NW (0,0)  -->  NE (0,2)
!
!     |      .
!     v        .
!                .
!   SW (2,0)       SE (2,2) [receives from (0,0)]
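!
!  On each 2 x 2 block of participating processes the NW corner
!  sends one quadrant of its field a to NE, SW and SE (tags 0, 1
!  and 2) and copies the remaining NW quadrant locally; the other
!  three corners receive their quadrant into b.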
    !==================================================================
      subroutine distribute(level,m, &  ! multigrid and processor level
                            a,       &  ! IN: Data on level (level,m-1)
                            b)          ! OUT: Data on level (level,m)
        implicit none
        integer, intent(in) :: level
        integer, intent(in) :: m
        type(scalar3d), intent(in) :: a
        type(scalar3d), intent(inout) :: b
        integer :: a_n, b_n   ! horizontal grid sizes
        integer :: nz ! vertical grid size
        integer, dimension(2) :: p_horiz
        integer :: stepsize
        integer :: ierr, source_rank, dest_rank, send_tag, recv_tag, rank, iz
        integer :: stat(MPI_STATUS_SIZE)
        integer :: send_request(3)
        logical :: corner_nw, corner_ne, corner_sw, corner_se
    
        call start_timer(t_distribute(m))
    
        stepsize = 2**(pproc-m)
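    ! e.g. for pproc = 2 this gives stepsize = 1 for m = 2,
    ! stepsize = 2 for m = 1 and stepsize = 4 for m = 0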
    
        a_n = a%ix_max-a%ix_min+1
        b_n = b%ix_max-b%ix_min+1
        nz = a%grid_param%nz
    
        ! Work out rank, only execute on relevant processes
        call mpi_comm_rank(MPI_COMM_HORIZ, rank, ierr)
        call mpi_cart_coords(MPI_COMM_HORIZ,rank,dim_horiz,p_horiz,ierr)
    
        ! Ignore all processes that do not participate at this level
    if ( (stepsize .eq. 1) .or. ((mod(p_horiz(1),stepsize) == 0) .and. (mod(p_horiz(2),stepsize) == 0)) ) then
          ! Work out coordinates in local 2 x 2 block
          if (stepsize .eq. 1) then
            corner_nw = ((mod(p_horiz(1),2) == 0) .and. (mod(p_horiz(2),2) == 0))
            corner_ne = ((mod(p_horiz(1),2) == 0) .and. (mod(p_horiz(2),2) == 1))
            corner_sw = ((mod(p_horiz(1),2) == 1) .and. (mod(p_horiz(2),2) == 0))
            corner_se = ((mod(p_horiz(1),2) == 1) .and. (mod(p_horiz(2),2) == 1))
          else
            corner_nw = ((mod(p_horiz(1)/stepsize,2) == 0) .and. (mod(p_horiz(2)/stepsize,2) == 0))
            corner_ne = ((mod(p_horiz(1)/stepsize,2) == 0) .and. (mod(p_horiz(2)/stepsize,2) == 1))
            corner_sw = ((mod(p_horiz(1)/stepsize,2) == 1) .and. (mod(p_horiz(2)/stepsize,2) == 0))
            corner_se = ((mod(p_horiz(1)/stepsize,2) == 1) .and. (mod(p_horiz(2)/stepsize,2) == 1))
          end if
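      ! Worked example for stepsize = 2: process p_horiz = (0,2)
      ! participates since both coordinates are divisible by 2, and
      ! p_horiz/stepsize = (0,1) marks it as the NE corner of its
      ! 2 x 2 block (cf. the diagram above).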
          if ( corner_nw ) then
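        ! Note: the derived datatypes interior and sub_interior are
        ! assumed to be set up elsewhere in this module;
        ! sub_interior(level,m-1) describes one (a_n/2) x (a_n/2)
        ! quadrant within the memory layout of a, while
        ! interior(level,m) matches the full interior of the smaller
        ! field b, so each send below pairs with one receive on a
        ! neighbouring corner.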
            ! (Asynchronous) send to NE
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1),p_horiz(2)+stepsize/), &
                               dest_rank, &
                               ierr)
    
            send_tag = 0
            call mpi_isend(a%s(0,1,a_n/2+1), 1,sub_interior(level,m-1),dest_rank, send_tag, &
                           MPI_COMM_HORIZ,send_request(1),ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: send to NE failed in mpi_isend().")
    #endif
            ! (Asynchronous) send to SW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)+stepsize,p_horiz(2)/), &
                               dest_rank, &
                               ierr)
            send_tag = 1
            call mpi_isend(a%s(0,a_n/2+1,1),1,sub_interior(level,m-1), dest_rank, send_tag, &
                           MPI_COMM_HORIZ, send_request(2), ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: send to SW failed in mpi_isend().")
    #endif
            ! (Asynchronous) send to SE
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)+stepsize,p_horiz(2)+stepsize/), &
                               dest_rank, &
                               ierr)
            send_tag = 2
        call mpi_isend(a%s(0,a_n/2+1,a_n/2+1),1,sub_interior(level,m-1), dest_rank, send_tag, &
                       MPI_COMM_HORIZ, send_request(3), ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: send to SE failed in mpi_isend().")
    #endif
            ! While sending, copy local data
            do iz=0,nz+1
              b%s(iz,1:b_n,1:b_n) = a%s(iz,1:b_n,1:b_n)
            end do
        ! Only proceed once the asynchronous sends have completed
            call mpi_waitall(3, send_request, MPI_STATUSES_IGNORE, ierr)
          end if
          if ( corner_ne ) then
    
            ! Receive from NW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1),p_horiz(2)-stepsize/), &
                               source_rank, &
                               ierr)
            recv_tag = 0
            call mpi_recv(b%s(0,1,1),1,interior(level,m),source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: receive on NE failed in mpi_recv().")
    #endif
          end if
          if ( corner_sw ) then
            ! Receive from NW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)-stepsize,p_horiz(2)/), &
                               source_rank, &
                               ierr)
            recv_tag = 1
            call mpi_recv(b%s(0,1,1),1,interior(level,m),source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: receive on SW failed in mpi_recv().")
    #endif
          end if
          if ( corner_se ) then
            ! Receive from NW
            call mpi_cart_rank(MPI_COMM_HORIZ, &
                               (/p_horiz(1)-stepsize,p_horiz(2)-stepsize/), &
                               source_rank, &
                               ierr)
            recv_tag = 2
            call mpi_recv(b%s(0,1,1),1,interior(level,m),source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
    #ifndef NDEBUG
      if (ierr .ne. 0) &
        call fatalerror("Distribute: receive on NW failed in mpi_recv().")
    #endif
          end if
    
        end if
        call finish_timer(t_distribute(m))
    
      end subroutine distribute
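
!==================================================================
!  Usage sketch (illustrative only; u_coarse and u_fine are
!  hypothetical scalar3d fields on processor levels m-1 and m):
!
!    call distribute(level, m, u_coarse, u_fine)
!
!  distribute() is the inverse of collect() above: collect gathers
!  the four quadrants of each 2 x 2 process block onto its NW
!  corner, distribute scatters them back, using the same tag
!  convention (0 = NE, 1 = SW, 2 = SE) in both directions.
!==================================================================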
    
    end module communication