diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/communication.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/communication.f90
index 6348751a2d93a9f645fe5da18a1e51f2ea0e0095..c077863815235d688f0c4eaa4cac9059898289a3 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/communication.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/communication.f90
@@ -42,9 +42,11 @@ module communication
   use mpi
   use mpi, only NMNH_COMM_WORLD => MPI_COMM_WORLD
   use mpi, only MNH_STATUSES_IGNORE => MPI_STATUSES_IGNORE
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
   USE MODD_VAR_ll,      ONLY: NMNH_COMM_WORLD, MNH_STATUSES_IGNORE
+  use modd_precision,   only: MNHREAL_MPI
 #endif   
   use timer
 
@@ -335,7 +337,7 @@ contains
       blocklength = (nz+2)*halo_size
       stride = (nlocal+2*halo_size)*(nz+2)
 #ifndef MPIVIDE
-      call mpi_type_vector(count,blocklength,stride,MPI_DOUBLE_PRECISION, &
+      call mpi_type_vector(count,blocklength,stride,MNHREAL_MPI, &
                            halo_ns(level,m),ierr)
       call mpi_type_commit(halo_ns(level,m),ierr)
 #endif
@@ -347,7 +349,7 @@ contains
       blocklength =  nlocalx*halo_size                            ! (nz+2)*halo_size
       stride =       (nlocalx+2*halo_size) * (nlocaly+2*halo_size) ! (nlocal+2*halo_size)*(nz+2)
 #ifndef MPIVIDE
-      call mpi_type_vector(count,blocklength,stride,MPI_DOUBLE_PRECISION, &
+      call mpi_type_vector(count,blocklength,stride,MNHREAL_MPI, &
                            halo_nst(level,m),ierr)
       call mpi_type_commit(halo_nst(level,m),ierr)
 #endif
@@ -362,7 +364,7 @@ contains
       blocklength =  1*halo_size                                 ! (nz+2)*halo_size
       stride =       nlocalx+2*halo_size                          ! (nlocal+2*halo_size)*(nz+2)
 #ifndef MPIVIDE
-      call mpi_type_vector(count,blocklength,stride,MPI_DOUBLE_PRECISION, &
+      call mpi_type_vector(count,blocklength,stride,MNHREAL_MPI, &
                            halo_wet(level,m),ierr)
       call mpi_type_commit(halo_wet(level,m),ierr)
 #endif
@@ -384,14 +386,14 @@ contains
          blocklength = nlocal*(nz+2)
          stride = (nz+2)*(nlocal+2*halo_size)
 #ifndef MPIVIDE
-         call mpi_type_vector(count,blocklength,stride,MPI_DOUBLE_PRECISION,interior(level,m),ierr)
+         call mpi_type_vector(count,blocklength,stride,MNHREAL_MPI,interior(level,m),ierr)
          call mpi_type_commit(interior(level,m),ierr)
 #endif
          count = nlocal/2
          blocklength = nlocal/2*(nz+2)
          stride = (nlocal+2*halo_size)*(nz+2)
 #ifndef MPIVIDE
-         call mpi_type_vector(count,blocklength,stride,MPI_DOUBLE_PRECISION,sub_interior(level,m),ierr)
+         call mpi_type_vector(count,blocklength,stride,MNHREAL_MPI,sub_interior(level,m),ierr)
          call mpi_type_commit(sub_interior(level,m),ierr)
 #endif
       end if
@@ -403,7 +405,7 @@ contains
             coord_debut     = (/ 0                  , 0                  , 0    /)
 #ifndef MPIVIDE
             call MPI_TYPE_CREATE_SUBARRAY(nb_dims,profil_tab,profil_sous_tab,coord_debut,&
-                 MPI_ORDER_FORTRAN,MPI_DOUBLE_PRECISION,interiorT(level,m),ierr)
+                 MPI_ORDER_FORTRAN,MNHREAL_MPI,interiorT(level,m),ierr)
             call mpi_type_commit(interiorT(level,m),ierr)
 #endif
             call mnh_allocate_mg_halo(tab_interiorT_ne(level,m)%haloTin,nlocalx,nlocaly,nz+2)
@@ -420,7 +422,7 @@ contains
             coord_debut     = (/ 0                  , 0                  , 0    /)
 #ifndef MPIVIDE
             call MPI_TYPE_CREATE_SUBARRAY(nb_dims,profil_tab,profil_sous_tab,coord_debut,&
-                 MPI_ORDER_FORTRAN,MPI_DOUBLE_PRECISION,sub_interiorT(level,m),ierr)
+                 MPI_ORDER_FORTRAN,MNHREAL_MPI,sub_interiorT(level,m),ierr)
             call mpi_type_commit(sub_interiorT(level,m),ierr)
 #endif
             call mnh_allocate_mg_halo(tab_sub_interiorT_ne(level,m)%haloTin,nlocalx/2,nlocaly/2,nz+2)
@@ -625,9 +627,9 @@ contains
         if (LUseO) local_sum = 0.0_rl
         if (LUseT) local_sumt = 0.0_rl
       end if
-      if (LUseO) call mpi_allreduce(local_sum,global_sum,1,MPI_DOUBLE_PRECISION, &
+      if (LUseO) call mpi_allreduce(local_sum,global_sum,1,MNHREAL_MPI, &
                          MPI_SUM,MPI_COMM_HORIZ,ierr)
-      if (LUseT) call mpi_allreduce(local_sumt,global_sumt,1,MPI_DOUBLE_PRECISION, &
+      if (LUseT) call mpi_allreduce(local_sumt,global_sumt,1,MNHREAL_MPI, &
                          MPI_SUM,MPI_COMM_HORIZ,ierr)
     else
       if (LUseO) then   
@@ -701,7 +703,7 @@ contains
       else
         local_sum = 0.0_rl
       end if
-      call mpi_allreduce(local_sum,global_sum,1,MPI_DOUBLE_PRECISION, &
+      call mpi_allreduce(local_sum,global_sum,1,MNHREAL_MPI, &
                          MPI_SUM,MPI_COMM_HORIZ,ierr)
     else
       global_sum = 0.0_rl
@@ -958,7 +960,7 @@ contains
         ! Receive from west
         recvtag = 1000
         if (LUseO) call mpi_irecv(a%s(0,yoffset,0-(halo_size-1)),blocklength,  &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_w_rank,recvtag, &
                        MPI_COMM_HORIZ, recv_requests(3), ierr)
         recvtag = 1010
         if (LUseT) then
@@ -969,7 +971,7 @@ contains
         ! Receive from east
         sendtag = 1001
         if (LUseO) call mpi_irecv(a%s(0,yoffset,a_nx+1),blocklength,          &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_e_rank,recvtag, &
                        MPI_COMM_HORIZ, recv_requests(4), ierr)
         sendtag = 1011
         if (LUseT) then
@@ -980,7 +982,7 @@ contains
         ! Send to east
         sendtag = 1000
         if (LUseO) call mpi_isend(a%s(0,yoffset,a_nx-(halo_size-1)),blocklength,  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,sendtag, &
+                       MNHREAL_MPI,neighbour_e_rank,sendtag, &
                        MPI_COMM_HORIZ, send_requests(3), ierr)
         sendtag = 1010
         if (LUseT) then
@@ -991,7 +993,7 @@ contains
         ! Send to west
         recvtag = 1001
         if (LUseO) call mpi_isend(a%s(0,yoffset,1),blocklength,                &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,sendtag,   &
+                       MNHREAL_MPI,neighbour_w_rank,sendtag,   &
                        MPI_COMM_HORIZ, send_requests(4), ierr)
         recvtag = 1011
         if (LUseT) then
@@ -1082,22 +1084,22 @@ contains
         ! Receive from west
         recvtag = 0
         call mpi_irecv(a%s(0,yoffset,0-(halo_size-1)),blocklength,    &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_w_rank,recvtag, &
                        MPI_COMM_HORIZ, recv_requests(3), ierr)
         ! Receive from east
         sendtag = 1
         call mpi_irecv(a%s(0,yoffset,a_n+1),blocklength,          &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_e_rank,recvtag, &
                        MPI_COMM_HORIZ, recv_requests(4), ierr)
         ! Send to east
         sendtag = 0
         call mpi_isend(a%s(0,yoffset,a_n-(halo_size-1)),blocklength,  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,sendtag, &
+                       MNHREAL_MPI,neighbour_e_rank,sendtag, &
                        MPI_COMM_HORIZ, send_requests(3), ierr)
         ! Send to west
         recvtag = 1
         call mpi_isend(a%s(0,yoffset,1),blocklength,                &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,sendtag,   &
+                       MNHREAL_MPI,neighbour_w_rank,sendtag,   &
                        MPI_COMM_HORIZ, send_requests(4), ierr)
       end if
     end if
@@ -1349,7 +1351,7 @@ contains
 !!$           pztab_halo_nt_haloTout => tab_halo_nt(level,m)%haloTout
            !$acc host_data use_device(pztab_halo_nt_haloTout)
            call mpi_irecv(pztab_halo_nt_haloTout,size(pztab_halo_nt_haloTout),      &
-                       MPI_DOUBLE_PRECISION,neighbour_n_rank,recvtag,  &
+                       MNHREAL_MPI,neighbour_n_rank,recvtag,  &
                        MPI_COMM_HORIZ, requests_nsT(1), ierr)
            !$acc end host_data
            end if
@@ -1372,7 +1374,7 @@ contains
 !!$           pztab_halo_st_haloTout => tab_halo_st(level,m)%haloTout
            !$acc host_data use_device (pztab_halo_st_haloTout)
            call mpi_irecv(pztab_halo_st_haloTout,size(pztab_halo_st_haloTout),  &
-                       MPI_DOUBLE_PRECISION,neighbour_s_rank,recvtag,  &
+                       MNHREAL_MPI,neighbour_s_rank,recvtag,  &
                        MPI_COMM_HORIZ, requests_nsT(2), ierr)
            !$acc end host_data
            end if
@@ -1400,7 +1402,7 @@ contains
            if (Gneighbour_s) then
            !$acc host_data use_device(pztab_halo_st_haloTin)
            call mpi_isend(pztab_halo_st_haloTin,size(pztab_halo_st_haloTin),    &
-                       MPI_DOUBLE_PRECISION,neighbour_s_rank,sendtag,  &
+                       MNHREAL_MPI,neighbour_s_rank,sendtag,  &
                        MPI_COMM_HORIZ, requests_nsT(3), ierr)
            !$acc end host_data
            end if
@@ -1422,7 +1424,7 @@ contains
            if (Gneighbour_n) then
            !$acc host_data use_device(pztab_halo_nt_haloTin)
            call mpi_isend(pztab_halo_nt_haloTin,size(pztab_halo_nt_haloTin),   &
-                       MPI_DOUBLE_PRECISION,neighbour_n_rank,sendtag,  &
+                       MNHREAL_MPI,neighbour_n_rank,sendtag,  &
                        MPI_COMM_HORIZ, requests_nsT(4), ierr)
            !$acc end host_data
            end if
@@ -1441,7 +1443,7 @@ contains
         ! Receive from west
         recvtag = 1000
         if (LUseO) call mpi_irecv(a%s(0,yoffset,0-(halo_size-1)),blocklength,    &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_w_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ew(1), ierr)
         recvtag = 1010
         if (LUseT) then
@@ -1450,7 +1452,7 @@ contains
 !!$           pztab_halo_wt_haloTout => tab_halo_wt(level,m)%haloTout
            !$acc host_data use_device(pztab_halo_wt_haloTout)
            call mpi_irecv(pztab_halo_wt_haloTout,size(pztab_halo_wt_haloTout),  &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_w_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ewT(1), ierr)
            !$acc end host_data
            end if
@@ -1464,7 +1466,7 @@ contains
         ! Receive from east
         sendtag = 1001
         if (LUseO) call mpi_irecv(a%s(0,yoffset,a_nx+1),blocklength,          &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_e_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ew(2), ierr)
         sendtag = 1011
         if (LUseT) then
@@ -1473,7 +1475,7 @@ contains
 !!$           pztab_halo_et_haloTout => tab_halo_et(level,m)%haloTout
            !$acc host_data use_device(pztab_halo_et_haloTout)
            call mpi_irecv(pztab_halo_et_haloTout,size(pztab_halo_et_haloTout),  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_e_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ewT(2), ierr)
            !$acc end host_data
            end if
@@ -1488,7 +1490,7 @@ contains
         ! Send to east
         sendtag = 1000
         if (LUseO) call mpi_isend(a%s(0,yoffset,a_nx-(halo_size-1)),blocklength,  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,sendtag, &
+                       MNHREAL_MPI,neighbour_e_rank,sendtag, &
                        MPI_COMM_HORIZ, requests_ew(3), ierr)
         sendtag = 1010
         if (LUseT) then
@@ -1496,7 +1498,7 @@ contains
            if (Gneighbour_e) then
            !$acc host_data use_device(pztab_halo_et_haloTin)
            call mpi_isend(pztab_halo_et_haloTin,size(pztab_halo_et_haloTin),  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,sendtag, &
+                       MNHREAL_MPI,neighbour_e_rank,sendtag, &
                        MPI_COMM_HORIZ, requests_ewT(3), ierr)
            !$acc end host_data
            end if
@@ -1510,7 +1512,7 @@ contains
         ! Send to west
         recvtag = 1001
         if (LUseO) call mpi_isend(a%s(0,yoffset,1),blocklength,                &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,sendtag,   &
+                       MNHREAL_MPI,neighbour_w_rank,sendtag,   &
                        MPI_COMM_HORIZ, requests_ew(4), ierr)
         recvtag = 1011
         if (LUseT) then
@@ -1518,7 +1520,7 @@ contains
            if (Gneighbour_w) then
            !$acc host_data use_device(pztab_halo_wt_haloTin)
            call mpi_isend(pztab_halo_wt_haloTin,size(pztab_halo_wt_haloTin),  &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,sendtag,   &
+                       MNHREAL_MPI,neighbour_w_rank,sendtag,   &
                        MPI_COMM_HORIZ, requests_ewT(4), ierr)
            !$acc end host_data
            end if
@@ -1680,22 +1682,22 @@ contains
         ! Receive from west
         recvtag = 0
         call mpi_irecv(a%s(0,yoffset,0-(halo_size-1)),blocklength,    &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_w_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ew(1), ierr)
         ! Receive from east
         sendtag = 1
         call mpi_irecv(a%s(0,yoffset,a_n+1),blocklength,          &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,recvtag, &
+                       MNHREAL_MPI,neighbour_e_rank,recvtag, &
                        MPI_COMM_HORIZ, requests_ew(2), ierr)
         ! Send to east
         sendtag = 0
         call mpi_isend(a%s(0,yoffset,a_n-(halo_size-1)),blocklength,  &
-                       MPI_DOUBLE_PRECISION,neighbour_e_rank,sendtag, &
+                       MNHREAL_MPI,neighbour_e_rank,sendtag, &
                        MPI_COMM_HORIZ, requests_ew(3), ierr)
         ! Send to west
         recvtag = 1
         call mpi_isend(a%s(0,yoffset,1),blocklength,                &
-                       MPI_DOUBLE_PRECISION,neighbour_w_rank,sendtag,   &
+                       MNHREAL_MPI,neighbour_w_rank,sendtag,   &
                        MPI_COMM_HORIZ, requests_ew(4), ierr)
         ! Wait for East <-> West communication to complete
         if (halo_size == 1) then
@@ -1806,7 +1808,7 @@ contains
            ztab_sub_interiorT_ne_m_1_haloTout => tab_sub_interiorT_ne(level,m-1)%haloTout
            !$acc host_data use_device(ztab_sub_interiorT_ne_m_1_haloTout)
            call mpi_irecv(ztab_sub_interiorT_ne_m_1_haloTout,size(ztab_sub_interiorT_ne_m_1_haloTout), &
-                MPI_DOUBLE_PRECISION,source_rank, recv_tag, MPI_COMM_HORIZ, &
+                MNHREAL_MPI,source_rank, recv_tag, MPI_COMM_HORIZ, &
                 recv_requestT(1),ierr)
            !$acc end host_data
 #else
@@ -1834,7 +1836,7 @@ contains
            ztab_sub_interiorT_sw_m_1_haloTout => tab_sub_interiorT_sw(level,m-1)%haloTout
            !$acc host_data use_device(ztab_sub_interiorT_sw_m_1_haloTout)
            call mpi_irecv(ztab_sub_interiorT_sw_m_1_haloTout,size(ztab_sub_interiorT_sw_m_1_haloTout), &
-                MPI_DOUBLE_PRECISION,source_rank, recv_tag, MPI_COMM_HORIZ, &
+                MNHREAL_MPI,source_rank, recv_tag, MPI_COMM_HORIZ, &
                 recv_requestT(2),ierr)
            !$acc end host_data
 #else
@@ -1863,7 +1865,7 @@ contains
            ztab_sub_interiorT_se_m_1_haloTout => tab_sub_interiorT_se(level,m-1)%haloTout
            !$acc host_data use_device(ztab_sub_interiorT_se_m_1_haloTout)
            call mpi_irecv(ztab_sub_interiorT_se_m_1_haloTout,size(ztab_sub_interiorT_se_m_1_haloTout), &
-                MPI_DOUBLE_PRECISION,source_rank, recv_tag, MPI_COMM_HORIZ, &
+                MNHREAL_MPI,source_rank, recv_tag, MPI_COMM_HORIZ, &
                 recv_requestT(3),ierr)
            !$acc end host_data
 #else
@@ -1941,7 +1943,7 @@ contains
            !$acc end kernels           
            !$acc host_data use_device(ztab_interiorT_ne_m_haloTin)
            call mpi_send(ztab_interiorT_ne_m_haloTin,size(ztab_interiorT_ne_m_haloTin), &
-                MPI_DOUBLE_PRECISION,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
+                MNHREAL_MPI,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
            !$acc end host_data
 #else
            call mpi_send(a%st(1,1,0),1,interiorT(level,m),dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
@@ -1975,7 +1977,7 @@ contains
            !$acc end kernels           
            !$acc host_data use_device(ztab_interiorT_sw_m_haloTin)
            call mpi_send(ztab_interiorT_sw_m_haloTin,size(ztab_interiorT_sw_m_haloTin), &
-                MPI_DOUBLE_PRECISION,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
+                MNHREAL_MPI,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
            !$acc end host_data
 #else
            call mpi_send(a%st(1,1,0),1,interiorT(level,m),dest_rank,send_tag,MPI_COMM_HORIZ,ierr)        
@@ -2010,7 +2012,7 @@ contains
            !$acc end kernels
            !$acc host_data use_device(ztab_interiorT_se_m_haloTin)
            call mpi_send(ztab_interiorT_se_m_haloTin,size(ztab_interiorT_se_m_haloTin), &
-                MPI_DOUBLE_PRECISION,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
+                MNHREAL_MPI,dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
            !$acc end host_data
 #else
            call mpi_send(a%st(1,1,0),1,interiorT(level,m),dest_rank,send_tag,MPI_COMM_HORIZ,ierr)
@@ -2128,7 +2130,7 @@ contains
            !$acc end kernels           
            !$acc host_data use_device(ztab_sub_interiorT_ne_m_1_haloTin)
            call mpi_isend(ztab_sub_interiorT_ne_m_1_haloTin,size(ztab_sub_interiorT_ne_m_1_haloTin), &
-                MPI_DOUBLE_PRECISION,dest_rank, send_tag, &
+                MNHREAL_MPI,dest_rank, send_tag, &
                 MPI_COMM_HORIZ,send_requestT(1),ierr)
            !$acc end host_data
 #else
@@ -2161,7 +2163,7 @@ contains
            !$acc end kernels           
            !$acc host_data use_device(ztab_sub_interiorT_sw_m_1_haloTin)
            call mpi_isend(ztab_sub_interiorT_sw_m_1_haloTin,size(ztab_sub_interiorT_sw_m_1_haloTin), &
-                MPI_DOUBLE_PRECISION, dest_rank, send_tag, &
+                MNHREAL_MPI, dest_rank, send_tag, &
                 MPI_COMM_HORIZ, send_requestT(2), ierr)
            !$acc end host_data
 #else
@@ -2195,7 +2197,7 @@ contains
            !$acc end kernels           
            !$acc host_data use_device(ztab_sub_interiorT_se_m_1_haloTin)
            call mpi_isend(ztab_sub_interiorT_se_m_1_haloTin,size(ztab_sub_interiorT_se_m_1_haloTin), &
-                MPI_DOUBLE_PRECISION, dest_rank, send_tag, &
+                MNHREAL_MPI, dest_rank, send_tag, &
                 MPI_COMM_HORIZ, send_requestT(3), ierr)
            !$acc end host_data
 #else
@@ -2245,7 +2247,7 @@ contains
            ztab_interiorT_ne_m_haloTout => tab_interiorT_ne(level,m)%haloTout
            !$acc host_data use_device(ztab_interiorT_ne_m_haloTout)
            call mpi_recv(ztab_interiorT_ne_m_haloTout,size(ztab_interiorT_ne_m_haloTout), &
-                MPI_DOUBLE_PRECISION,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
+                MNHREAL_MPI,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
            !$acc end host_data
            !$acc kernels present_cr(zb_st,ztab_interiorT_ne_m_haloTout)
            !$mnh_do_concurrent(ii=1:b_nx,ij=1:b_ny,ik=1:nz+2)
@@ -2279,7 +2281,7 @@ contains
            ztab_interiorT_sw_m_haloTout => tab_interiorT_sw(level,m)%haloTout
            !$acc host_data use_device(ztab_interiorT_sw_m_haloTout)
            call mpi_recv(ztab_interiorT_sw_m_haloTout,size(ztab_interiorT_sw_m_haloTout), &           
-                MPI_DOUBLE_PRECISION,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
+                MNHREAL_MPI,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
            !$acc end host_data
            !$acc kernels present_cr(zb_st,ztab_interiorT_sw_m_haloTout)
            !$mnh_do_concurrent(ii=1:b_nx,ij=1:b_ny,ik=1:nz+2)
@@ -2313,7 +2315,7 @@ contains
            ztab_interiorT_se_m_haloTout => tab_interiorT_se(level,m)%haloTout
            !$acc host_data use_device(ztab_interiorT_se_m_haloTout)
            call mpi_recv(ztab_interiorT_se_m_haloTout,size(ztab_interiorT_se_m_haloTout), &
-                MPI_DOUBLE_PRECISION,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
+                MNHREAL_MPI,source_rank,recv_tag,MPI_COMM_HORIZ,stat,ierr)
            !$acc end host_data
            !$acc kernels present_cr(zb_st,ztab_interiorT_se_m_haloTout)
            !$mnh_do_concurrent(ii=1:b_nx,ij=1:b_ny,ik=1:nz+2)
diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/conjugategradient.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/conjugategradient.f90
index d6f883a98a348630a0c5cb84ea5fa811953ae43f..c259a72aa5a76479bcb132bb46857e692d3b4096 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/conjugategradient.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/conjugategradient.f90
@@ -235,7 +235,7 @@ contains
     call scalarprod_mnh(m,r,z,rz_old)
     ! res0 <- ||r||
     call scalarprod_mnh(m,r,r,res0)
-    res0 = dsqrt(res0)
+    res0 = sqrt(res0)
     if (cg_param%verbose > 0) then
       if (i_am_master_mpi) then
         write(STDOUT,'("    *** CG Solver ( ",I10," dof ) ***")') n_lin
@@ -257,7 +257,7 @@ contains
         if (LUseO) call daxpy(n_lin,-alpha,Ap%s,1,r%s,1)
         if (LUseT) call daxpy(n_lin,-alpha,Ap%st,1,r%st,1)
         call scalarprod_mnh(m,r,r,res)
-        res = dsqrt(res)
+        res = sqrt(res)
         if (cg_param%verbose > 1) then
           if (i_am_master_mpi) then
             write(STDOUT,'("    <CG> Iteration ",I6," ||r|| = ",E12.6)') &
@@ -451,7 +451,7 @@ contains
     call scalarprod(m,r,z,rz_old)
     ! res0 <- ||r||
     call scalarprod(m,r,r,res0)
-    res0 = dsqrt(res0)
+    res0 = sqrt(res0)
     if (cg_param%verbose > 0) then
       if (i_am_master_mpi) then
         write(STDOUT,'("    *** CG Solver ( ",I10," dof ) ***")') n_lin
@@ -473,7 +473,7 @@ contains
         if (LUseO) call daxpy(n_lin,-alpha,Ap%s,1,r%s,1)
         if (LUseT) call daxpy(n_lin,-alpha,Ap%st,1,r%st,1)
         call scalarprod(m,r,r,res)
-        res = dsqrt(res)
+        res = sqrt(res)
         if (cg_param%verbose > 1) then
           if (i_am_master_mpi) then
             write(STDOUT,'("    <CG> Iteration ",I6," ||r|| = ",E12.6)') &
diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/datatypes.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/datatypes.f90
index 8c5f1b8aae658eab34535c97eeac811e0c80ac66..4fe009e6a4e9f505840e9f3f31053e19d1626c3c 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/datatypes.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/datatypes.f90
@@ -42,9 +42,11 @@ module datatypes
 #ifndef MNH
   use mpi
   use mpi, only NMNH_COMM_WORLD => MPI_COMM_WORLD
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
   USE MODD_VAR_ll,      ONLY: NMNH_COMM_WORLD
+  use modd_precision,   only: MNHREAL_MPI
 #endif  
   use parameters
   use messages
@@ -387,13 +389,13 @@ private
     
     if (LUseO) then
        call mpi_allreduce(tmp,global_tmp, 1, &
-            MPI_DOUBLE_PRECISION,MPI_SUM,NMNH_COMM_WORLD,ierr)   
-       global_tmp = dsqrt(global_tmp)
+            MNHREAL_MPI,MPI_SUM,NMNH_COMM_WORLD,ierr)   
+       global_tmp = sqrt(global_tmp)
     endif
     if (LUseT) then
        call mpi_allreduce(tmpt,global_tmpt, 1, &
-            MPI_DOUBLE_PRECISION,MPI_SUM,NMNH_COMM_WORLD,ierr)
-       global_tmpt = dsqrt(global_tmpt)
+            MNHREAL_MPI,MPI_SUM,NMNH_COMM_WORLD,ierr)
+       global_tmpt = sqrt(global_tmpt)
     end if
     if (LUseO) then
        l2norm_mnh = global_tmp
@@ -453,8 +455,8 @@ private
     end if
 
     call mpi_allreduce(tmp,global_tmp, 1, &
-                       MPI_DOUBLE_PRECISION,MPI_SUM,NMNH_COMM_WORLD,ierr)
-    l2norm = dsqrt(global_tmp)
+                       MNHREAL_MPI,MPI_SUM,NMNH_COMM_WORLD,ierr)
+    l2norm = sqrt(global_tmp)
   end function l2norm
 
 !==================================================================
diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/mode_mg_read_param.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/mode_mg_read_param.f90
index d6cfa4fba56eab61de84958c701b435f2b14fb99..d61155c7de8dde99da4cd383fadd42289d99c8c5 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/mode_mg_read_param.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/mode_mg_read_param.f90
@@ -39,9 +39,11 @@ subroutine read_general_parameters(filename,savefields_out)
 #ifndef MNH
   use mpi
   use mpi, only NMNH_COMM_WORLD => MPI_COMM_WORLD
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
   USE MODD_VAR_ll,      ONLY: NMNH_COMM_WORLD
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   implicit none
   character(*), intent(in) :: filename
@@ -75,8 +77,10 @@ subroutine read_solver_parameters(filename,solver_param_out)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif
   implicit none
   character(*), intent(in) :: filename
@@ -112,7 +116,7 @@ subroutine read_solver_parameters(filename,solver_param_out)
   end if
   call mpi_bcast(solvertype,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(maxiter,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(resreduction,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(resreduction,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(LMean,1,MPI_LOGICAL,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(LUseO,1,MPI_LOGICAL,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(LUseT,1,MPI_LOGICAL,master_rank,NMNH_COMM_WORLD,ierr)
@@ -131,8 +135,10 @@ subroutine read_grid_parameters_mnh(filename,grid_param,KN,KNZ,PL,PH)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   implicit none
   character(*), intent(in) :: filename
@@ -167,8 +173,8 @@ subroutine read_grid_parameters_mnh(filename,grid_param,KN,KNZ,PL,PH)
   end if
   call mpi_bcast(n,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(nz,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(L,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(H,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(L,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(H,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(vertbc,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(graded,1,MPI_LOGICAL,master_rank,NMNH_COMM_WORLD,ierr)
   IF (PRESENT(KN)) THEN
@@ -206,8 +212,10 @@ subroutine read_grid_parameters(filename,grid_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif
   
   implicit none
@@ -245,8 +253,8 @@ subroutine read_grid_parameters(filename,grid_param)
   end if
   call mpi_bcast(n,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(nz,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(L,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(H,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(L,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(H,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(vertbc,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(graded,1,MPI_LOGICAL,master_rank,NMNH_COMM_WORLD,ierr)
   grid_param%n = n
@@ -268,8 +276,10 @@ subroutine read_comm_parameters(filename,comm_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   implicit none
   character(*), intent(in) :: filename
@@ -310,8 +320,10 @@ subroutine read_model_parameters(filename,model_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   implicit none
   character(*), intent(in) :: filename
@@ -333,9 +345,9 @@ subroutine read_model_parameters(filename,model_param)
     write(STDOUT,'("---------------------------------------------- ")')
     write(STDOUT,'("")')
   end if
-  call mpi_bcast(omega2,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(lambda2,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(delta,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(omega2,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(lambda2,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(delta,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   model_param%omega2 = omega2
   model_param%lambda2 = lambda2
   model_param%delta = delta
@@ -351,8 +363,10 @@ subroutine read_smoother_parameters(filename,smoother_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif
   implicit none
   character(*), intent(in) :: filename
@@ -396,7 +410,7 @@ subroutine read_smoother_parameters(filename,smoother_param)
   end if
   call mpi_bcast(smoother,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(ordering,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(rho,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(rho,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   smoother_param%smoother = smoother
   smoother_param%ordering = ordering
   smoother_param%rho = rho
@@ -419,8 +433,10 @@ subroutine read_multigrid_parameters(filename,mg_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif
 
   use MODE_OPENACC_SET_DEVICE, only : iswitch_cpu_gpu
@@ -537,8 +553,10 @@ subroutine read_conjugategradient_parameters(filename,cg_param)
   use messages
 #ifndef MNH
   use mpi
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   implicit none
   character(*), intent(in) :: filename
@@ -567,7 +585,7 @@ subroutine read_conjugategradient_parameters(filename,cg_param)
   end if
   call mpi_bcast(verbose,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(maxiter,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
-  call mpi_bcast(resreduction,1,MPI_DOUBLE_PRECISION,master_rank,NMNH_COMM_WORLD,ierr)
+  call mpi_bcast(resreduction,1,MNHREAL_MPI,master_rank,NMNH_COMM_WORLD,ierr)
   call mpi_bcast(n_prec,1,MPI_INTEGER,master_rank,NMNH_COMM_WORLD,ierr)
   cg_param%verbose = verbose
   cg_param%maxiter = maxiter
diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/parameters.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/parameters.f90
index 5195982fdeffc44390f2a2fa9fd116f6e0d76660..e37fb2a51dd4dff27ff74416be911a88de1cf653 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/parameters.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/parameters.f90
@@ -41,8 +41,12 @@ module parameters
 ! floating point precision. Always use rl_kind in code
   integer, parameter :: single_precision=4        ! single precision
   integer, parameter :: double_precision=8        ! double precision
+#ifndef MNH  
   integer, parameter :: rl=double_precision       ! global switch between
                                                   ! single/double precision
+#else
+  integer, parameter :: rl=MNH_REAL
+#endif
 ! NOTE: As we still use BLAS subroutines, these need to be
 !       replaced as well when switching between double and
 !       single precision!
diff --git a/src/ZSOLVER/tensorproductmultigrid_Source/timer.f90 b/src/ZSOLVER/tensorproductmultigrid_Source/timer.f90
index b4ae1d40732137665d597150b491d075f61ead1a..451f933ee11d5a4408168cfe015d2ad3a9485851 100644
--- a/src/ZSOLVER/tensorproductmultigrid_Source/timer.f90
+++ b/src/ZSOLVER/tensorproductmultigrid_Source/timer.f90
@@ -39,9 +39,11 @@ module timer
 #ifndef MNH
   use mpi
   use mpi, only NMNH_COMM_WORLD => MPI_COMM_WORLD
+  use mpi, only: MNHREAL_MPI => MPI_DOUBLE_PRECISION
 #else
   use modd_mpif
   USE MODD_VAR_ll,      ONLY: NMNH_COMM_WORLD
+  use modd_precision,   only: MNHREAL_MPI
 #endif 
   use parameters
 
@@ -167,11 +169,11 @@ contains
 
 
     elapsedtime = (t%elapsed) * factor
-    call mpi_reduce(elapsedtime,t_min,1,MPI_DOUBLE_PRECISION, &
+    call mpi_reduce(elapsedtime,t_min,1,MNHREAL_MPI, &
                     MPI_MIN, 0, NMNH_COMM_WORLD,ierr)
-    call mpi_reduce(elapsedtime,t_avg,1,MPI_DOUBLE_PRECISION, &
+    call mpi_reduce(elapsedtime,t_avg,1,MNHREAL_MPI, &
                     MPI_SUM, 0, NMNH_COMM_WORLD,ierr)
-    call mpi_reduce(elapsedtime,t_max,1,MPI_DOUBLE_PRECISION, &
+    call mpi_reduce(elapsedtime,t_max,1,MNHREAL_MPI, &
                     MPI_MAX, 0, NMNH_COMM_WORLD,ierr)
     call mpi_comm_size(NMNH_COMM_WORLD,nprocs,ierr)
     call mpi_comm_rank(NMNH_COMM_WORLD,rank,ierr)