Méso-NH code / Commits

Commit 1701e3a6
authored 5 years ago by Juan Escobar
Juan 01/04/2020: For Irene-fr/Skylake/Knl, update configure to use openmpi/2.0.4
parent efba5fa0
Tags containing commit: PACK-MNH-V5-4-3
Showing 3 changed files with 18 additions and 12 deletions:

  A-INSTALL                            +10 −4
  src/configure                         +3 −3
  src/job_make_examples_BullX_irene     +5 −5
A-INSTALL  +10 −4
@@ -602,12 +602,12 @@ sbatch job_make_examples_BullX_occigen
 # but with a commun disk space , connect to :
 #
 # - ssh irene-fr : for Intel SkyLake/KNL processors
-#   On Intel processors the MPI use is INTELMPI
+#   On Intel processors the MPI use is OPENMPI/2.0.4
 #   the configure will generate a
-#   profile_mesonh-LXifort-R8I4-MNH-V5-4-3-MPIINTEL-O2
+#   profile_mesonh-LXifort-R8I4-MNH-V5-4-3-MPIAUTO-O2
 #
 # - ssh irene-amd : for AMD , processors
-#   On AMD processors the MPI use is OPENMPI
+#   On AMD processors the MPI use is OPENMPI/4.02
 #   the configure will generate a
 #   profile_mesonh-LXifort-R8I4-MNH-V5-4-3-AMD-MPIAUTO-O2
 #
@@ -616,8 +616,14 @@ sbatch job_make_examples_BullX_occigen
 #
 # - to run the test case examples run
 #
-ccc_msub job_make_examples_BullX_irene{_AMD}
+# On intel Skylake
+ccc_msub job_make_examples_BullX_irene
+# On intel Knl
+ccc_msub -q knl job_make_examples_BullX_irene
+# On intel AMD
+ccc_msub job_make_examples_BullX_irene_AMD
 #
 # at ECMWF on cca ( CRAY/XC30 ) :
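For orientation, a minimal sketch of the Irene-fr workflow these A-INSTALL lines describe, as it reads after this commit. The directory name and the omitted compilation step are assumptions for illustration, not part of the diff:

    ssh irene-fr                                    # Intel SkyLake/KNL front-end
    cd MNH-V5-4-3/src && ./configure                # now defaults to OPENMPI/2.0.4, generates the MPIAUTO profile
    . ../conf/profile_mesonh-LXifort-R8I4-MNH-V5-4-3-MPIAUTO-O2
    # (build step omitted here)
    ccc_msub job_make_examples_BullX_irene          # run the examples on SkyLake
    ccc_msub -q knl job_make_examples_BullX_irene   # or on KNL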
src/configure  +3 −3
@@ -123,16 +123,15 @@ module load ncl_ncarg/6.3.0
 *Intel*) # Irene Intel core
 export MNH_ARCH=`echo $ARCH | grep LX`
 export ARCH=${MNH_ARCH:-LXifort}
-export VER_MPI=${VER_MPI:-MPIINTEL}
+export VER_MPI=${VER_MPI:-MPIAUTO}
 export OPTLEVEL=${OPTLEVEL:-O2}
 export MVWORK=${MVWORK:-NO}
 export VER_CDF=${VER_CDF:-CDFAUTO}
 export MNHENV=${MNHENV:-"
 module purge
 module load intel/18.0.3.222
-module load mpi/intelmpi/2018.0.3.222
+module load mpi/openmpi/2.0.4
 export SLURM_CPU_BIND=none
-export I_MPI_PIN_PROCESSOR_LIST=all:map=spread
 "}
 ;;
 *AMD*) # Irene AMD core
@@ -147,6 +146,7 @@ export I_MPI_PIN_PROCESSOR_LIST=all:map=spread
 module purge
 module load intel/19.0.5.281
 module load mpi/openmpi/4.0.2
+export SLURM_CPU_BIND=none
 # Set some openmpi variable for pb with nb of cores >> 1024
 export OMPI_MCA_coll_hcoll_enable=0
 export HCOLL_ENABLE_MCAST_ALL=0
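The ${VER_MPI:-MPIAUTO} style defaults above mean that anything exported before running configure still takes precedence over the new defaults. A minimal sketch of such an override on irene-fr (hypothetical choice of values; only options actually supported by the Méso-NH build system will work):

    export VER_MPI=MPIINTEL   # keep the previous Intel MPI selection instead of the new MPIAUTO default
    export OPTLEVEL=O2        # optional, O2 is already the default
    ./configure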
src/job_make_examples_BullX_irene  +5 −5
 #!/bin/bash
 #MSUB -J Examples
 #MSUB -N 2 # nodes number
 #MSUB -n 4 # CPUs number (on all nodes)
-#MSUB --exclusive
 #MSUB -o Examples.eo%j #
 #MSUB -e Examples.eo%j #
 #MSUB -T 3600 # time limit
@@ -11,6 +10,7 @@
 #MSUB -x # exclusive usage of nodes
 #MSUB -X # enable X forwarding -> force 'ccc_msub -i ' to get it work
 #MSUB -q skylake
+##MSUB -q knl
 # Echo des commandes
 ulimit -c 0
@@ -21,14 +21,14 @@ set +x
 # Nom de la machine
 hostname
-. ../conf/profile_mesonh-LXifort-R8I4-MNH-V5-4-3-MPIINTEL-O2
+. ../conf/profile_mesonh-LXifort-R8I4-MNH-V5-4-3-MPIAUTO-O2
 set -x
 module list
-export MONORUN="Exec ccc_mprun -n 1 "
+export MONORUN="Mpirun -tag-output --report-bindings -np 1 "
-export MPIRUN="Exec ccc_mprun -n 4 "
+export MPIRUN="Mpirun -tag-output --report-bindings -np 4 "
 export POSTRUN="echo "
 cd $SRC_MESONH/MY_RUN/KTEST/003_KW78
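For illustration only (the KTEST run scripts themselves are not touched by this commit): where a test script launches an executable through the variables exported above, the expansion before and after this change would look roughly like the following, with some_exe standing in for any example executable:

    # before:  ${MPIRUN} some_exe   ->   Exec ccc_mprun -n 4 some_exe
    # after:   ${MPIRUN} some_exe   ->   Mpirun -tag-output --report-bindings -np 4 some_exe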