Ci-dessous, les différences entre deux révisions de la page.
Les deux révisions précédentes Révision précédente Prochaine révision | Révision précédente | ||
logiciels:amber [2014/10/14 11:06] montap01 [Amber] |
logiciels:amber [2016/06/10 15:08] (Version actuelle) 164.81.157.90 |
||
---|---|---|---|
Ligne 1: | Ligne 1: | ||
====== Amber ====== | ====== Amber ====== | ||
/* Description */ | /* Description */ | ||
- | //Assisted Model Building with Energy Refinement// | + | //Assisted Model Building with Energy Refinement// |
* Site web : http:// | * Site web : http:// | ||
Ligne 8: | Ligne 8: | ||
* Amber (payant) : v12 avec AmberTools v13 | * Amber (payant) : v12 avec AmberTools v13 | ||
- | FIXME installation | + | La compilation a été faite avec le support OpenMP (utilisable pour certains programmes comme '' |
+ | |||
+ | ===== Citation ===== | ||
+ | Si vous utilisez le support GPU : vous devez citer des articles dans vos publications. Voir http:// | ||
===== Utilisation ===== | ===== Utilisation ===== | ||
==== Sélection de la version ==== | ==== Sélection de la version ==== | ||
Pour sélectionner la version voulue : utiliser les [[..: | Pour sélectionner la version voulue : utiliser les [[..: | ||
- | Par exemple | + | Pour utiliser Amber 12 avec AmberTools 13 : |
- | module load amber | + | module load amber/12 |
+ | |||
+ | Pour utiliser Amber 12 avec AmberTools 14 : | ||
+ | module load amber/ | ||
==== Travailler avec slurm ==== | ==== Travailler avec slurm ==== | ||
/* Faut-il utiliser des fichiers de commandes spéciaux ? */ | /* Faut-il utiliser des fichiers de commandes spéciaux ? */ | ||
/* Exemples de batch spécifiques */ | /* Exemples de batch spécifiques */ | ||
+ | |||
+ | Si vous utilisez un des programmes Amber parallélisé via OpenMP, vous devez suivre l' | ||
+ | |||
+ | ==== Version GPU ==== | ||
+ | |||
+ | Une version GPU est compilée (http:// | ||
+ | |||
+ | Vous devez charger en plus le module CUDA version 5.0 : | ||
+ | module load nvidia/ | ||
+ | |||
+ | ===== Exemples ===== | ||
+ | |||
+ | ==== Serial ==== | ||
+ | |||
+ | |||
+ | #!/bin/bash | ||
+ | #SBATCH --nodes=1 | ||
+ | #SBATCH --ntasks=1 | ||
+ | #SBATCH --time=02: | ||
+ | #SBATCH --partition=normal | ||
+ | #SBATCH --cpus-per-task=1 | ||
+ | #SBATCH --mem-per-cpu=2000 | ||
+ | topname=del | ||
+ | crdname=del | ||
+ | CURR=$(pwd) | ||
+ | RESTART=YES | ||
+ | STEP=0 | ||
+ | FINAL=1 | ||
+ | module load amber/ | ||
+ | # MD are performed in Scratch | ||
+ | # Export running directory | ||
+ | export RUN_DIR=" | ||
+ | # Link the scratch directory to the current directory | ||
+ | # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name | ||
+ | ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results | ||
+ | echo ${RUN_DIR} | ||
+ | # Create Scratch directory | ||
+ | mkdir -p ${RUN_DIR} | ||
+ | # Copy Topology and CRD names into Scratch directory | ||
+ | cp ${topname}.prmtop *.in ${RUN_DIR} | ||
+ | # Copy starting CRD files according to restart procedure or not | ||
+ | if [ $RESTART = ' | ||
+ | cp ${topname}_md${STEP}.rst ${RUN_DIR}/ | ||
+ | else | ||
+ | STEP=0 | ||
+ | cp ${topname}.inpcrd ${RUN_DIR}/ | ||
+ | fi | ||
+ | # Enter into Scratch directory | ||
+ | cd ${RUN_DIR} | ||
+ | # Run calculation | ||
+ | # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories | ||
+ | for i in `seq $STEP 1 $FINAL` | ||
+ | do | ||
+ | j=$(( $i + 1 )) | ||
+ | # Run MD | ||
+ | pmemd -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd | ||
+ | # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory | ||
+ | tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd | ||
+ | rm -rf ${crdname}_${j}.mdcrd | ||
+ | done | ||
+ | |||
+ | ==== MPI ==== | ||
+ | |||
+ | #!/bin/bash | ||
+ | #SBATCH --nodes=1-X | ||
+ | #SBATCH --ntasks=X | ||
+ | #SBATCH --time=XX: | ||
+ | #SBATCH --partition=normal | ||
+ | #SBATCH --cpus-per-task=1 | ||
+ | #SBATCH --mem-per-cpu=XXX | ||
+ | topname=del | ||
+ | crdname=del | ||
+ | CURR=$(pwd) | ||
+ | RESTART=YES | ||
+ | STEP=0 | ||
+ | FINAL=1 | ||
+ | module load amber/ | ||
+ | # MD are performed in Scratch | ||
+ | # Export running directory | ||
+ | export RUN_DIR=" | ||
+ | # Link the scratch directory to the current directory | ||
+ | # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name | ||
+ | ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results | ||
+ | echo ${RUN_DIR} | ||
+ | # Create Scratch directory | ||
+ | mkdir -p ${RUN_DIR} | ||
+ | # Copy Topology and CRD names into Scratch directory | ||
+ | cp ${topname}.prmtop *.in ${RUN_DIR} | ||
+ | # Copy starting CRD files according to restart procedure or not | ||
+ | if [ $RESTART = ' | ||
+ | cp ${topname}_md${STEP}.rst ${RUN_DIR}/ | ||
+ | else | ||
+ | STEP=0 | ||
+ | cp ${topname}.inpcrd ${RUN_DIR}/ | ||
+ | fi | ||
+ | # Enter into Scratch directory | ||
+ | cd ${RUN_DIR} | ||
+ | # Run calculation | ||
+ | # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories | ||
+ | for i in `seq $STEP 1 $FINAL` | ||
+ | do | ||
+ | j=$(( $i + 1 )) | ||
+ | # Run MD | ||
+ | srun pmemd.MPI -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd | ||
+ | # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory | ||
+ | tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd | ||
+ | rm -rf ${crdname}_${j}.mdcrd | ||
+ | done | ||
+ | |||
+ | ==== GPU ==== | ||
+ | |||
+ | #!/bin/bash | ||
+ | #SBATCH --nodes=1 | ||
+ | #SBATCH --time=02: | ||
+ | #SBATCH --partition=gpu | ||
+ | #SBATCH --cpus-per-task=1 | ||
+ | #SBATCH --gres=gpu | ||
+ | topname=del | ||
+ | crdname=del | ||
+ | CURR=$(pwd) | ||
+ | RESTART=YES | ||
+ | STEP=0 | ||
+ | FINAL=1 | ||
+ | module load amber/ | ||
+ | module load nvidia/ | ||
+ | # MD are performed in Scratch | ||
+ | # Export running directory | ||
+ | export RUN_DIR=" | ||
+ | # Link the scratch directory to the current directory | ||
+ | # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name | ||
+ | ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results | ||
+ | echo ${RUN_DIR} | ||
+ | # Create Scratch directory | ||
+ | mkdir -p ${RUN_DIR} | ||
+ | # Copy Topology and CRD names into Scratch directory | ||
+ | cp ${topname}.prmtop *.in ${RUN_DIR} | ||
+ | # Copy starting CRD files according to restart procedure or not | ||
+ | if [ $RESTART = ' | ||
+ | cp ${topname}_md${STEP}.rst ${RUN_DIR}/ | ||
+ | else | ||
+ | STEP=0 | ||
+ | cp ${topname}.inpcrd ${RUN_DIR}/ | ||
+ | fi | ||
+ | # Enter into Scratch directory | ||
+ | cd ${RUN_DIR} | ||
+ | # Run calculation | ||
+ | # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories | ||
+ | for i in `seq $STEP 1 $FINAL` | ||
+ | do | ||
+ | j=$(( $i + 1 )) | ||
+ | # Run MD | ||
+ | pmemd.cuda -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd | ||
+ | # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory | ||
+ | tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd | ||
+ | rm -rf ${crdname}_${j}.mdcrd | ||
+ | done | ||
+ |