Outils pour utilisateurs

Outils du site


logiciels:amber

Différences

Ci-dessous, les différences entre deux révisions de la page.

Lien vers cette vue comparative

Les deux révisions précédentes Révision précédente
Prochaine révision
Révision précédente
logiciels:amber [2015/07/07 18:54]
montap01 [Amber]
logiciels:amber [2016/06/10 15:08] (Version actuelle)
164.81.157.90
Ligne 27: Ligne 27:
  
 Si vous utilisez un des programmes Amber parallélisé via OpenMP, vous devez suivre l'[[:slurm:slurm-multithread | exemple Slurm pour OpenMP]] pour en tirer profit. Si vous utilisez un des programmes Amber parallélisé via OpenMP, vous devez suivre l'[[:slurm:slurm-multithread | exemple Slurm pour OpenMP]] pour en tirer profit.
 +
 +==== Version GPU ====
 +
 +Une version GPU est compilée (http://ambermd.org/gpus12/). L'exécutable s'appelle ''pmemd.cuda'' et doit être exécuté sur le nœud GPU, voir les instructions s'appliquant à CALI sur [[:gpu:utilisation | la page GPU]].
 +
 +Vous devez charger en plus le module CUDA version 5.0 : 
 +  module load nvidia/cuda/5.0
 +
 +===== Exemples =====
 +
 +==== Serial ====
 +
 +
 +  #!/bin/bash
 +  #SBATCH --nodes=1
 +  #SBATCH --ntasks=1
 +  #SBATCH --time=02:00:00
 +  #SBATCH --partition=normal
 +  #SBATCH --cpus-per-task=1
 +  #SBATCH --mem-per-cpu=2000 
 +  # User-editable names: topology/coordinate file prefixes (placeholders)
 +  topname=del
 +  crdname=del
 +  CURR=$(pwd)
 +  # RESTART=YES resumes from ${topname}_md${STEP}.rst; otherwise start from .inpcrd
 +  RESTART=YES
 +  STEP=0
 +  FINAL=1
 +  module load amber/16-patched-10062016
 +  # MD runs are performed in the scratch area 
 +  # Export the running directory
 +  export RUN_DIR="${HOME}/scratch/Amber/run.${SLURM_JOB_ID}.amber16.${topname}"
 +  # Link the scratch directory to the current directory 
 +  # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name
 +  ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results
 +  echo ${RUN_DIR} 
 +  # Create Scratch directory
 +  mkdir -p ${RUN_DIR}
 +  # Copy Topology and CRD names into Scratch directory
 +  cp ${topname}.prmtop *.in ${RUN_DIR}
 +  # Copy starting CRD files according to restart procedure or not
 +  if [ $RESTART = 'YES' ]; then
 +      cp ${topname}_md${STEP}.rst ${RUN_DIR}/
 +  else
 +      STEP=0
 +      cp ${topname}.inpcrd ${RUN_DIR}/
 +  fi
 +  # Enter into Scratch directory
 +  cd ${RUN_DIR}
 +  # Run calculation 
 +  # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories
 +  for i in `seq $STEP 1 $FINAL`
 +  do
 +      j=$(( $i + 1 ))
 +      # Run MD (serial pmemd executable)
 +      pmemd -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd
 +      # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory
 +      tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd
 +      rm -rf ${crdname}_${j}.mdcrd
 +  done
 +
 +==== MPI ====
 +
 +  #!/bin/bash
 +  #SBATCH --nodes=1-X
 +  #SBATCH --ntasks=X
 +  #SBATCH --time=XX:00:00
 +  #SBATCH --partition=normal
 +  #SBATCH --cpus-per-task=1
 +  #SBATCH --mem-per-cpu=XXX 
 +  topname=del
 +  crdname=del
 +  CURR=$(pwd)
 +  RESTART=YES
 +  STEP=0
 +  FINAL=1
 +  module load amber/16-patched-10062016
 +  # MD are performed in Scratch 
 +  # Export running directectory
 +  export RUN_DIR="${HOME}/scratch/Amber/run.${SLURM_JOB_ID}.amber16.${topname}"
 +  # Link the scratch directory to the current directory 
 +  # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name
 +  ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results
 +  echo ${RUN_DIR} 
 +  # Create Scratch directory
 +  mkdir -p ${RUN_DIR}
 +  # Copy Topology and CRD names into Scratch directory
 +  cp ${topname}.prmtop *.in ${RUN_DIR}
 +  # Copy starting CRD files according to restart procedure or not
 +  if [ $RESTART = 'YES' ]; then
 +      cp ${topname}_md${STEP}.rst ${RUN_DIR}/
 +  else
 +      STEP=0
 +      cp ${topname}.inpcrd ${RUN_DIR}/
 +  fi
 +  # Enter into Scratch directory
 +  cd ${RUN_DIR}
 +  # Run calculation 
 +  # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories
 +  for i in `seq $STEP 1 $FINAL`
 +  do
 +      j=$(( $i + 1 ))
 +      # Run MD
 +      srun premed.MPI -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd
 +      # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory
 +      tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd
 +      rm -rf ${crdname}_${j}.mdcrd
 +  done
 +
 +==== GPU ====
 +
 +  #!/bin/bash
 +  #SBATCH --nodes=1
 +  #SBATCH --time=02:00:00
 +  #SBATCH --partition=gpu
 +  #SBATCH --cpus-per-task=1
 +  #SBATCH --gres=gpu
 +  # User-editable names: topology/coordinate file prefixes (placeholders)
 +  topname=del
 +  crdname=del
 +  CURR=$(pwd)
 +  # RESTART=YES resumes from ${topname}_md${STEP}.rst; otherwise start from .inpcrd
 +  RESTART=YES
 +  STEP=0
 +  FINAL=1
 +  module load amber/16-patched-10062016
 +  module load nvidia/cuda/7.5
 +  # MD runs are performed in the scratch area 
 +  # Export the running directory
 +  export RUN_DIR="${HOME}/scratch/Amber/run.${SLURM_JOB_ID}.amber16.${topname}"
 +  # Link the scratch directory to the current directory 
 +  # Link name is related to JOB ID and NOT TO JOBNAME --> Prevent issues related to new MDs with same name
 +  ln -sfn ${RUN_DIR} ${SLURM_JOB_ID}.results
 +  echo ${RUN_DIR} 
 +  # Create Scratch directory
 +  mkdir -p ${RUN_DIR}
 +  # Copy Topology and CRD names into Scratch directory
 +  cp ${topname}.prmtop *.in ${RUN_DIR}
 +  # Copy starting CRD files according to restart procedure or not
 +  if [ $RESTART = 'YES' ]; then
 +      cp ${topname}_md${STEP}.rst ${RUN_DIR}/
 +  else
 +      STEP=0
 +      cp ${topname}.inpcrd ${RUN_DIR}/
 +  fi
 +  # Enter into Scratch directory
 +  cd ${RUN_DIR}
 +  # Run calculation 
 +  # Keep in mind: Never run a long MD into a single trajectory file - Split it into several trajectories
 +  for i in `seq $STEP 1 $FINAL`
 +  do
 +      j=$(( $i + 1 ))
 +      # Run MD (CUDA-accelerated pmemd executable)
 +      pmemd.cuda -O -i md.in -o ${crdname}_md${j}.out -p $topname.prmtop -c ${crdname}_md${i}.rst -r ${crdname}_md${j}.rst -x ${crdname}_${j}.mdcrd
 +      # Compress mdcrd file to prevent storage issues and remove the uncompressed trajectory
 +      tar -zcvf ${crdname}_${j}.mdcrd.tar.gz ${crdname}_${j}.mdcrd
 +      rm -rf ${crdname}_${j}.mdcrd
 +  done
 +
logiciels/amber.1436288089.txt.gz · Dernière modification: 2015/07/07 18:54 de montap01