# AMBER Example Batch Script for Ruby
# Amber 16 inter-node multi-GPU pmemd.cuda.MPI job.
# This type of job is only sensible for REMD calculations.
#
#PBS -N jac9999multinodecudampi.ruby
#PBS -j oe
#PBS -m ae
#PBS -M scott@osc.edu
#PBS -l walltime=0:10:00
# ppn should be >= gpus
##PBS -l nodes=2:ppn=1:gpus=1,advres=new-kernel-testing.776904
#PBS -l nodes=2:ppn=20:gpus=1
#PBS -S /bin/csh

set echo
set verbose
# Emit verbose details on the job's queuing.
qstat -f $PBS_JOBID
module load amber/16
module load cuda/8.0.44
module list
echo "AMBERHOME=$AMBERHOME"
#
# PBS_O_WORKDIR refers to the directory from which the job was submitted.
echo "PBS_O_WORKDIR=$PBS_O_WORKDIR"
cd $TMPDIR
#
# The file names below may need to be changed.
set MDIN=mdin9999
set MDOUT=mdout9999
set MDINFO=mdinfo
set PRMTOP=prmtop
set INPCRD=inpcrd.equil
set REFC=refc
set MDCRD=mdcrd
set MDVEL=mdvel
set MDEN=mden
set RESTRT=restrt
cp -p $PBS_O_WORKDIR/$MDIN .
cp -p $PBS_O_WORKDIR/$PRMTOP .
cp -p $PBS_O_WORKDIR/$INPCRD .
cp -p $PBS_O_WORKDIR/$REFC .
#cp -p $PBS_O_WORKDIR/$RESTRT .
#
# pmemd.cuda.MPI uses CUDA_VISIBLE_DEVICES to select an available GPU;
# this is not necessary on Ruby, which has only 1 GPU per node.
#setenv CUDA_VISIBLE_DEVICES "0,1"
#
# These commands report the status of the GPUs; this is sometimes useful
# for detecting if other batch jobs are using the GPUs properly.
pdsh nvidia-smi
#
set ngpus=`cat $PBS_GPUFILE | wc -l`
cat $PBS_GPUFILE
# Some jobs may require the -O option on the Amber command lines below.
mpiexec -n $ngpus pmemd.cuda.MPI -i $MDIN -o $MDOUT -inf $MDINFO -p $PRMTOP -c $INPCRD -ref $REFC -x $MDCRD -v $MDVEL -e $MDEN -r $RESTRT
ls -al
cp -p $MDOUT $MDINFO $MDCRD $MDVEL $MDEN $RESTRT $PBS_O_WORKDIR
cat $MDOUT
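#
# Usage sketch (not part of the original example): assuming the lines above
# are saved as amber_ruby_remd.csh (a hypothetical file name) in a directory
# containing the mdin9999, prmtop, inpcrd.equil, and refc input files, the job
# could be submitted and monitored from a Ruby login node with standard
# PBS/Torque commands, roughly as follows:
#
#   qsub amber_ruby_remd.csh    # submit; prints the job ID
#   qstat -u $USER              # check the job's queue status
#
# After the job finishes, the outputs copied back at the end of the script
# (mdout9999, mdinfo, mdcrd, mdvel, mden, restrt) should appear in the
# directory from which the job was submitted ($PBS_O_WORKDIR).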