#!/bin/bash
# Run script for mppnp in MPI parallel mode on a number of machines.
#
# If you use a machines file, copy it into the RUN directory. For example,
# on helix@UVic a machines template file is in helix:/rpod2/fherwig/TOOLS
# together with a mntrpod2.sh script that you need to run to make sure
# that rpod2 is mounted on all participating machines.
#
# The location of mpiexec should now be provided by the system, either
# through the use of modules on supercomputers or through the PATH
# variable in .bashrc.
# (However, it seems we still need to call this with a full path name,
# and we try to understand why.)

PPN_DIR=/nfs/rpod2/critter/PPN/MPPNP_set1extension/
MPIEX=/rpod2/opt/helix_falk/openmpi-1.4/bin/mpiexec
EXE=mppnp.exe
FULL_EXE="$PPN_DIR/mppnp/CODE/$EXE"
#FULL_EXE="$PPN_DIR/mppnp/CODE_original/$EXE"

# Create output directories if needed (-p makes this a no-op when they exist).
mkdir -p H5_surf H5_out H5_restart

# How many procs:
NPROC=54 #62 #lascar for cpu mixburn only

# How nice are you?
NVAL=19

# What is the name of your machines file; NONE for no machines file.
MACHINEFILE=NONE
MACHINEFILE=machines   # NOTE: this assignment overrides the NONE above

# Set to allow unlimited stack memory.
ulimit -s unlimited

# Now just fire up the run; you should not need to change anything below.
if [ "$MACHINEFILE" = 'NONE' ]; then
  # Normal (non-debug) invocation kept for reference:
  #"$MPIEX" --display-map --verbose -np "$NPROC" nice -n "$NVAL" "$FULL_EXE" > out 2> err.log &
  # Debug invocation: each rank runs under gdb inside its own xterm.
  "$MPIEX" --display-map --verbose -np "$NPROC" xterm -e gdb "$FULL_EXE"
  echo "$MPIEX" --display-map --verbose -np "$NPROC" xterm -e gdb "$FULL_EXE"
else
  "$MPIEX" -np "$NPROC" -hostfile "$MACHINEFILE" nice -n "$NVAL" "$FULL_EXE" > out 2> err.log &
fi