#!/bin/bash

# This function writes a SLURM batch script. We can call it with different parameter
# settings to create different experiments
function write_script {
    JOB_NAME=$(printf 'expi%02dj%dk%d' ${I} ${J} ${K})
    DIR_NAME=$(printf '%s/nodes%03dnper%d' ${JOB_NAME} ${NODES} ${NPERNODE})
    if [ -d "$DIR_NAME" ] ; then
        echo "$DIR_NAME already exists, skipping..."
        return 0
    else
        echo "Creating job $DIR_NAME"
    fi
    mkdir -p "$DIR_NAME"

    # Write the batch script; variables are expanded now, so each job gets its
    # own parameter settings baked into the generated file
    cat << _EOF_ > ${DIR_NAME}/slurm-exp.bash
#!/bin/bash
#SBATCH --job-name=${JOB_NAME}
#SBATCH --output=slurm.out
#SBATCH --error=slurm.err
#SBATCH --partition=${QUEUE}
#SBATCH --nodes=${NODES}
#SBATCH --ntasks-per-node=${NPERNODE}

srun ${EXECUTABLE} --param1 ${I} --param2 ${J} --param3 ${K}
_EOF_

    chmod 775 ${DIR_NAME}/slurm-exp.bash

    # Append a submission line for this job to the master script
    echo "cd ${DIR_NAME}; sbatch slurm-exp.bash; cd \$BASEDIR" >> run_all_experiments.bash
}

# Create a script to submit all of our experiments to the scheduler.
# BASEDIR is resolved to an absolute path so each submission can cd back to it.
echo "#!/bin/bash" > run_all_experiments.bash
echo "BASEDIR=\$(cd \$(dirname \$0) && pwd)" >> run_all_experiments.bash
chmod 775 run_all_experiments.bash

# Loop through all the parameter combinations.
# For each combination, we'll run the experiment on one node with 1, 2, 4, and 8
# processes per node, and then with 8 processes per node on 2, 4, 8, 16, and 32 nodes.
QUEUE=parallel
EXECUTABLE=/home/araim1/myexecutable

for I in 5 10 20
do
    for (( J=2; J<=4; J++ ))
    do
        for K in 1 2 3
        do
            NODES=1
            for NPERNODE in 1 2 4 8
            do
                write_script
            done

            NPERNODE=8
            for NODES in 2 4 8 16 32
            do
                write_script
            done
        done
    done
done
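
# A minimal usage sketch (the generator filename "make_experiments.bash" below is
# an assumption for illustration, not part of the original):
#
#   bash make_experiments.bash        # creates expi*j*k*/nodes*nper*/slurm-exp.bash
#                                     # and appends one sbatch line per job to
#                                     # run_all_experiments.bash
#   ./run_all_experiments.bash        # submits every generated job to SLURM
#
# With I in {5,10,20}, J in {2,3,4}, and K in {1,2,3}, there are 27 parameter
# combinations; each produces 4 single-node jobs plus 5 multi-node jobs, for
# 27 * 9 = 243 job scripts in total.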