#!/bin/bash
######################################################################
# Submission script for GridEngine (GE). Each job will be executed
# via jobScript.sh, which is handed up to 13 parameters. Edit the
# user-specific part of the script according to your program.
#
# Input to the script is a file list with one file per line. For each
# file a job is started. With the parameter nFilesPerJob a
# comma-separated file list will be generated and handed to the job
# script. This feature is useful when running many small jobs. Each
# job has its own log file. All needed directories for the log files
# will be created if they do not exist.
#
# IMPORTANT: the hera/prometheus cluster jobs will only see the /hera
# file system. All needed scripts, programs and parameters have to be
# located on /hera or the job will crash. This script syncs your
# working dir to the submission dir on /hera. Make sure your scripts
# use the submission dir! Software should be taken from
# /cvmfs/hades.gsi.de/install/
#
# Job log files will be named like the input files. If
# nFilesPerJob > 1 the log files will contain the part number.
######################################################################

######################################################################
# CONFIGURATION
user=$(whoami)
currentDir=$(pwd)

# Pluto physics parameters
beamenergy=3.5   # beam energy in GeV
temp=0.08        # temperature in GeV
blast=0.3        # radial expansion velocity
sigma=0.8        # width of dN/dY distribution

# Decay-channel table: defines arr_pluto, arr_dimuon, arr_dalitz,
# arr_model, arr_dir and arr_decay, indexed by channel number.
. ../pluto_table.sh

# Decay channel: first command-line argument, default 10.
#if [[ -z ${de} ]] ; then
if [ "$#" -eq "1" ]; then
    de=$1
else
    de=10
fi

# Mother particle will decay either into a dielectron or dimuon pair
# plus possibly a third particle ('dalitz').
# Particle names must be as in PStdData.cc.
mother=${arr_pluto[$de]}
dimuon=${arr_dimuon[$de]}   # 0: decay into e+/e-, 1: decay into mu+/mu-
dalitz=${arr_dalitz[$de]}   # Pluto particle name for Dalitz decay, "no" if none
# Model bit flags:
#   first bit : Breit-Wigner distribution
#   second bit: VDM distribution (~1/M^3)
#   third bit : Boltzmann distribution (~e^(-E/kT))
#   fourth bit: use ee cutoff instead of 2-pion cutoff
model=${arr_model[$de]}

# Used to generate output directories and file names.
system=auau
type=${arr_dir[$de]}          # omega, phi, pi0, rho0
decay=${arr_decay[$de]}
energy=${beamenergy}gev
exec=sim_dilep                # name of executable to generate Pluto files
nfiles=5000                   # number of generic file names to create
nEvtsPerFile=1000             # events per output file

submissionbase=/hera/hades/user/${user}/pluto/macros
submissiondir=${submissionbase}/${exec}
nFilesPerJob=500              # number of files to be analyzed by 1 job (default==1)
jobscript=${submissiondir}/jobScript.sh   # exec script (full path, call without dot, set it executable!)
# NOTE(review): user "ekrebs" is hard-coded here while the submission
# paths use ${user} -- confirm this is intended before reuse.
outputdir=/hera/cbm/users/ekrebs/pluto/cktA/${energy}/${type}/${decay}   # outputdir for files AND logFiles
pathoutputlog=${outputdir}/out   # protocol from batch farm for each file
filename=pluto.${system}.${energy}.${type}.${decay}   # log-file base name if nFilesPerJob > 1 (part number appended)

par1=${submissiondir}/config_plutodev.sh   # optional par1 : environment script
par2=${submissiondir}/${exec}              # optional par2 : executable
par3=${outputdir}                          # optional par3 : outputdir
par4=""                                    # optional par4 : outputfile (from file list)
par5=${nEvtsPerFile}                       # optional par5 : number of events
par6=${beamenergy}                         # optional par6 : Pluto par. beam energy in GeV
par7=${temp}                               # optional par7 : Pluto par. temperature in GeV
par8=${blast}                              # optional par8 : Pluto par. radial expansion velocity
par9=${model}                              # optional par9 : decay model bit flags
par10=${mother}                            # optional par10: mother particle of reaction
par11=${dalitz}                            # optional par11: particle in Dalitz decays
par12=${dimuon}                            # optional par12: decay into dielectron or dimuon
par13=${sigma}                             # width of y-distribution

resources="-l h_rt=10:0:0,h_vmem=2G"       # runtime < 10h, mem < 2GB

filelist=${currentDir}/all_files.list      # file list in local dir! not in submissiondir!!!
createList=yes                             # (yes/no) create file list with generic names
                                           # (for simulation, testing); use "no" if you
                                           # have a file list available
######################################################################

#---------------------------------------------------------------------
# Create a file list for submission (simulation, testing etc.).
# For real data you will have a file list with real file names.
if [ "$createList" == "yes" ]; then
    if [ -f "$filelist" ]; then
        echo "===> REMOVING EXISTING FILELIST : $filelist"
        rm -f "$filelist"
    fi
    echo "===> CREATE FILELIST : $filelist"
    for ((ct = 1; ct <= nfiles; ct++)); do
        num=$(printf "%04i" "$ct")
        # FIX: original contained a broken $(unknown) placeholder here;
        # use the generic base name so entries match the log naming.
        echo "${filename}.${num}" >> "$filelist"
    done
fi
#---------------------------------------------------------------------

nFiles=$(wc -l < "$filelist")

#---------------------------------------------------------------------
# Create needed dirs.
if [ ! -d "$submissionbase" ]; then
    echo "===> CREATE SUBMISSIONBASEDIR : $submissionbase"
    mkdir -p "$submissionbase"
else
    echo "===> USE SUBMISSIONBASEDIR : $submissionbase"
fi

if [ ! -d "$pathoutputlog" ]; then
    echo "===> CREATE LOGDIR : $pathoutputlog"
    mkdir -p "$pathoutputlog"
else
    echo "===> USE LOGDIR : $pathoutputlog"
fi
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Sync the locally modified stuff to the submission dir.
echo "===> SYNC CURRENTDIR TO SUBMISSIONDIR : rsync -vHaz $currentDir ${submissionbase}"
rsync -vHaz "$currentDir" "${submissionbase}/"
syncStat=$?

if [ "$syncStat" -ne 0 ]; then
    echo "===> ERROR : SYNCHRONIZATION ENCOUNTERED PROBLEMS"
fi
echo "-------------------------------------------------"
#---------------------------------------------------------------------

ctF=0          # counter for file number
ctJ=0          # counter for job number
partNumber=0   # counter for part number

#---------------------------------------------------------------------
# Read the file list into a job array (one entry per line).
declare -a jobarray
ct1=0
while IFS= read -r file; do
    jobarray[ct1]=$file
    ((ct1 += 1))
done < "$filelist"
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Loop over the job array and submit parts with nFilesPerJob to GE.
while ((ctF < nFiles)); do
    #-----------------------------------------------------------------
    # Build comma-separated file list per job.
    if [ "$nFilesPerJob" -gt 1 ]; then
        infileList=${jobarray[ctF]}
        ((ctF += 1))
        for ((ctList = 1; ctList < nFilesPerJob; ctList++)); do
            if [ "$ctF" -lt "$nFiles" ]; then
                infileList="${infileList},${jobarray[ctF]}"
                ((ctF += 1))
            fi
        done
    else
        infileList=${jobarray[ctF]}
        ((ctF += 1))
    fi
    #-----------------------------------------------------------------

    ((partNumber += 1))

    # FIX: original contained a broken $(unknown) placeholder here; the
    # 'filename' variable is documented as the log base name when
    # nFilesPerJob > 1 (part number appended).
    logfile="${pathoutputlog}/${filename}_${partNumber}.log"
    if [ "$nFilesPerJob" -eq 1 ]; then
        file=$(basename "$infileList")
        logfile="${pathoutputlog}/${file}.log"
    fi

    if [ -f "$logfile" ]; then
        rm -f "$logfile"
    fi

    echo "-----------------------------------------------------------------------------"
    echo "add part ${partNumber} last file ${ctF} of $nFiles ====> add new job ${infileList}"

    ##################################################################
    # SEND NEW JOB (USER SPECIFIC)
    par4=${infileList}

    # Intentionally left unquoted at the qsub call below: the string
    # must word-split into separate qsub options and job arguments.
    command="-j y -wd ${submissiondir} ${resources} -o ${logfile} \
    ${jobscript} ${par1} ${par2} ${par3} ${par4} ${par5} ${par6} ${par7} ${par8} ${par9} ${par10} ${par11} ${par12} ${par13}"
    #jobscript.sh defall.sh prog outdir outfile nev

    echo qsub ${command}

    # Submit only if the earlier rsync succeeded; otherwise the job
    # would run against a stale or incomplete submission dir.
    if [ "$syncStat" -ne 0 ]; then
        echo "===> ERROR : SYNCHRONIZATION ENCOUNTERED PROBLEMS"
    else
        #echo ${command}
        qsub ${command}
    fi
    ##################################################################
done
#---------------------------------------------------------------------