#------------------------------------------------------------------
# Purpose: A general-purpose larsoft batch worker script.
# Adapted from condor_lBdetMC.sh by E. Church.
# condor_lar.sh [options]
# -c, --config <arg> - Configuration (fcl) file (required).
# -s, --source <arg> - Input file (full path).
# -S, --source-list <arg> - Input file list (full path, one per line).
# -o, --output <arg> - Output file name.
# -T, --TFileName <arg> - TFile output file name.
# -n, --nevts <arg> - Number of events to process.
# --nskip <arg> - Number of events to skip.
# --nfile <arg> - Number of files to process per worker.
# --nfile_skip <arg> - Number of files to skip (use with option -S).
# --inputmode <arg> - Input mode ('textfile' or '', default '').
# --args <args...> - Arguments for lar command line (place at end).
# Sam and parallel project options.
# --sam_user <arg> - Specify sam user (default $GRID_USER).
# --sam_group <arg> - Specify sam group (default --group option).
# --sam_station <arg> - Specify sam station (default --group option).
# --sam_defname <arg> - Sam dataset definition name.
# --sam_project <arg> - Sam project name.
# --sam_start - Specify that this worker should be responsible for
#               starting and stopping the sam project.
# --recur - Recursive input dataset (force snapshot).
# --sam_schema <arg> - Use this option with argument "root" to stream files using
#                      xrootd. Leave this option out for standard file copy.
# --njobs <arg> - Parallel project with specified number of jobs (default one).
# --single - Specify that the output and log directories will be emptied
#            by the batch worker, and therefore the output and log
#            directories will only ever contain output from a single worker.
# Mix input options (second input stream).
# --mix_defname <arg> - Specify mix input sam dataset definition.
# --mix_project <arg> - Specify mix input sam project.
# --ups <arg> - Comma-separated list of top level run-time ups products.
# -r, --release <arg> - Release tag.
# -q, -b, --build <arg> - Release build qualifier (default "debug", or "prof").
# --localdir <arg> - Larsoft local test release directory (default none).
# --localtar <arg> - Tarball of local test release.
# --mrb - Ignored (for compatibility).
# --srt - Exit with error status (SRT run time no longer supported).
# -h, --help - Print help.
# -i, --interactive - For interactive use.
# -g, --grid - Be grid-friendly.
# --group <arg> - Group or experiment (required).
# --workdir <arg> - Work directory (required).
# --outdir <arg> - Output directory (required).
# --logdir <arg> - Log directory (required).
# --scratch <arg> - Scratch directory (only for interactive).
# --cluster <arg> - Job cluster (override $CLUSTER).
# --process <arg> - Process within cluster (override $PROCESS).
# --procmap <arg> - Name of process map file (override $PROCESS).
# --init-script <arg> - User initialization script to execute.
# --init-source <arg> - User initialization script to source (bash).
# --end-script <arg> - User end-of-job script to execute.
# --exe <arg> - Specify art-like executable (default "lar").
# --init <path> - Absolute path of environment initialization script.
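#
# Example (hypothetical values; a minimal interactive invocation sketch,
# not a recommended production command):
#
#   condor_lar.sh -c prodgenie.fcl -n 100 -r v08_05_00 -b e17:prof \
#     --group myexp --workdir /path/to/work --outdir /path/to/out \
#     --logdir /path/to/log --scratch /path/to/scratch -i
#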
# Run time environment setup.
# MRB run-time environment setup is controlled by four options:
# --release (-r), --build (-b, -q), --localdir, and --localtar.
# a) Use option --release or -r to specify version of top-level product(s).
# b) Use option --build or -b to specify build full qualifiers (e.g.
#    "debug:e5" or "e5:prof").
# c) Options --localdir or --localtar are used to specify your local
#    test release. Use one or the other (not both).
# Use --localdir to specify the location of your local install
# directory ($MRB_INSTALL).
# Use --localtar to specify the location of a tarball of your
# install directory (made relative to $MRB_INSTALL).
# Note that --localdir is not grid-friendly.
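#
# For example, to run a tagged release together with a local test release
# shipped as a tarball (version, qualifier, and path hypothetical):
#
#   condor_lar.sh -c job.fcl -r v08_05_00 -b e17:prof \
#     --localtar /pnfs/mydir/local.tar ...
#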
# 1. Each batch worker is uniquely identified by two numbers stored
#    in environment variables $CLUSTER and $PROCESS (the latter is
#    a small integer that starts from zero and varies for different
#    jobs in a parallel job group). These environment variables are
#    normally set by the batch system, but can be overridden by options
#    --cluster, --process, and --procmap (e.g. to rerun failed jobs).
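#
#    For example (hypothetical), to rerun only failed processes 3 and 7 of
#    an earlier cluster, submit two jobs with a process map file containing
#    the lines "3" and "7"; worker $PROCESS=0 is then remapped to 3, and
#    worker $PROCESS=1 to 7.
#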
# 2. The work directory must be set to an existing directory owned
#    by the submitter and readable by the batch worker. Files from the
#    work directory are copied to the batch worker scratch directory at
#    the start of the job.
# 3. The job configuration file (-c option), initialization and end-of-job
#    scripts (options --init-script, --init-source, --end-script) may
#    be stored in the work directory specified by option --workdir, or they
#    may be specified as absolute paths visible on the worker node.
# 4. A local test release may be specified as an absolute path using
#    --localdir, or a tarball using --localtar. The location of the tarball
#    may be specified as an absolute path visible on the worker, or a
#    path relative to the work directory.
# 5. The output directory must exist and be writable by the batch
#    worker (i.e. be group-writable for grid jobs). The worker
#    makes a new subdirectory called ${CLUSTER}_${PROCESS} in the output
#    directory and copies all files in the batch scratch directory there
#    at the end of the job. If the output directory is not specified, the
#    default is /grid/data/<group>/outstage/<user> (user is defined as
#    owner of work directory).
# 6. Parallel projects are specified whenever --njobs is specified to
#    be greater than one. Parallel projects are supported for single file,
#    file list, and sam project input.
#    In all cases, each worker processes some number of complete files.
#    If the number of jobs is greater than the number of input files, some
#    workers will not have any input files to process.
#    In any case, options --nfile and --nevts can be used to limit the
#    number of files or events that are processed by a single worker,
#    regardless of the way files are divided among the workers.
#    Option --njobs is incompatible with options --nskip and --nfile_skip.
#    a) Non-sam (single file or file list) input.
#       In this case, input files are preassigned to workers such that all input
#       files are approximately evenly divided among the workers. All files
#       preassigned to this worker are copied to the scratch directory at the
#       start of the job (see the worked example after item b).
#    b) Sam project input.
#       In this case, files are assigned to workers in a non-deterministic
#       manner by the sam system. The sam system fetches input files to the
#       scratch directory and deletes processed input files during job execution.
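#
#    As an illustration of the even division in case a) (hypothetical
#    numbers, mirroring the integer arithmetic in the file-list branch
#    below), with NJOBS=4 and 10 input files:
#
#      PROCESS=0 -> skip 0, process 2
#      PROCESS=1 -> skip 2, process 3
#      PROCESS=2 -> skip 5, process 2
#      PROCESS=3 -> skip 7, process 3
#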
# 7. Using option -n or --nevts to limit number of events processed:
#    a) If no input files are specified (e.g. mc generation), --nevts
#       specifies total number of events among all workers.
#    b) If input files are specified, --nevts specifies total number of
#       events processed by each worker or from each input file, whichever
#       is less (a worked example follows).
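#
#    For case a), the generation workload is split evenly across workers
#    (hypothetical numbers, mirroring the first-event arithmetic below),
#    e.g. with --nevts 100 and --njobs 3:
#
#      PROCESS=0 -> events 1-33   (NSKIP=0,  NEV=33)
#      PROCESS=1 -> events 34-66  (NSKIP=33, NEV=33)
#      PROCESS=2 -> events 67-100 (NSKIP=66, NEV=34)
#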
# 8. The interactive option (-i or --interactive) allows this script
#    to be run interactively by overriding some settings that are normally
#    obtained from the batch system, including $CLUSTER, $PROCESS, and
#    the scratch directory. Interactive jobs always set PROCESS=0 (unless
#    overridden by --process).
# 9. The grid option (-g or --grid) instructs this script to use grid-
#    friendly tools. This means that there must be no direct access to
#    bluearc disks. File transfers are done using gridftp or another
#    grid-friendly protocol. Local test releases are not allowed to
#    be specified as directories (--localdir), but may be specified as
#    tarballs (--localtar).
# 10. Mix options (--mix_defname, --mix_project) are only partially handled
#     in this script. These options are parsed and their values are stored
#     in shell variables. It is assumed that the sam project specified
#     by --mix_project has been started externally, unless --sam_start is
#     also specified, in which case this script will start the project.
#     This script does not include any provision for joining the project.
#     Further processing of these options (joining the sam project, generating
#     command line options or fcl wrappers) should be handled by user-
#     provided initialization scripts (--init-script, --init-source).
# 11. Option --init <path> is optional. If specified, it should point to
#     the absolute path of the experiment environment initialization script,
#     which must be visible from the batch worker (e.g. /cvmfs/...).
#     If this option is not specified, this script will look for and source
#     a script with the hardwired name "setup_experiment.sh" in directory
#     ${CONDOR_DIR_INPUT}.
# Created: H. Greenlee, 29-Aug-2012
#------------------------------------------------------------------
while [ $# -gt 0 ]; do
      awk '/^# Usage:/,/^# End options/{print $0}' $0 | cut -c3- | head -n -2
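      # The help text printed above is everything in this file between the
      # "# Usage:" and "# End options" markers, with the leading "# " stripped.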
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # Number of events to skip.
      if [ $# -gt 1 ]; then
    # Number of files to process.
      if [ $# -gt 1 ]; then
    # Number of files to skip.
      if [ $# -gt 1 ]; then
    # Number of parallel jobs.
      if [ $# -gt 1 ]; then
    # Single worker mode.
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # Sam dataset definition name.
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # Sam start/stop project flag.
      if [ $# -gt 1 ]; then
    # General arguments for lar command line.
      if [ $# -gt 1 ]; then
    # Top level ups products (comma-separated list).
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # Release build qualifier.
      if [ $# -gt 1 ]; then
    # Local test release directory.
      if [ $# -gt 1 ]; then
    # Local test release tarball.
      if [ $# -gt 1 ]; then
      echo "SRT run time environment is no longer supported."
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # Process within cluster.
      if [ $# -gt 1 ]; then
      if [ $# -gt 1 ]; then
    # User initialization script.
      if [ $# -gt 1 ]; then
    # User source initialization script.
      if [ $# -gt 1 ]; then
    # User end-of-job script.
      if [ $# -gt 1 ]; then
    # Declare good output root files to SAM.
    # Run validation steps in project.py on root outputs directly in the job.
    # Copy output to FTS.
    # Mix input sam dataset.
      if [ $# -gt 1 ]; then
    # Mix input sam project.
      if [ $# -gt 1 ]; then
    # Alter the output file's parentage such that its parents are from the input list or the sam process.
    --maintain_parentage )
    # Specify alternate art-like executable.
      if [ $# -gt 1 ]; then
    # Specify environment initialization script path.
      if [ $# -gt 1 ]; then
      echo "Unknown option $1"
#echo "INFILE=$INFILE"
#echo "INLIST=$INLIST"
#echo "OUTFILE=$OUTFILE"
#echo "NFILE_SKIP=$NFILE_SKIP"
#echo "LOCALDIR=$LOCALDIR"
#echo "LOCALTAR=$LOCALTAR"
#echo "INTERACTIVE=$INTERACTIVE"
#echo "WORKDIR=$WORKDIR"
#echo "OUTDIR=$OUTDIR"
#echo "LOGDIR=$LOGDIR"
#echo "SCRATCH=$SCRATCH"
#echo "INITSCRIPT=$INITSCRIPT"
#echo "INITSOURCE=$INITSOURCE"
#echo "ENDSCRIPT=$ENDSCRIPT"
#echo "VALIDATE_IN_JOB=$VALIDATE_IN_JOB"
# Done with arguments.
echo "Nodename: `hostname -f`"
if [ x$QUAL = x ]; then
if [ x$SAM_GROUP = x ]; then
if [ x$SAM_STATION = x ]; then
# Standardize sam_schema (xrootd -> root).
if [ x$SAM_SCHEMA = xxrootd ]; then
# Fix for sites with newer linux kernels:
  3.*) export UPS_OVERRIDE="-H Linux64bit+2.6-2.12";;
  4.*) export UPS_OVERRIDE="-H Linux64bit+2.6-2.12";;
echo "uname -r: `uname -r`"
echo "UPS_OVERRIDE: $UPS_OVERRIDE"
# Make sure work directory is defined and exists.
if [ x$WORKDIR = x ]; then
  echo "Work directory not specified."
if [ $GRID -eq 0 -a ! -d $WORKDIR ]; then
  echo "Work directory $WORKDIR does not exist."
echo "Work directory: $WORKDIR"
echo "Condor dir input: $CONDOR_DIR_INPUT"
# Initialize experiment ups products and mrb.
echo "Initializing ups and mrb."
if [ x$INIT != x ]; then
  if [ ! -f $INIT ]; then
    echo "Environment initialization script $INIT not found."
  echo "Sourcing $INIT"
  echo "Sourcing setup_experiment.sh"
  source ${CONDOR_DIR_INPUT}/setup_experiment.sh
echo PRODUCTS=$PRODUCTS
# Ifdh may already be set up by the jobsub wrapper.
# If not, set it up here.
echo "IFDHC_DIR=$IFDHC_DIR"
if [ x$IFDHC_DIR = x ]; then
  echo "Setting up ifdhc, because jobsub did not set it up."
echo "IFDHC_DIR=$IFDHC_DIR"
# Set GROUP environment variable.
if [ x$GRP != x ]; then
  echo "GROUP not specified."
# Set options for ifdh.
if [ $GRID -ne 0 ]; then
  # Figure out if this is a production job.
  # This option is only used when copying back output.
  # It affects the ownership of copied back files.
  echo "X509_USER_PROXY = $X509_USER_PROXY"
  #if ! echo $X509_USER_PROXY | grep -q Production; then
  #  IFDH_OPT="--force=$FORCE"
  #  IFDH_OPT="--force=$FORCE"
echo "IFDH_OPT=$IFDH_OPT"
# Make sure fcl file argument was specified.
if [ x$FCL = x ]; then
  echo "No configuration option (-c|--config) was specified."
# Make sure output directory exists and is writable.
if [ x$OUTDIR = x ]; then
  echo "Output directory not specified."
if [ $GRID -eq 0 -a \( ! -d $OUTDIR -o ! -w $OUTDIR \) ]; then
  echo "Output directory $OUTDIR does not exist or is not writable."
echo "Output directory: $OUTDIR"
# Make sure log directory exists and is writable.
if [ x$LOGDIR = x ]; then
  echo "Log directory not specified."
if [ $GRID -eq 0 -a \( ! -d $LOGDIR -o ! -w $LOGDIR \) ]; then
  echo "Log directory $LOGDIR does not exist or is not writable."
echo "Log directory: $LOGDIR"
# See if we need to set umask for group write.
if [ $GRID -eq 0 ]; then
  OUTUSER=`stat -c %U $OUTDIR`
  LOGUSER=`stat -c %U $LOGDIR`
  if [ $OUTUSER != $CURUSER -o $LOGUSER != $CURUSER ]; then
    echo "Setting umask for group write."
# Make sure scratch directory is defined.
# For batch, the scratch directory is always $_CONDOR_SCRATCH_DIR.
# For interactive, the scratch directory is specified by option
# --scratch or --outdir.
if [ $INTERACTIVE -eq 0 ]; then
  SCRATCH=$_CONDOR_SCRATCH_DIR
  if [ x$SCRATCH = x ]; then
if [ x$SCRATCH = x -o ! -d "$SCRATCH" -o ! -w "$SCRATCH" ]; then
  echo "Local scratch directory not defined or not writable."
# Create the scratch directory in the condor scratch directory.
# Copied from condor_lBdetMC.sh.
# Scratch directory path is stored in $TMP.
# Scratch directory is automatically deleted when shell exits.
# Do not change this section.
# It creates a temporary working directory that automatically cleans up all
# leftover files at the end.
TMP=`mktemp -d ${SCRATCH}/working_dir.XXXXXXXXXX`
TMP=${TMP:-${SCRATCH}/working_dir.$$}
{ [[ -n "$TMP" ]] && mkdir -p "$TMP"; } || \
  { echo "ERROR: unable to create temporary directory!" 1>&2; exit 1; }
trap "[[ -n \"$TMP\" ]] && { rm -rf \"$TMP\"; }" 0
# End of the section you should not change.
echo "Scratch directory: $TMP"
# Copy files from work directory to scratch directory.
echo "No longer fetching files from work directory."
echo "That is now done using jobsub -f commands."
cp ${CONDOR_DIR_INPUT}/* ./work/
find . -name \*.tar -exec tar xf {} \;
find . -name \*.py -exec chmod +x {} \;
find . -name \*.sh -exec chmod +x {} \;
echo "Local working directory:"
# Save the hostname and condor job id.
hostname > hostname.txt
echo ${CLUSTER}.${PROCESS} > jobid.txt
# Set default CLUSTER and PROCESS environment variables for interactive jobs.
if [ $INTERACTIVE -ne 0 ]; then
  CLUSTER=`date +%s`   # From time stamp.
  PROCESS=0            # Default zero for interactive.
# Override CLUSTER and PROCESS from command line options.
if [ x$CLUS != x ]; then
if [ x$PROC != x ]; then
if [ x$PROCMAP != x ]; then
  if [ -f $PROCMAP ]; then
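    # Remap $PROCESS through the map file: line $PROCESS+1 of $PROCMAP becomes
    # the new $PROCESS (e.g. a map file containing the lines "3" and "7" remaps
    # workers 0 and 1 to 3 and 7; values hypothetical).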
    PROCESS=`sed -n $(( $PROCESS + 1 ))p $PROCMAP`
    echo "Process map file $PROCMAP not found."
if [ x$CLUSTER = x ]; then
  echo "CLUSTER not specified."
if [ x$PROCESS = x ]; then
  echo "PROCESS not specified."
echo "Procmap: $PROCMAP"
echo "Cluster: $CLUSTER"
echo "Process: $PROCESS"
# Construct name of output subdirectory.
OUTPUT_SUBDIR=${CLUSTER}_${PROCESS}
echo "Output subdirectory: $OUTPUT_SUBDIR"
# Make sure fcl file exists.
if [ ! -f $FCL ]; then
  echo "Configuration file $FCL does not exist."
# Make sure init script exists and is executable (if specified).
if [ x$INITSCRIPT != x ]; then
  if [ -f "$INITSCRIPT" ]; then
    echo "Initialization script $INITSCRIPT does not exist."
# Make sure init source script exists (if specified).
if [ x$INITSOURCE != x -a ! -f "$INITSOURCE" ]; then
  echo "Initialization source script $INITSOURCE does not exist."
# Make sure end-of-job script exists and is executable (if specified).
if [ x$ENDSCRIPT != x ]; then
  if [ -f "$ENDSCRIPT" ]; then
    echo "End-of-job script $ENDSCRIPT does not exist."
# MRB run time environment setup goes here.
# Setup local test release, if any.
if [ x$LOCALDIR != x ]; then
  # Copy test release directory recursively.
  echo "Copying local test release from directory ${LOCALDIR}."
  # Make sure ifdhc is set up.
  if [ x$IFDHC_DIR = x ]; then
    echo "Setting up ifdhc before fetching local directory."
  echo "IFDHC_DIR=$IFDHC_DIR"
  ifdh cp -r $IFDH_OPT $LOCALDIR .
  if [ $stat -ne 0 ]; then
    echo "ifdh cp failed with status ${stat}."
  find . -name \*.py -exec chmod +x {} \;
  find . -name \*.sh -exec chmod +x {} \;
  # Setup the environment.
  echo "Initializing localProducts from ${LOCALDIR}."
  if [ ! -f $TMP/local/setup ]; then
    echo "Local test release directory $LOCALDIR does not contain a setup script."
  sed "s@setenv MRB_INSTALL.*@setenv MRB_INSTALL ${TMP}/local@" $TMP/local/setup | \
    sed "s@setenv MRB_TOP.*@setenv MRB_TOP ${TMP}@" > $TMP/local/setup.local
  . $TMP/local/setup.local
  #echo "MRB_INSTALL=${MRB_INSTALL}."
  #echo "MRB_QUALS=${MRB_QUALS}."
  echo "Setting up all localProducts."
  if [ x$IFDHC_DIR != x ]; then
# Setup local larsoft test release from tarball.
if [ x$LOCALTAR != x ]; then
  # Fetch the tarball.
  echo "Fetching test release tarball ${LOCALTAR}."
  # Make sure ifdhc is set up.
  if [ x$IFDHC_DIR = x ]; then
    echo "Setting up ifdhc before fetching tarball."
  echo "IFDHC_DIR=$IFDHC_DIR"
  ifdh cp $LOCALTAR local.tar
  if [ $stat -ne 0 ]; then
    echo "ifdh cp failed with status ${stat}."
  # Extract the tarball.
  # Setup the environment.
  echo "Initializing localProducts from tarball ${LOCALTAR}."
  sed "s@setenv MRB_INSTALL.*@setenv MRB_INSTALL ${TMP}/local@" $TMP/local/setup | \
    sed "s@setenv MRB_TOP.*@setenv MRB_TOP ${TMP}@" > $TMP/local/setup.local
  . $TMP/local/setup.local
  #echo "MRB_INSTALL=${MRB_INSTALL}."
  #echo "MRB_QUALS=${MRB_QUALS}."
  echo "Setting up all localProducts."
  if [ x$IFDHC_DIR != x ]; then
# Setup specified version of top level run time products
# (if specified, and if local test release did not set them up).
for prd in `echo $UPS_PRDS | tr , ' '`
  if ! ups active | grep -q $prd; then
    echo "Setting up $prd $REL -q ${QUAL}."
    if [ x$IFDHC_DIR != x -a x$IFBEAM_DIR = x ]; then
    setup $prd $REL -q $QUAL
# In case mrb setup didn't set up a version of ifdhc, set up ifdhc again.
if [ x$IFDHC_DIR = x ]; then
  echo "Setting up ifdhc again, because larsoft did not set it up."
echo "IFDH_ART_DIR=$IFDH_ART_DIR"
echo "IFDHC_DIR=$IFDHC_DIR"
# Get input files to process, either single file, file list, or sam.
# For non-sam non-xrootd input, copy all files local using ifdh cp, and make a
# local file list called condor_lar_input.list. Save the remote file names (uri's)
# in another file called transferred_uris.list.
# For non-sam xrootd input ("--sam_schema root") convert input list to xrootd uri's,
rm -f condor_lar_input.list
rm -f transferred_uris.list
aunt_files=() # For data overlay, the data files being brought in are the output's aunts.
if [ $USE_SAM -eq 0 -a x$INFILE != x ]; then
  # Don't allow any list-related options in single file case:
  # -S, --source-list, --nfile, --nfile_skip
  if [ x$INLIST != x -o $NFILE -ne 0 -o $NFILE_SKIP -ne 0 ]; then
    echo "File list options specified with single input file."
  # Set the parent file to be the input file.
  parent_files=("${parent_files[@]}" $INFILE)
  # Copy input file to scratch directory or convert to xrootd url.
  if [ x$SAM_SCHEMA = xroot ]; then
    XROOTD_URI=`file_to_url.sh $INFILE`
  if [ $XROOTD_URI != $INFILE ]; then
    echo $INFILE > transferred_uris.list
    echo $XROOTD_URI > condor_lar_input.list
    echo "Input xrootd uri: $XROOTD_URI"
    LOCAL_INFILE=`basename $INFILE`
    echo "Copying $INFILE"
    ifdh cp $INFILE $LOCAL_INFILE
    if [ $stat -ne 0 ]; then
      echo "ifdh cp failed with status ${stat}."
    if [ -f $LOCAL_INFILE -a $stat -eq 0 ]; then
      echo $INFILE > transferred_uris.list
      echo $LOCAL_INFILE > condor_lar_input.list
      echo "Error fetching input file ${INFILE}."
elif [ $USE_SAM -eq 0 -a x$INLIST != x ]; then
  # Make sure input file list exists.
  if [ ! -f $INLIST ]; then
    echo "Input file list $INLIST does not exist."
  # Remember how many files are in the input file list.
  NFILE_TOTAL=`cat $INLIST | wc -l`
  echo "Input file list contains $NFILE_TOTAL total files."
  # Clamp the total number of files to be a maximum of NFILE * NJOBS, where
  # NFILE and NJOBS are specified via command line options. In project.py
  # terms, NFILE is <maxfilesperjob> and NJOBS is <numjobs>.
  MAX_TOTAL=$(( $NFILE * $NJOBS ))
  if [ $MAX_TOTAL -gt 0 -a $NFILE_TOTAL -gt $MAX_TOTAL ]; then
    NFILE_TOTAL=$MAX_TOTAL
    echo "Number of files to be processed will be limited to ${NFILE_TOTAL}."
  # If --njobs was specified, calculate how many files
  # to skip and process in this worker.
  if [ $NJOBS -ne 0 ]; then
    # Don't allow option --nfile_skip in this case.
    if [ $NFILE_SKIP -ne 0 ]; then
      echo "Illegal options specified with --njobs."
    # Clamp NJOBS to be a maximum of $NFILE_TOTAL.
    # This means that workers with $PROCESS >= $NFILE_TOTAL will not have
    # any input files to process.
    if [ $MYNJOBS -gt $NFILE_TOTAL ]; then
      MYNJOBS=$NFILE_TOTAL
    # Calculate number of files to skip and number of files to process.
    NFILE_SKIP=$(( $PROCESS * $NFILE_TOTAL / $MYNJOBS ))
    MYNFILE=$(( ( $PROCESS + 1 ) * $NFILE_TOTAL / $MYNJOBS - $NFILE_SKIP ))
    if [ $MYNFILE -eq 0 -o $NFILE_SKIP -ge $NFILE_TOTAL ]; then
      echo "This worker did not get any input files."
    if [ $MYNFILE -lt $NFILE -o $NFILE -eq 0 ]; then
  # Report number of files to skip and process.
  echo "Skipping $NFILE_SKIP files."
  if [ $NFILE -eq 0 ]; then
    echo "Processing all remaining files."
    echo "Processing $NFILE files."
  # Copy input files and construct local input file list.
  while read infile; do
    if [ $nfskip -gt 0 ]; then
      nfskip=$(( $nfskip - 1 ))
      # Retain the original file name as the local file name, if possible.
      # Otherwise, generate a new (hopefully) unique name.
      if [ ! -f condor_lar_input.list ]; then
        touch condor_lar_input.list
      if [ x$SAM_SCHEMA = xroot ]; then
        XROOTD_URI=`file_to_url.sh $infile`
      if [ $XROOTD_URI != $infile ]; then
        echo $infile >> transferred_uris.list
        echo $XROOTD_URI >> condor_lar_input.list
        echo "Input xrootd uri: $XROOTD_URI"
        LOCAL_INFILE=`basename $infile`
        if grep -q $LOCAL_INFILE condor_lar_input.list; then
          LOCAL_INFILE=input${nfile}.root
          if [ "$INMODE" = 'textfile' ]; then
            LOCAL_INFILE=input${nfile}.txt
        echo "Copying $infile"
        ifdh cp $infile $LOCAL_INFILE
        if [ $stat -ne 0 ]; then
          echo "ifdh cp failed with status ${stat}."
        if [ -f $LOCAL_INFILE -a $stat -eq 0 ]; then
          echo $infile >> transferred_uris.list
          echo $LOCAL_INFILE >> condor_lar_input.list
          parent_files=("${parent_files[@]}" $LOCAL_INFILE)
          echo "Error fetching input file ${infile}."
    nmax=$(( $nmax - 1 ))
    if [ $nmax -eq 0 ]; then
    nfile=$(( $nfile + 1 ))
if [ $USE_SAM -eq 0 -a x$SAM_SCHEMA != xroot ]; then
  if [ -f condor_lar_input.list ]; then
    # Sort input list by decreasing size so we don't get a file with
    # zero events as the first file.
    #ls -S1 `cat condor_lar_input.list` > condor_lar_input.list
    xargs ls -s1 < condor_lar_input.list | sort -nr | awk '{print $2}' > newcondor_lar_input.list
    mv -f newcondor_lar_input.list condor_lar_input.list
    echo "Local input file list:"
    cat condor_lar_input.list
    NFILE_LOCAL=`cat condor_lar_input.list | wc -l`
    echo "No local input files."
  echo "Local input list has $NFILE_LOCAL files."
# Break the master wrapper fcl into one fcl file per stage.
  if [ "$(echo $line | awk '{print $1}')" = "#---STAGE" ]; then
    stage="$(echo $line | awk '{print $2}')"
    stage_fcl="Stage$stage.fcl"
    nfcls=$(( $nfcls + 1 ))
  if [ "$line" = "#---END_STAGE" ]; then
  echo $line >> $stage_fcl
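# For example, a two-stage master wrapper fcl might be laid out like this
# (stage contents hypothetical):
#
#   #---STAGE 0
#   #include "gen_stage.fcl"
#   #---END_STAGE
#   #---STAGE 1
#   #include "reco_stage.fcl"
#   #---END_STAGE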
# We now have nfcls fcl files, each of which needs to be run serially.
echo "Start loop over stages"
while [ $stage -lt $nfcls ]; do
  FCL="Stage$stage.fcl"
  # In case no input files were specified, and we are not getting input
  # from sam (i.e. mc generation), recalculate the first event number,
  # the subrun number, and the number of events to generate in this worker.
  # This also applies to the textfile inputmode.
  # Note this only applies to the first stage by definition.
  if [ $stage -eq 0 -a $USE_SAM -eq 0 ] && [ $NFILE_TOTAL -eq 0 -o "$INMODE" = 'textfile' ]; then
    # Don't allow --nskip.
    if [ $NSKIP -gt 0 ]; then
      echo "Illegal option --nskip specified with no input."
    NSKIP=$(( $PROCESS * $NEVT / $NJOBS ))
    NEV=$(( ( $PROCESS + 1 ) * $NEVT / $NJOBS - $NSKIP ))
    FIRST_EVENT=$(( $NSKIP + 1 ))
    # Set subrun=$PROCESS+1 in a wrapper fcl file.
    SUBRUN=$(( $PROCESS + 1 ))
    cat <<EOF > subrun_wrapper.fcl
source.firstSubRun: $SUBRUN
EOF
    if [ "$INMODE" = 'textfile' ]; then
      if [ $NFILE_LOCAL -ne 1 ]; then
        echo "Text file input mode specified with wrong number of input files."
      echo "physics.producers.generator.InputFileName: \"`cat condor_lar_input.list`\"" >> subrun_wrapper.fcl
    FCL=subrun_wrapper.fcl
    echo "First MC event: $FIRST_EVENT"
    echo "MC subrun: $SUBRUN"
    echo "Number of MC events: $NEVT"
# Sam stuff for main input.
if [ $USE_SAM -ne 0 -a $stage -eq 0 ]; then
  # Make sure a project name has been specified.
  if [ x$SAM_PROJECT = x ]; then
    echo "No sam project was specified."
  echo "Sam project: $SAM_PROJECT"
  # Start project (if requested).
  if [ $SAM_START -ne 0 ]; then
    if [ x$SAM_DEFNAME != x ]; then
      # Do some preliminary tests on the input dataset definition.
      # If dataset definition returns zero files at this point, abort the job.
      # If dataset definition returns too many files compared to --nfile, create
      # a new dataset definition by adding a "with limit" clause.
      nf=`ifdh translateConstraints "defname: $SAM_DEFNAME" | wc -l`
      if [ $nf -eq 0 ]; then
        echo "Input dataset $SAM_DEFNAME is empty."
      if [ $NFILE -ne 0 -a $nf -gt $NFILE ]; then
        limitdef=${SAM_DEFNAME}_limit_$NFILE
        # Check whether limit def already exists.
        # Have to parse command output because ifdh returns wrong status.
        existdef=`ifdh describeDefinition $limitdef 2>/dev/null | grep 'Definition Name:' | wc -l`
        if [ $existdef -gt 0 ]; then
          echo "Using already created limited dataset definition ${limitdef}."
          ifdh createDefinition $limitdef "defname: $SAM_DEFNAME with limit $NFILE" $SAM_USER $SAM_GROUP
          # Assume command worked, because it returns the wrong status.
          echo "Created limited dataset definition ${limitdef}."
        # If we get to here, we know that we want to use $limitdef instead of $SAM_DEFNAME
        # as the input sam dataset definition.
        SAM_DEFNAME=$limitdef
      # If recursive flag, take snapshot of input dataset.
      if [ $RECUR -ne 0 ]; then
        echo "Forcing snapshot"
        SAM_DEFNAME=${SAM_DEFNAME}:force
      # Start the project.
      echo "Starting project $SAM_PROJECT using sam dataset definition $SAM_DEFNAME"
      ifdh startProject $SAM_PROJECT $SAM_STATION $SAM_DEFNAME $SAM_USER $SAM_GROUP
      if [ $? -eq 0 ]; then
        echo "Start project succeeded."
        echo "Start project failed."
    if [ x$SAM_DEFNAME = x ]; then
      echo "Start project requested, but no definition was specified."
  # Get the project url of a running project (maybe the one we just started,
  # or maybe started externally). This command has to succeed, or we can't continue.
  PURL=`ifdh findProject $SAM_PROJECT $SAM_STATION`
  if [ x$PURL = x ]; then
    echo "Unable to find url for project ${SAM_PROJECT}."
  echo "Project url: $PURL"
  # Start the consumer process. This command also has to succeed.
  # Parse fcl file to extract process_name, and use that
  # as the application name for starting the consumer process.
  APPNAME=`fhicl-dump $FCL | grep process_name: | tr -d '"' | awk '{print $2}'`
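  # For example (hypothetical fcl), a configuration containing
  #   process_name: "GenieGen"
  # yields APPNAME=GenieGen after the quotes are stripped.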
  if [ $? -ne 0 ]; then
    echo "fhicl-dump $FCL failed to run. A ups product, library, or fcl file may be missing."
  if [ x$APPNAME = x ]; then
    echo "Trouble determining application name."
  echo "Starting consumer process."
  echo "ifdh establishProcess $PURL $APPNAME $REL $NODE $SAM_USER $APPFAMILY $FCL $NFILE $SAM_SCHEMA"
  CPID=`ifdh establishProcess $PURL $APPNAME $REL $NODE $SAM_USER $APPFAMILY $FCL $NFILE $SAM_SCHEMA`
  if [ x$CPID = x ]; then
    echo "Unable to start consumer process for project url ${PURL}."
  echo "Consumer process id $CPID"
  # Stash away the project name and consumer process id in case we need them
  # later for bookkeeping.
  echo $SAM_PROJECT > sam_project.txt
  echo $CPID > cpid.txt
# Sam stuff for secondary input.
if [ $MIX_SAM -ne 0 ]; then
  echo "Configuring mix sam input."
  # Make sure a project name has been specified.
  if [ x$MIX_PROJECT = x ]; then
    echo "No mix sam project was specified."
  echo "Mix project: $MIX_PROJECT"
  # Start mix project (if requested).
  if [ $SAM_START -ne 0 ]; then
    if [ x$MIX_DEFNAME != x ]; then
      echo "Starting project $MIX_PROJECT using sam dataset definition $MIX_DEFNAME"
      ifdh startProject $MIX_PROJECT $SAM_STATION $MIX_DEFNAME $SAM_USER $SAM_GROUP
      if [ $? -eq 0 ]; then
        echo "Start project succeeded."
        echo "Start project failed."
    if [ x$MIX_DEFNAME = x ]; then
      echo "Start project requested, but no mix definition was specified."
# Figure out output file names.
# If outfile is not defined and we are inputting a single file or file list, follow our
# convention that the output file should be %inputfilename_%systemtime_stage.root
# Construct options for lar command line.
LAROPT="-c $FCL --rethrow-default"
echo "Laropt: $LAROPT"
if [ -f condor_lar_input.list -a $stage -eq 0 ]; then
  if [ "$INMODE" != 'textfile' ]; then
    LAROPT="$LAROPT -S condor_lar_input.list" # artroot files to read in
    #AOUTFILE=`cat condor_lar_input.list`
if [ x$OUTFILE != x ]; then
  LAROPT="$LAROPT -o `basename $OUTFILE .root`$stage.root"
  outstem=`basename $OUTFILE .root`
if [ x$TFILE != x ]; then
  LAROPT="$LAROPT -T $TFILE"
if [ $NEVT -ne 0 ]; then
  LAROPT="$LAROPT -n $NEVT"
if [ $NSKIP -ne 0 ]; then
  LAROPT="$LAROPT --nskip $NSKIP"
if [ $FIRST_EVENT -ne 0 ]; then
  LAROPT="$LAROPT -e $FIRST_EVENT"
if [ x$PURL != x -a $stage -eq 0 ]; then
  LAROPT="$LAROPT --sam-web-uri $PURL"
if [ x$CPID != x -a $stage -eq 0 ]; then
  LAROPT="$LAROPT --sam-process-id $CPID"
if [ -n "$ARGS" ]; then
  LAROPT="$LAROPT $ARGS"
if [ $stage -ne 0 ]; then
  LAROPT="$LAROPT -s $next_stage_input"
# Run/source optional initialization scripts.
if [ x$INITSCRIPT != x ]; then
  echo "Running initialization script ${INITSCRIPT}."
  if ! ./${INITSCRIPT}; then
if [ x$INITSOURCE != x -a $stage -eq 0 ]; then
  echo "Sourcing initialization source script ${INITSOURCE}."
  if [ $status -ne 0 ]; then
# Save a copy of the environment, which can be helpful for debugging.
# Save a canonicalized version of the fcl configuration.
fhicl-dump $FCL > cfgStage$stage.fcl
echo "$EXE $LAROPT" > commandStage$stage.txt
$EXE $LAROPT > larStage$stage.out 2> larStage$stage.err
echo $stat > larStage$stage.stat
echo "$EXE completed with exit status ${stat}."
if [ $stat -ne 0 ]; then
  echo "tail -100 larStage$stage.out"
  tail -100 larStage$stage.out
  echo "tail -100 larStage$stage.err"
  tail -100 larStage$stage.err
if [ $USE_SAM -ne 0 -a $stage -eq 0 ]; then
  # Get list of consumed files.
  if [ x$CPID = x -a -f cpid.txt ]; then
  ifdh translateConstraints "consumer_process_id $CPID and consumed_status consumed" > consumed_files.list
  # End consumer process.
  ifdh endProcess $PURL $CPID
  # Stop project (if appropriate).
  if [ $SAM_START -ne 0 ]; then
    echo "Stopping project."
    ifdh endProject $PURL
# If lar returns a status other than 0, do not move on to other stages.
if [ $stat -ne 0 ]; then
if [ $stage -ne 0 ]; then
  rm -rf $next_stage_input
#echo `ls -t1 *.root | egrep -v 'hist|larlite|larcv' | head -n1`
#echo "Outfile is $OUTFILE"
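# Use the most recently written artroot file, excluding histogram/analysis
# outputs, as the input to the next stage.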
next_stage_input=`ls -t1 *.root | egrep -v 'hist|larlite|larcv|TGraphs' | head -n1`
mixed_file=`sam_metadata_dumper $next_stage_input | grep mixparent | awk -F ":" '{gsub("\"" ,""); gsub(",",""); gsub(" ",""); print $2}'`
if [ x$mixed_file != x ]; then
  aunt_files=("${aunt_files[@]}" $mixed_file)
FIRST_EVENT=0  # Reset so later stages do not receive a first-event option.
# Rename the mem and time profile DBs by stage.
if [ -f time.db ]; then
  mv time.db time$stage.db
if [ -f mem.db ]; then
  mv mem.db mem$stage.db
# Set up current version of ifdhc (may be different from the version set up by larsoft).
#echo "Setting up current version of ifdhc."
#if [ x$IFDHC_DIR != x ]; then
echo "IFDHC_DIR=$IFDHC_DIR"
# Secondary sam cleanups.
if [ $MIX_SAM -ne 0 ]; then
  # Stop project (if appropriate).
  if [ $SAM_START -ne 0 ]; then
    echo "Stopping project."
    MURL=`ifdh findProject $MIX_PROJECT $SAM_STATION`
    ifdh endProject $MURL
# Delete input files.
if [ $USE_SAM -eq 0 -a x$SAM_SCHEMA != xroot -a -f condor_lar_input.list ]; then
done < condor_lar_input.list
# Run optional end-of-job script.
if [ x$ENDSCRIPT != x ]; then
  echo "Running end-of-job script ${ENDSCRIPT}."
  if ! ./${ENDSCRIPT}; then
# Do root file checks.
# Randomize names of root files that have a corresponding json file.
# These are normally histogram files. Art files do not have external
# json metadata at this point.
# Also randomize the names of root files if there is no input specified
# for this job (i.e. generator jobs).
# Also randomize and shorten names of root files that are longer than
# 200 characters (an example follows).
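# For example (hypothetical names), a histogram file hist_xyz.root with
# sidecar hist_xyz.root.json might become hist_xyz_<uuid>.root and
# hist_xyz_<uuid>.root.json.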
if [ $USE_SAM -eq 0 -a x$INFILE = x -a x$INLIST = x ]; then
for root in *.root; do
  if [ -f $root ]; then
    nc=`echo $root | wc -c`
    if [ -f ${root}.json -o $ran != 0 -o $nc -ge 200 ]; then
      echo "Move file 1 $root"
      base=`basename $root .root | cut -c1-150`_`uuidgen`
      echo "Move file 2 $base"
      mv $root ${base}.root
      if [ -f ${root}.json ]; then
        mv ${root}.json ${base}.root.json
# Calculate root metadata for all root files and save as json file.
# If json metadata already exists, merge with newly generated root metadata.
# Extract a subrun number, if one exists. Make remote (not necessarily unique)
# and local directories for root files with identifiable subrun numbers.
for root in *.root; do
  if [ -f $root ]; then
    if [ -f $json ]; then
      ./root_metadata.py --output="${json}2" "$root" >& /dev/null
      ./merge_json.py $json ${json}2 > ${json}3
      mv -f ${json}3 $json
      ./root_metadata.py --output="${json}" "$root" >& /dev/null
    subrun=`./subruns.py $root | awk 'NR==1{print $2}'`
    if [ x$subrun = x ]; then
      subruns[$subrun]=$subrun
      outdirs[$subrun]=`echo $OUTDIR | sed "s/@s/$subrun/"`
      echo "Output directory for subrun $subrun is ${outdirs[$subrun]}"
      logdirs[$subrun]=`echo $LOGDIR | sed "s/@s/$subrun/"`
      echo "Log directory for subrun $subrun is ${logdirs[$subrun]}"
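      # The "@s" placeholder in --outdir/--logdir is replaced by the subrun
      # number, e.g. --outdir /path/out/@s maps subrun 57 to /path/out/57
      # (hypothetical path).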
# Create a master lar.stat file which contains the overall exit code of all stages.
while [ $stageStat -lt $nfcls ]; do
  stat=`cat larStage$stageStat.stat`
  if [[ "$stat" = 65 && $ART_VERSION < v2_01 ]]; then
    # Workaround TimeTracker crash bug for input files with zero events.
    for json in *.json; do
      if grep -q '"events": *"0"' $json; then
  overallStat=$(( $stat + $overallStat ))
  # Do some cleanup of intermediate files.
  #rm Stage$stageStat.fcl
  stageStat=$(( $stageStat + 1 ))
echo $overallStat > lar.stat
# Make local output directories for files that don't have a subrun.
# Make local files group write, if appropriate.
if [ $GRID -eq 0 -a $OUTUSER != $CURUSER ]; then
# Stash all of the files we want to save in local
# directories with unique names. Then copy these directories
# and their contents recursively.
# First move .root and corresponding .json files into one subdirectory.
# Note that .root files never get replicated.
for root in *.root; do
  if [ -f $root ]; then
    subrun=`./subruns.py $root | awk 'NR==1{print $2}'`
    if [ x$subrun = x ]; then
    mv ${root}.json log$subrun
# Copy any remaining files into all log subdirectories.
# These small files may get replicated.
for outfile in *; do
  if [ -f $outfile ]; then
    for subrun in ${subruns[*]}
      cp $outfile log$subrun
# Do validation (if requested).
if [ $VALIDATE_IN_JOB -eq 1 ]; then
  # If SAM was used, get the parent files based on the cpid.
  if [ $USE_SAM -ne 0 ]; then
    parent_files=($(ifdh translateConstraints "consumer_process_id=$id and consumed_status consumed"))
    if [ $stat -ne 0 ]; then
      echo "Failed to determine parentage."
  echo "The file's parents are: "
  for elt in ${parent_files[*]};
  echo "The file's aunts are: "
  for elt in ${aunt_files[*]};
  # If we are maintaining the output's parentage, combine the file's parents and
  # aunts into a flat string. This string will be interpreted by validate_in_job.py.
  # If these are left empty, then validate_in_job.py will not change the file's parentage.
  if [ $MAINTAIN_PARENTAGE -eq 1 ]; then
    export JOBS_PARENTS=`echo ${parent_files[*]}`
    export JOBS_AUNTS=`echo ${aunt_files[*]}`
  # Do validation function for the whole job.
  #./validate_in_job.py --dir $curdir/out --logfiledir $curdir/log --outdir $OUTDIR/$OUTPUT_SUBDIR --declare $DECLARE_IN_JOB --copy $COPY_TO_FTS
  # Do validation for each subrun.
  for subrun in ${subruns[*]}
    cd $curdir/log$subrun
    ./validate_in_job.py --dir $curdir/out$subrun --logfiledir $curdir/log$subrun --outdir ${outdirs[$subrun]}/$OUTPUT_SUBDIR --declare $DECLARE_IN_JOB --copy $COPY_TO_FTS
    valstat=$(( $valstat + $subvalstat ))
# Remove duplicate files in log subdirectories, because they will cause ifdh to hang.
for outfile in log/*; do
  for subrun in ${subruns[*]}
    if [ ${logdirs[$subrun]} = $LOGDIR ]; then
      dupfile=log$subrun/`basename $outfile`
      if [ -f $dupfile ]; then
        echo "Removing duplicate file ${dupfile}."
# Make a tarball of each log directory, and save the tarball in its own log directory.
#tar -cjf log0.tar -C log .
for subrun in ${subruns[*]}
  tar -cf log.tar -C log$subrun .
  tar -rf log.tar -C log .
  mv log.tar log$subrun/log_s${subrun}.tar
# Clean remote output and log directories.
#if [ 1 -eq 0 ]; then
#  export IFDH_FORCE=$FORCE # This isn't set when running interactively, causing problems...
for dir in ${LOGDIR} ${OUTDIR}
  echo "Make sure directory0 ${dir}/$OUTPUT_SUBDIR exists."
  #mkdir ${dir}/$OUTPUT_SUBDIR
  ./mkdir.py -v ${dir}/$OUTPUT_SUBDIR
  echo "Make sure directory0 ${dir}/$OUTPUT_SUBDIR is empty."
  ./emptydir.py -v ${dir}/$OUTPUT_SUBDIR
  ./mkdir.py -v ${dir}/$OUTPUT_SUBDIR
  echo "Directory0 ${dir}/$OUTPUT_SUBDIR clean ok."
if [ $SINGLE != 0 ]; then
  for dir in ${logdirs[*]} ${outdirs[*]}
    echo "Make sure directory1 $dir exists."
    echo "Make sure directory1 $dir is empty."
    ./emptydir.py -v $dir
    ./mkdir.py -v $dir/$OUTPUT_SUBDIR
    echo "Directory1 $dir/$OUTPUT_SUBDIR clean ok."
  for dir in ${logdirs[*]} ${outdirs[*]}
    echo "Make sure directory2 ${dir}/$OUTPUT_SUBDIR exists."
    ./mkdir.py -v ${dir}/$OUTPUT_SUBDIR
    echo "Make sure directory2 ${dir}/$OUTPUT_SUBDIR is empty."
    ./emptydir.py -v ${dir}/$OUTPUT_SUBDIR
    ./mkdir.py -v ${dir}/$OUTPUT_SUBDIR
    echo "Directory2 ${dir}/$OUTPUT_SUBDIR clean ok."
export IFDH_CP_MAXRETRIES=5
#echo "ifdh cp -D $IFDH_OPT log/* ${LOGDIR}/$OUTPUT_SUBDIR"
echo "ifdh cp -D $IFDH_OPT log/log*.tar ${LOGDIR}/$OUTPUT_SUBDIR"
if [ "$( ls -A log )" ]; then
  if [ -f log/log*.tar ]; then
    #echo "ifdh cp -D $IFDH_OPT log/* ${LOGDIR}/$OUTPUT_SUBDIR"
    echo "ifdh cp -D $IFDH_OPT log/log*.tar ${LOGDIR}/$OUTPUT_SUBDIR"
    #ifdh cp -D $IFDH_OPT log/* ${LOGDIR}/$OUTPUT_SUBDIR
    ifdh cp -D $IFDH_OPT log/log*.tar ${LOGDIR}/$OUTPUT_SUBDIR
    if [ $stat -ne 0 ]; then
      echo "ifdh cp failed with status ${stat}."
for subrun in ${subruns[*]}
  echo "ls log$subrun"
  #echo "ifdh cp -D $IFDH_OPT log${subrun}/* ${logdirs[$subrun]}/$OUTPUT_SUBDIR"
  echo "ifdh cp -D $IFDH_OPT log${subrun}/log*.tar ${logdirs[$subrun]}/$OUTPUT_SUBDIR"
  #ifdh cp -D $IFDH_OPT log${subrun}/* ${logdirs[$subrun]}/$OUTPUT_SUBDIR
  ifdh cp -D $IFDH_OPT log${subrun}/log*.tar ${logdirs[$subrun]}/$OUTPUT_SUBDIR
  if [ $stat -ne 0 ]; then
    echo "ifdh cp failed with status ${stat}."
if [ $COPY_TO_FTS -eq 0 ]; then
  if [ "$( ls -A out )" ]; then
    echo "ifdh cp -D $IFDH_OPT out/* ${OUTDIR}/$OUTPUT_SUBDIR"
    ifdh cp -D $IFDH_OPT out/* ${OUTDIR}/$OUTPUT_SUBDIR
    if [ $stat -ne 0 ]; then
      echo "ifdh cp failed with status ${stat}."
  for subrun in ${subruns[*]}
    echo "ifdh cp -D $IFDH_OPT out${subrun}/* ${outdirs[$subrun]}/$OUTPUT_SUBDIR"
    ifdh cp -D $IFDH_OPT out${subrun}/* ${outdirs[$subrun]}/$OUTPUT_SUBDIR
    if [ $stat -ne 0 ]; then
      echo "ifdh cp failed with status ${stat}."
if [ $statout -eq 0 ]; then
  statout=`cat lar.stat`
if [ $statout -eq 0 ]; then
2061 if [ $statout -eq 0 ]; then