diff --git a/Background/config_test.py b/Background/config_test.py index 6c2001f5..e49a7de8 100644 --- a/Background/config_test.py +++ b/Background/config_test.py @@ -3,16 +3,16 @@ backgroundScriptCfg = { # Setup - 'inputWSDir':'cards/cards_current/data_Run2', # location of 'allData.root' file + 'inputWSDir':'cards/data_run2', # location of 'allData.root' file 'cats':'auto', # auto: automatically inferred from input ws 'catOffset':0, # add offset to category numbers (useful for categories from different allData.root files) - 'ext':'2022-11-21', # extension to add to output directory + 'ext':'2023-02-13', # extension to add to output directory 'year':'combined', # Use combined when merging all years in category (for plots) 'xvar': 'CMS_hgg_mass', # not yet used, should be passed to the C++ macros 'plotdir': 'plots', # Job submission options - 'batch':'condor', # [condor,SGE,IC,local] - 'queue':'espresso' # for condor e.g. espresso + 'batch':'Rome', # [condor,SGE,IC,Rome,local] + 'queue':'cmsan' # for condor e.g. espresso } diff --git a/Background/runBackgroundScripts.sh b/Background/runBackgroundScripts.sh index 3434762a..73c30a3f 100755 --- a/Background/runBackgroundScripts.sh +++ b/Background/runBackgroundScripts.sh @@ -92,7 +92,7 @@ done OUTDIR="outdir_${EXT}" if [[ $PLOTDIR == "" ]]; then - PLOTDIR=OUTDIR + PLOTDIR=$OUTDIR fi echo "[INFO] outdir is $OUTDIR, plotdir is $PLOTDIR INTLUMI $INTLUMI" @@ -168,7 +168,11 @@ OPT=" --isData 1" fi mkdir -p "${PLOTDIR}/bkgfTest${DATAEXT}" -cp "/afs/cern.ch/user/g/gpetrucc/php/index.php" "${PLOTDIR}/bkgfTest${DATAEXT}" +if test -f "/afs/cern.ch/user/g/gpetrucc/php/index.php"; then + cp "/afs/cern.ch/user/g/gpetrucc/php/index.php" "${PLOTDIR}/bkgfTest${DATAEXT}" +elif test -f "/cmshome/dimarcoe/php/index.php"; then + cp "/cmshome/dimarcoe/php/index.php" "${PLOTDIR}/bkgfTest${DATAEXT}" +fi echo " ./bin/fTest -i $FILE --saveMultiPdf $OUTDIR/CMS-HGG_multipdf_$EXT_$CATS.root -D $OUTDIR/bkgfTest$DATAEXT -f $CATS $OPT --year $YEAR --catOffset $CATOFFSET" ./bin/fTest -i $FILE --saveMultiPdf $OUTDIR/CMS-HGG_multipdf_$EXT_$CATS.root -D $OUTDIR/bkgfTest$DATAEXT -P $PLOTDIR/bkgfTest$DATAEXT -f $CATS $OPT --year $YEAR --catOffset $CATOFFSET diff --git a/Background/run_sequence.sh b/Background/run_sequence.sh index a2177ece..2678245b 100644 --- a/Background/run_sequence.sh +++ b/Background/run_sequence.sh @@ -1,2 +1,2 @@ -python scripts/mergeMultiYearsData.py -i cards/cards_current -o cards/cards_current/data_Run2/allData.root +#python scripts/mergeMultiYearsData.py -i cards/cards_current -o cards/cards_current/data_Run2/allData.root python RunBackgroundScripts.py --inputConfig config_test.py --mode fTestParallel diff --git a/Background/tools/submissionTools.py b/Background/tools/submissionTools.py index bde61e5d..95c641bc 100644 --- a/Background/tools/submissionTools.py +++ b/Background/tools/submissionTools.py @@ -73,7 +73,7 @@ def writeSubFiles(_opts): _fsub.close() # SGE... 
- if (_opts['batch'] == "IC")|(_opts['batch'] == "SGE")|(_opts['batch'] == "local" ): + if (_opts['batch'] == "IC")|(_opts['batch'] == "SGE")|(_opts['batch'] == "Rome")|(_opts['batch'] == "local" ): _executable = "sub_%s_%s"%(_opts['mode'],_opts['ext']) # Write details depending on mode @@ -102,8 +102,9 @@ def submitFiles(_opts): print " --> Finished submitting files" # SGE - elif _opts['batch'] in ['IC','SGE']: + elif _opts['batch'] in ['IC','SGE','Rome']: _executable = "sub_%s_%s"%(_opts['mode'],_opts['ext']) + _subcmd = 'bsub' if _opts['batch']=='Rome' else 'qsub' # Extract job opts jobOptsStr = _opts['jobOpts'] @@ -113,7 +114,7 @@ def submitFiles(_opts): for cidx in range(_opts['nCats']): c = _opts['cats'].split(",")[cidx] _subfile = "%s/%s_%s"%(_jobdir,_executable,c) - cmdLine = "qsub -q hep.q %s -o %s.log -e %s.err %s.sh"%(jobOptsStr,_subfile,_subfile,_subfile) + cmdLine = "%s -q %s %s -o %s.log -e %s.err %s.sh"%(_subcmd,_opts['queue'],jobOptsStr,_subfile,_subfile,_subfile) run(cmdLine) print " --> Finished submitting files" diff --git a/Combine/CollectFits.py b/Combine/CollectFits.py index 5d89bc28..04ccb67d 100644 --- a/Combine/CollectFits.py +++ b/Combine/CollectFits.py @@ -40,7 +40,13 @@ def run(cmd): if _fit.split(":")[2] == "all": _fitpois = pois else: _fitpois = _fit.split(":")[2].split(",") _name = "%s_%s"%(_fit.split(":")[0],_fit.split(":")[1]) - if opt.doObserved: _name += "_obs" + if opt.doObserved: + _name += "_obs" + mainlabel = "Observed" + else: + mainlabel = "Expected" + # add this to distinguish different fits with same POI + _name += "_"+opt.ext if( _fit.split(":")[0] == "bestfit" ): for poi in _fitpois: @@ -56,9 +62,20 @@ def run(cmd): elif( _fit.split(":")[0] == "profile1D")|( _fit.split(":")[0] == "scan1D" ): for poi in _fitpois: + if poi in ["r_ggH","r_VBF","r_top","r_VH"]: + translate_json = "pois_mu.json" + elif poi=='CMS_zz4l_fai1': + if 'ALT_0M' in opt.ext: translate_json = "pois_fa3.json" + if 'ALT_0PH' in opt.ext: translate_json = "pois_fa2.json" + if 'ALT_L1' in opt.ext: translate_json = "pois_flambda1.json" + if 'ALT_L1Zg' in opt.ext: translate_json = "pois_flambda1zgamma.json" + else: + print "Warning: unknown poi. 
Use r as default" + translate_json = "pois_mu.json" haddcmd = "cd runFits%s_%s; hadd -f %s_%s.root higgsCombine_%s_%s.POINTS.*.*.root; cd .."%(opt.ext,opt.mode,_name,poi,_name,poi) run(haddcmd) - plotcmd = "cd runFits%s_%s; plot1DScan.py %s_%s.root --y-cut 20 --y-max 20 -o Plots/%s_%s%s --POI %s; cd .."%(opt.ext,opt.mode,_name,poi,_name,poi,opt.ext,poi) + plotcmd = "cd runFits%s_%s; plot1DScan.py %s_%s.root --y-cut 30 --y-max 30 -o Plots/%s_%s%s --POI %s --main-label %s --translate %s/src/flashggFinalFit/Plots/%s; cd .."%(opt.ext,opt.mode,_name,poi,_name,poi,opt.ext,poi,mainlabel,os.environ['CMSSW_BASE'],translate_json) + print "plotcmd = ",plotcmd run(plotcmd) elif( _fit.split(":")[0] == "scan2D")|( _fit.split(":")[0] == "profile2D" ): diff --git a/Combine/PlotScans.py b/Combine/PlotScans.py index 0d68dde8..8c73306d 100644 --- a/Combine/PlotScans.py +++ b/Combine/PlotScans.py @@ -74,10 +74,10 @@ def run(cmd): if poi in ["r_ggH","r_VBF","r_top","r_VH"]: translate_json = "pois_mu.json" elif poi=='CMS_zz4l_fai1': - if 'ALT0M' in opt.ext: translate_json = "pois_fa3.json" - if 'ALT0PH' in opt.ext: translate_json = "pois_fa2.json" - if 'ALT0L1' in opt.ext: translate_json = "pois_flambda1.json" - if 'ALT0L1Zg' in opt.ext: translate_json = "pois_flambda1zgamma.json" + if 'ALT_0M' in opt.ext: translate_json = "pois_fa3.json" + if 'ALT_0PH' in opt.ext: translate_json = "pois_fa2.json" + if 'ALT_L1' in opt.ext: translate_json = "pois_flambda1.json" + if 'ALT_L1Zg' in opt.ext: translate_json = "pois_flambda1zgamma.json" else: print "Warning: unknown poi. Use r as default" translate_json = "pois_mu.json" diff --git a/Combine/RunFits.py b/Combine/RunFits.py index bfc2ce12..07a81289 100644 --- a/Combine/RunFits.py +++ b/Combine/RunFits.py @@ -19,7 +19,7 @@ def get_options(): parser.add_option('--doObserved', dest='doObserved', action="store_true", default=False, help="Fit to data") parser.add_option('--snapshotWSFile', dest='snapshotWSFile', default='', help="Full path to snapshot WS file (use when running observed statonly as nuisances are froze at postfit values)") parser.add_option('--commonOpts', dest='commonOpts', default="--cminDefaultMinimizerStrategy 0 --X-rtd MINIMIZER_freezeDisassociatedParams --X-rtd MINIMIZER_multiMin_hideConstants --X-rtd MINIMIZER_multiMin_maskConstraints --X-rtd MINIMIZER_multiMin_maskChannels=2", help="Common combine options for running fits") - parser.add_option('--batch', dest='batch', default='condor', help='Batch: [crab,condor/SGE/IC]') + parser.add_option('--batch', dest='batch', default='condor', help='Batch: [crab,condor/SGE/IC/lxbatch]') parser.add_option('--queue', dest='queue', default='espresso', help='Queue e.g. 
for condor=workday, for IC=hep.q') parser.add_option('--subOpts', dest='subOpts', default="", help="Submission options") parser.add_option('--doCustomCrab', dest='doCustomCrab', default=False, action="store_true", help="Load crab options from custom_crab.py file") @@ -65,11 +65,11 @@ def getPdfIndicesFromJson(pdfjson): if opt.subOpts != "": sub_opts += "\n%s"%opt.subOpts sub_opts += "\'" job_opts = "--job-mode condor %s"%sub_opts -elif( opt.batch == 'SGE' )|( opt.batch == 'IC' ): +elif( opt.batch == 'SGE' )|( opt.batch == 'IC' )|( opt.batch == 'lxbatch' ): sub_opts = "--sub-opts=\'-q %s"%opt.queue if opt.subOpts != "": sub_opts += " %s"%opt.subOpts sub_opts += "\'" - job_opts = "--job-mode SGE %s"%sub_opts + job_opts = "--job-mode %s %s"%(opt.batch,sub_opts) elif opt.batch == "local": print "--> Will print the commands to run combine without combineTool interactively\n\n" else: diff --git a/Combine/RunText2Workspace.py b/Combine/RunText2Workspace.py index b3133212..e2798d76 100644 --- a/Combine/RunText2Workspace.py +++ b/Combine/RunText2Workspace.py @@ -37,7 +37,7 @@ def run(cmd): fsub.write("#!/bin/bash\n\n") fsub.write("cd %s\n\n"%os.environ['PWD']) fsub.write("eval `scramv1 runtime -sh`\n\n") -fsub.write("text2workspace.py Datacard_%s.txt -o Datacard_%s.root %s %s"%(opt.ext,opt.ext,opt.common_opts,models[opt.mode])) +fsub.write("text2workspace.py Datacard_%s.txt -o Datacard_%s.root %s %s\n"%(opt.ext,opt.ext,opt.common_opts,models[opt.mode])) fsub.close() # Change permission for file @@ -58,6 +58,6 @@ def run(cmd): # Submit if opt.batch == "condor": subcmd = "condor_submit ./t2w_jobs/t2w_%s.sub"%(opt.ext) elif opt.batch == 'local': subcmd = "bash ./t2w_jobs/t2w_%s.sh"%(opt.ext) -else: subcmd = "qsub -q hep.q -l h_rt=6:0:0 -l h_vmem=24G ./t2w_jobs/t2w_%s.sh"%(opt.ext) +else: subcmd = "bsub -q cmsan -o ./t2w_jobs/t2w_%s.log -e ./t2w_jobs/t2w_%s.err ./t2w_jobs/t2w_%s.sh"%(opt.ext,opt.ext,opt.ext) if opt.dryRun: print "[DRY RUN] %s"%subcmd else: run(subcmd) diff --git a/Combine/inputs.json b/Combine/inputs.json index 09fb09d7..e2aa1de0 100644 --- a/Combine/inputs.json +++ b/Combine/inputs.json @@ -1,38 +1,32 @@ { "xsec":{ - "pois":"r_VBF", + "pois":"r_ggH,r_VBF,r_VH,r_top", "fits":"profile1D:syst:all+bestfit:syst:all", - "points":"20:1+:", - "fit_opts":"--setParameters r_ggH=1,r_top=1,r_VH=1 --setParameterRanges r_VBF=0,3:r_ggH=0.99,1.01:r_top=0.99,1.01:r_VH=0.99,1.01 --saveSpecifiedNuis all --saveInactivePOI 1 --freezeParameters MH,r_ggH,r_top,r_VH --autoBoundsPOIs 'r_VBF' --autoMaxPOIs 'r_VBF' --fastScan+--setParameters r_ggH=1,r_top=1,r_VH=1 --setParameterRanges r_VBF=0,3:r_ggH=0.99,1.01:r_top=0.99,1.01:r_VH=0.99,1.01 --saveSpecifiedNuis all --saveInactivePOI 1 --freezeParameters MH,r_ggH,r_top,r_VH --saveWorkspace" + "points":"41:1+:", + "fit_opts":"--setParameters r_ggH=1,r_top=1,r_VH=1,r_VBF=1 --setParameterRanges r_ggH=0.0,2.0:r_VBF=0.0,2.0:r_VH=0.0,3.0:r_top=0.0,4.0 --X-rtd FITTER_NEW_CROSSING_ALGO --X-rtd FITTER_BOUND --saveSpecifiedNuis all --saveInactivePOI 1 --fastScan+--setParameters r_ggH=1,r_top=1,r_VH=1,r_VBF=1 --setParameterRanges r_ggH=0.0,2.0:r_VBF=0.0,2.0:r_VH=0.0,3.0:r_top=0.0,4.0 --robustFit=1 --setRobustFitAlgo=Minuit2,Migrad --X-rtd FITTER_NEW_CROSSING_ALGO --setRobustFitTolerance=0.1 --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --cminFallbackAlgo \"Minuit2,0:1.\" --saveSpecifiedNuis all --saveInactivePOI 1 --saveWorkspace" }, - "cp":{ - "pois":"x", - "fits":"profile1D:syst:all", - "points":"20:1", - "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 
--cminApproxPreFitTolerance=10 --freezeParameters MH --fastScan --setParameters r=1 --setParameterRanges x=0,0.5" - }, - "ALT0M":{ + "ALT_0M":{ "pois":"CMS_zz4l_fai1", "fits":"profile1D:syst:all", - "points":"200:1", - "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --cminApproxPreFitTolerance=10 --setParameterRanges CMS_zz4l_fai1=-0.003,0.003 --freezeParameters MH,fa3_ggH,muf --fastScan --setParameters muV=1,muf=1,fa3_ggH=1 --autoMaxPOIs 'CMS_zz4l_fai1' --autoBoundsPOIs 'CMS_zz4l_fai1' " + "points":"41:1", + "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --setParameterRanges muV=0.0,4.0:muf=0.0,10.0:fa3_ggH=0.,1.:CMS_zz4l_fai1=-0.003,0.003 --fastScan --setParameters muV=1.,CMS_zz4l_fai1=0.,muf=1.,fa3_ggH=0. --robustFit=1 --setRobustFitAlgo=Minuit2,Migrad --X-rtd FITTER_NEW_CROSSING_ALGO --setRobustFitTolerance=0.1 --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --cminFallbackAlgo \"Minuit2,0:1.\" " }, - "ALT0PH":{ + "ALT_0PH":{ "pois":"CMS_zz4l_fai1", "fits":"profile1D:syst:all", - "points":"200:1", - "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --cminApproxPreFitTolerance=10 --setParameterRanges CMS_zz4l_fai1=-0.003,0.003 --freezeParameters MH,fa3_ggH,muf --fastScan --setParameters muV=1,muf=1,fa3_ggH=1 --autoMaxPOIs 'CMS_zz4l_fai1' --autoBoundsPOIs 'CMS_zz4l_fai1' " + "points":"41:1", + "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --setParameterRanges muV=0.0,4.0:muf=0.0,10.0:CMS_zz4l_fai1=-0.01,0.01 --fastScan --setParameters muV=1.,CMS_zz4l_fai1=0.,muf=1. --robustFit=1 --setRobustFitAlgo=Minuit2,Migrad --X-rtd FITTER_NEW_CROSSING_ALGO --setRobustFitTolerance=0.1 --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --cminFallbackAlgo \"Minuit2,0:1.\" " }, - "ALT0L1":{ + "ALT_L1":{ "pois":"CMS_zz4l_fai1", "fits":"profile1D:syst:all", - "points":"200:1", - "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --cminApproxPreFitTolerance=10 --setParameterRanges CMS_zz4l_fai1=-0.003,0.003 --freezeParameters MH,fa3_ggH,muf --fastScan --setParameters muV=1,muf=1,fa3_ggH=1 --autoMaxPOIs 'CMS_zz4l_fai1' --autoBoundsPOIs 'CMS_zz4l_fai1' " + "points":"41:1", + "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --setParameterRanges muV=0.0,4.0:muf=0.0,10.0:CMS_zz4l_fai1=-0.001,0.001 --fastScan --setParameters muV=1.,CMS_zz4l_fai1=0.,muf=1. --robustFit=1 --setRobustFitAlgo=Minuit2,Migrad --X-rtd FITTER_NEW_CROSSING_ALGO --setRobustFitTolerance=0.1 --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --cminFallbackAlgo \"Minuit2,0:1.\" " }, - "ALT0L1Zg":{ + "ALT_L1Zg":{ "pois":"CMS_zz4l_fai1", "fits":"profile1D:syst:all", - "points":"200:1", - "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --cminApproxPreFitTolerance=10 --setParameterRanges CMS_zz4l_fai1=-0.003,0.003 --freezeParameters MH,fa3_ggH,muf --fastScan --setParameters muV=1,muf=1,fa3_ggH=1 --autoMaxPOIs 'CMS_zz4l_fai1' --autoBoundsPOIs 'CMS_zz4l_fai1' " + "points":"41:1", + "fit_opts":"--saveSpecifiedNuis all --saveInactivePOI 1 --setParameterRanges muV=0.0,4.0:muf=0.0,10.0:CMS_zz4l_fai1=-0.01,0.01 --fastScan --setParameters muV=1.,CMS_zz4l_fai1=0.,muf=1. 
--robustFit=1 --setRobustFitAlgo=Minuit2,Migrad --X-rtd FITTER_NEW_CROSSING_ALGO --setRobustFitTolerance=0.1 --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --cminFallbackAlgo \"Minuit2,0:1.\" " } } diff --git a/Combine/models.py b/Combine/models.py index b0615c0d..a19f3368 100644 --- a/Combine/models.py +++ b/Combine/models.py @@ -1,7 +1,7 @@ models = { "mu_inclusive":"", - "xsec":"-P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel \ + "mu":"-P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel \ --PO \"map=.*/ggH.*hgg:r_ggH[1,0,3]\" \ --PO \"map=.*/qqH.*hgg:r_VBF[1,0,3]\" \ --PO \"map=.*/ttH.*hgg:r_top[1,-1,2]\" \ @@ -11,26 +11,28 @@ "cp":"-P HiggsAnalysis.CombinedLimit.HiggsJPC:twoHypothesisHiggs \ --PO=muFloating", - "ALT0M":"-P HiggsAnalysis.CombinedLimit.HiggsSingleAnomalousCoupling:FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase \ - --PO altSignal=ALT0M", + "ALT_0M":"-P HiggsAnalysis.CombinedLimit.FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase:FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase \ + --PO altSignal=ALT_0M", - "ALT0PH":"-P HiggsAnalysis.CombinedLimit.HiggsSingleAnomalousCoupling:FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase \ - --PO altSignal=ALT0PH", + "ALT_0PH":"-P HiggsAnalysis.CombinedLimit.FA2_Interference_JHU_rw_MengsMuV:FA2_Interference_JHU_rw_MengsMuV \ + --PO altSignal=ALT_0PH", - "ALT0L1":"-P HiggsAnalysis.CombinedLimit.HiggsSingleAnomalousCoupling:FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase \ - --PO \"altSignal=ALT0L1\"", + "ALT_L1":"-P HiggsAnalysis.CombinedLimit.FL1_Interference_JHU_rw_MengsMuV:FL1_Interference_JHU_rw_MengsMuV \ + --PO \"altSignal=ALT_L1\"", - "ALT0L1Zg":"-P HiggsAnalysis.CombinedLimit.HiggsSingleAnomalousCoupling:FA3_Interference_JHU_ggHSyst_rw_MengsMuV_HeshyXsec_ggHInt_ggHphase \ - --PO altSignal=ALT0L1Zg", + "ALT_L1Zg":"-P HiggsAnalysis.CombinedLimit.FL1Zg_Interference_JHU_rw_MengsMuV:FL1Zg_Interference_JHU_rw_MengsMuV \ + --PO altSignal=ALT_L1Zg", - "mu":"-P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel \ + "xsec":"-P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel \ --PO \"map=.*/ggH.*:r_ggH[1,0,2]\" \ --PO \"map=.*/bbH.*:r_ggH[1,0,2]\" \ --PO \"map=.*/qqH.*:r_VBF[1,0,3]\" \ - --PO \"map=.*/WH_had.*:r_VH[1,0,3]\" \ + --PO \"map=.*/WPLUSH2HQQ.*:r_VH[1,0,3]\" \ + --PO \"map=.*/WMINUSH2HQQ.*:r_VH[1,0,3]\" \ --PO \"map=.*/ZH_had.*:r_VH[1,0,3]\" \ --PO \"map=.*/ggZH_had.*:r_VH[1,0,3]\" \ - --PO \"map=.*/WH_lep.*:r_VH[1,0,3]\" \ + --PO \"map=.*/WPLUSH_lep.*:r_VH[1,0,3]\" \ + --PO \"map=.*/WMINUSH_lep.*:r_VH[1,0,3]\" \ --PO \"map=.*/ZH_lep.*:r_VH[1,0,3]\" \ --PO \"map=.*/ggZH_ll.*:r_VH[1,0,3]\" \ --PO \"map=.*/ggZH_nunu.*:r_VH[1,0,3]\" \ diff --git a/Combine/run_sequence.sh b/Combine/run_sequence.sh index e7f62ef0..0112f543 100755 --- a/Combine/run_sequence.sh +++ b/Combine/run_sequence.sh @@ -2,14 +2,15 @@ outdate=`date +%F` STEP=0 usage(){ - echo "Script to run yields and datacard making. Yields need to be done before running datacards" + echo "Script to run fits and plots of fit output. dryRun option is for the fitting only, that can be run in batch." echo "options:" echo "-h|--help) " echo "-s|--step) " + echo "-d|--dryRun) " } # options may be followed by one colon to indicate they have a required argument -if ! options=$(getopt -u -o s:h -l help,step: -- "$@") +if ! 
options=$(getopt -u -o s:hd -l help,step:,dryRun -- "$@") then # something went wrong, getopt will put out an error message for us exit 1 @@ -20,6 +21,7 @@ do case $1 in -h|--help) usage; exit 0;; -s|--step) STEP=$2; shift ;; +-d|--dryRun) DR=$2; shift ;; (--) shift; break;; (-*) usage; echo "$0: error - unrecognized option $1" 1>&2; usage >> /dev/stderr; exit 1;; (*) break;; @@ -27,24 +29,36 @@ esac shift done -fits=("xsec" "ALT0L1" "ALT0L1Zg" "ALT0PH" "ALT0M") +DROPT="" +if [[ $DR ]]; then + DROPT=" --dryRun " +fi + +fits=("xsec" "ALT_L1" "ALT_L1Zg" "ALT_0PH" "ALT_0M") if [[ $STEP == "t2w" ]]; then for fit in ${fits[*]} do - python RunText2Workspace.py --ext $fit --mode $fit --batch local + python RunText2Workspace.py --ext $fit --mode $fit --batch Rome done elif [[ $STEP == "fit" ]]; then for obs in " " " --doObserved " do for fit in ${fits[*]} do - python RunFits.py --inputJson inputs.json --ext $fit --mode $fit --batch local $obs + python RunFits.py --inputJson inputs.json --ext $fit --mode $fit --batch lxbatch --queue cmsan ${DROPT} $obs done done +elif [[ $STEP == "collect" ]]; then + for obs in " " " --doObserved " + do + for fit in ${fits[*]} + do + python CollectFits.py --inputJson inputs.json --ext $fit --mode $fit $obs + done + done elif [[ $STEP == "plot" ]]; then - #for obs in " " " --doObserved " - for obs in " " + for obs in " " " --doObserved " do for fit in ${fits[*]} do diff --git a/Datacard/makeDatacard.py b/Datacard/makeDatacard.py index 55148ce9..48e55fa6 100644 --- a/Datacard/makeDatacard.py +++ b/Datacard/makeDatacard.py @@ -21,6 +21,7 @@ def get_options(): parser.add_option('--doTrueYield', dest='doTrueYield', default=False, action="store_true", help="For pruning: use true number of expected events for proc x cat i.e. Product(XS,BR,eff*acc,lumi). Use only if NOTAG dataset has been included. If false then will use nominal_yield (i.e. 
sumEntries)") parser.add_option('--mass', dest='mass', default='125', help="MH mass: required for doTrueYield") parser.add_option('--analysis', dest='analysis', default='STXS', help="Analysis extension: required for doTrueYield (see ./tools/XSBR.py for example)") + parser.add_option('--pruneCat', dest='pruneCat', default=None, help="Prune category, can specify multiple times") # For yield/systematics: parser.add_option('--skipCOWCorr', dest='skipCOWCorr', default=False, action="store_true", help="Skip centralObjectWeight correction for events in acceptance") parser.add_option('--doSystematics', dest='doSystematics', default=False, action="store_true", help="Include systematics calculations and add to datacard") @@ -44,11 +45,16 @@ def leave(): # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Concatenate dataframes print " --> Loading per category dataframes into single dataframe" +skipCats = [] +if opt.pruneCat: skipCats = opt.pruneCat.split(",") extStr = "_%s"%opt.ext if opt.ext != '' else '' pkl_files = glob.glob("./yields%s/*.pkl"%extStr) pkl_files.sort() # Categories in alphabetical order data = pd.DataFrame() for f_pkl_name in pkl_files: + if any([skipCat in f_pkl_name for skipCat in skipCats]): + print "\t===> Pruning category: ",f_pkl_name + continue with open(f_pkl_name,"rb") as f_pkl: df = pickle.load(f_pkl) data = pd.concat([data,df], ignore_index=True, axis=0, sort=False) diff --git a/Datacard/makeYields.py b/Datacard/makeYields.py index bcea7a8a..9ac547dc 100644 --- a/Datacard/makeYields.py +++ b/Datacard/makeYields.py @@ -98,7 +98,7 @@ def get_options(): # Input flashgg ws _inputWSFile = glob.glob("%s/*M%s*_%s.root"%(inputWSDirMap[year],opt.mass,proc))[0] - _nominalDataName = "%s_%s_%s"%(_proc_s0,sqrts__,opt.cat) + _nominalDataName = "%s_%s_%s_%s"%(_proc_s0,opt.mass,sqrts__,opt.cat) # If opt.skipZeroes check nominal yield if 0 then do not add skipProc = False diff --git a/Datacard/run_sequence.sh b/Datacard/run_sequence.sh index 5aaef6a4..77883062 100755 --- a/Datacard/run_sequence.sh +++ b/Datacard/run_sequence.sh @@ -1,4 +1,5 @@ -ext=`date +%F` +#ext=`date +%F` +ext='2023-03-02' STEP=0 usage(){ @@ -10,7 +11,7 @@ usage(){ echo "-d|--dryRun) " } # options may be followed by one colon to indicate they have a required argument -if ! options=$(getopt -u -o s:h -l help,step:,dryRun -- "$@") +if ! 
options=$(getopt -u -o s:hd -l help,step:,dryRun -- "$@") then # something went wrong, getopt will put out an error message for us exit 1 @@ -34,28 +35,43 @@ if [[ $DR ]]; then DROPT=" --printOnly " fi +smprocs=("GG2H" "VBF" "TTH" "WMINUSH2HQQ" "WPLUSH2HQQ" "QQ2HLL") +smprocs_csv=$(IFS=, ; echo "${smprocs[*]}") + if [[ $STEP == "yields" ]]; then # for mu-simple: exclude ALT processes - python RunYields.py --cats "VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7" --inputWSDirMap 2016preVFP=cards/cards_current/signal_2016preVFP,2016postVFP=cards/cards_current/signal_2016postVFP,2017=cards/cards_current/signal_2017,2018=cards/cards_current/signal_2018 --procs "GG2H,TTH,VBF,WH_WM,WH_WP,ZH" --mergeYears --doSystematics --ext ${ext}_xsec --batch condor --queue espresso ${DROPT} + python RunYields.py --cats "auto" --inputWSDirMap 2016preVFP=cards/signal_2016preVFP,2016postVFP=cards/signal_2016postVFP,2017=cards/signal_2017,2018=cards/signal_2018 --procs $smprocs_csv --mergeYears --doSystematics --skipZeroes --ext ${ext}_xsec --batch Rome --queue cmsan ${DROPT} # for the single fai fits: include one ALT sample at a time - for altproc in "ALT0L1" "ALT0L1Zg" "ALT0PH" "ALT0M" + for altproc in "ALT_L1" "ALT_L1Zg" "ALT_0PH" "ALT_0M" # to get the interference correctly need the SM (fa1=0), the pure BSM (fai=1) and the mixed one (fai=0.5) - # temporary approx: only the VBF is BSM do - vbfsamples="VBF,VBF_${altproc},VBF_${altproc}f05ph0" - #whaltsamples="WH_ALT0L1f05ph0,WH_ALT0PH,WH_ALT0PHf05ph0" # not all are completed - if [[ $altproc == "ALT0M" ]]; then - zhsamples="ZH" # ZH alternative samples have some missing systematics + # for bookkeeping mistake, for VBF the files are called ALT_xxx for VBF and ALTxx for VH,TTH + altproc_nonvbf=`echo ${altproc} | sed 's|_||g'` + vbfsamples="VBF,VBF_${altproc},VBF_${altproc}f05" + zhsamples="QQ2HLL,ZH_${altproc_nonvbf},ZH_${altproc_nonvbf}f05ph0" + if [[ $altproc == "ALT_0PH" ]]; then # not all the WH alternative samples are available yet + whsamples="WMINUSH2HQQ,WPLUSH2HQQ,WH_${altproc_nonvbf},WH_${altproc_nonvbf}f05ph0" else - zhsamples="ZH,ZH_${altproc},ZH_${altproc}f05ph0" + whsamples="WMINUSH2HQQ,WPLUSH2HQQ" fi - python RunYields.py --cats "VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7" --inputWSDirMap 2016preVFP=cards/cards_current/signal_2016preVFP,2016postVFP=cards/cards_current/signal_2016postVFP,2017=cards/cards_current/signal_2017,2018=cards/cards_current/signal_2018 --procs "GG2H,TTH,WH_WM,WH_WP,$vbfsamples,$zhsamples" --mergeYears --doSystematics --ext ${ext}_${altproc} --batch condor --queue espresso ${DROPT} + if [[ $altproc == "ALT_0M" ]]; then + tthsamples="TTH,TTH_${altproc_nonvbf},TTH_${altproc_nonvbf}f05ph0" + else + tthsamples="TTH" + fi + if [[ $altproc == "ALT_L1" ]] || [[ $altproc == "ALT_L1Zg" ]]; then + zhsamples=`echo ${zhsamples} | sed 's|L1|0L1|g'` + fi + python RunYields.py --cats "auto" --inputWSDirMap 2016preVFP=cards/signal_2016preVFP,2016postVFP=cards/signal_2016postVFP,2017=cards/signal_2017,2018=cards/signal_2018 --procs "GG2H,$tthsamples,$vbfsamples,$whsamples,$zhsamples" --mergeYears --doSystematics --skipZeroes --ext ${ext}_${altproc} --batch Rome --queue cmsan ${DROPT} done elif [[ $STEP == "datacards" ]]; then - for fit in "xsec" "ALT0L1" "ALT0L1Zg" "ALT0PH" "ALT0M" + for fit in "xsec" "ALT_L1" "ALT_L1Zg" "ALT_0PH" "ALT_0M" do - python makeDatacard.py --years 2016preVFP,2016postVFP,2017,2018 --ext ${ext}_${fit} --prune --doSystematics --output "Datacard_${fit}" + echo "making datacards for all years together for type of fit: 
$fit" + python makeDatacard.py --years 2016preVFP,2016postVFP,2017,2018 --ext ${ext}_${fit} --prune --doSystematics --output "Datacard_${fit}" --pruneCat RECO_VBFLIKEGGH_Tag1,RECO_VBFLIKEGGH_Tag0 + python cleanDatacard.py --datacard "Datacard_${fit}" --factor 2 --removeDoubleSided + mv "Datacard_${fit}_cleaned.txt" "Datacard_${fit}.txt" done elif [[ $STEP == "links" ]]; then cd Models diff --git a/Datacard/systematics.py b/Datacard/systematics.py index f951ff7a..5c898db4 100644 --- a/Datacard/systematics.py +++ b/Datacard/systematics.py @@ -49,20 +49,20 @@ # Shape uncertainties: enter direct XS measurements # Scale weights are grouped: [1,2], [3,6], [4,8] - #{'name':'scaleWeight_0','title':'CMS_hgg_scaleWeight_0','type':'factory','prior':'lnN','correlateAcrossYears':1}, # nominal weight - #{'name':'scaleWeight_1','title':'CMS_hgg_scaleWeight_1','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'scaleWeight_2','title':'CMS_hgg_scaleWeight_2','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'scaleWeight_3','title':'CMS_hgg_scaleWeight_3','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'scaleWeight_4','title':'CMS_hgg_scaleWeight_4','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'scaleWeight_5','title':'CMS_hgg_scaleWeight_5','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['norm','shape']}, #Unphysical - #{'name':'scaleWeight_6','title':'CMS_hgg_scaleWeight_6','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'scaleWeight_7','title':'CMS_hgg_scaleWeight_7','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['norm','shape']}, #Unphysical - #{'name':'scaleWeight_8','title':'CMS_hgg_scaleWeight_8','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, - #{'name':'alphaSWeight_0','title':'CMS_hgg_alphaSWeight_0','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']}, - #{'name':'alphaSWeight_1','title':'CMS_hgg_alphaSWeight_1','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']}, + {'name':'scaleWeight_0','title':'CMS_hgg_scaleWeight_0','type':'factory','prior':'lnN','correlateAcrossYears':1}, # nominal weight + {'name':'scaleWeight_1','title':'CMS_hgg_scaleWeight_1','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + {'name':'scaleWeight_2','title':'CMS_hgg_scaleWeight_2','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + {'name':'scaleWeight_3','title':'CMS_hgg_scaleWeight_3','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + {'name':'scaleWeight_4','title':'CMS_hgg_scaleWeight_4','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + {'name':'scaleWeight_5','title':'CMS_hgg_scaleWeight_5','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['norm','shape']}, #Unphysical + {'name':'scaleWeight_6','title':'CMS_hgg_scaleWeight_6','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + {'name':'scaleWeight_7','title':'CMS_hgg_scaleWeight_7','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['norm','shape']}, #Unphysical + {'name':'scaleWeight_8','title':'CMS_hgg_scaleWeight_8','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape','mnorm']}, + 
{'name':'alphaSWeight_0','title':'CMS_hgg_alphaSWeight_0','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']}, + {'name':'alphaSWeight_1','title':'CMS_hgg_alphaSWeight_1','type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']}, ] # PDF weight -#for i in range(1,60): theory_systematics.append( {'name':'pdfWeight_%g'%i, 'title':'CMS_hgg_pdfWeight_%g'%i, 'type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']} ) +for i in range(1,60): theory_systematics.append( {'name':'pdfWeight_%g'%i, 'title':'CMS_hgg_pdfWeight_%g'%i, 'type':'factory','prior':'lnN','correlateAcrossYears':1,'tiers':['shape']} ) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -80,13 +80,13 @@ {'name':'PreselSF','title':'CMS_hgg_PreselSF','type':'factory','prior':'lnN','correlateAcrossYears':0}, {'name':'electronVetoSF','title':'CMS_hgg_electronVetoSF','type':'factory','prior':'lnN','correlateAcrossYears':0}, {'name':'TriggerWeight','title':'CMS_hgg_TriggerWeight','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'MuonIDWeight','title':'CMS_hgg_MuonID','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'MuonIsoWeight','title':'CMS_hgg_MuonIso','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'ElectronIDWeight','title':'CMS_hgg_ElectronID','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'ElectronRecoWeight','title':'CMS_hgg_ElectronReco','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'JetBTagCutWeight','title':'CMS_hgg_BTagCut','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'JetBTagReshapeWeight','title':'CMS_hgg_BTagReshape','type':'factory','prior':'lnN','correlateAcrossYears':0}, -# {'name':'prefireWeight','title':'CMS_hgg_prefire','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'MuonIDWeight','title':'CMS_hgg_MuonID','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'MuonIsoWeight','title':'CMS_hgg_MuonIso','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'ElectronIDWeight','title':'CMS_hgg_ElectronID','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'ElectronRecoWeight','title':'CMS_hgg_ElectronReco','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'JetBTagCutWeight','title':'CMS_hgg_BTagCut','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'JetBTagReshapeWeight','title':'CMS_hgg_BTagReshape','type':'factory','prior':'lnN','correlateAcrossYears':0}, + {'name':'prefireWeight','title':'CMS_hgg_prefire','type':'factory','prior':'lnN','correlateAcrossYears':0}, {'name':'SigmaEOverEShift','title':'CMS_hgg_SigmaEOverEShift','type':'factory','prior':'lnN','correlateAcrossYears':0}, {'name':'MvaShift','title':'CMS_hgg_phoIdMva','type':'factory','prior':'lnN','correlateAcrossYears':0}, {'name':'PUJIDShift','title':'CMS_hgg_PUJIDShift','type':'factory','prior':'lnN','correlateAcrossYears':0}, diff --git a/Datacard/tools/calcSystematics.py b/Datacard/tools/calcSystematics.py index 2f1dee1e..98d04535 100644 --- a/Datacard/tools/calcSystematics.py +++ b/Datacard/tools/calcSystematics.py @@ -59,8 +59,11 @@ def getValueFromJson(row,uncertainties,sname): def factoryType(d,s): #Fix for pdfWeight (as Nweights > 10) - if('pdfWeight' in s['name']): return "s_w" - #if('pdfWeight' in s['name'])|('alphaSWeight' in s['name']): return "s_w" + #if('pdfWeight' in s['name']): return "s_w" + 
if('pdfWeight' in s['name'])|('alphaSWeight' in s['name'])|('scaleWeight' in s['name']): return "s_w"
+
+  #Fix for rare cases in which there is no signal for that category at all (and skipZeroes has been used)
+  if(d[d['type']=='sig'].size==0): return "-"
 
   # Loop over rows in dataframe: until syst is found
   for ir, r in d[d['type']=='sig'].iterrows():
@@ -92,7 +95,7 @@ def factoryType(d,s):
     f.Close()
 
   # If never found:
-  print " --> [ERROR] systematic %s: cannot extract type in factoryType function. Doesn't match requirement for (anti)-symmetric weights or anti-symmetric histograms. Leaving..."
+  print " --> [ERROR] systematic %s: cannot extract type in factoryType function. Doesn't match requirement for (anti)-symmetric weights or anti-symmetric histograms. Leaving..." % s['name']
   sys.exit(1)
 
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -315,11 +318,12 @@ def theorySystFactory(d,systs,ftype,options,stxsMergeScheme=None,_removal=False)
   # Loop over systematics and add new column in dataFrame for each tier
   for s in systs:
     if s['type'] == 'constant': continue
-    for tier in s['tiers']:
-      if tier == 'mnorm':
-        if options.doSTXSMerging:
-          for mergeName in stxsMergeScheme: d["%s_%s_mnorm"%(s['name'],mergeName)] = '-'
-      else: d["%s_%s"%(s['name'],tier)] = '-'
+    if 'tiers' in s:
+      for tier in s['tiers']:
+        if tier == 'mnorm':
+          if options.doSTXSMerging:
+            for mergeName in stxsMergeScheme: d["%s_%s_mnorm"%(s['name'],mergeName)] = '-'
+        else: d["%s_%s"%(s['name'],tier)] = '-'
 
   # Loop over systematics and fill entries for rows which satisfy mask
   for s in systs:
@@ -330,9 +334,10 @@
     if "THU_ggH" in s['name']: mask = (d['type']=='sig')&(d['nominal_yield']!=0)&(d['proc'].str.contains('ggH'))
     else: mask = (d['type']=='sig')&(d['nominal_yield']!=0)
     # Loop over tiers and use appropriate mode for compareYield function: skip mnorm as treated separately below
-    for tier in s['tiers']:
-      if tier == 'mnorm': continue
-      d.loc[mask,"%s_%s"%(s['name'],tier)] = d[mask].apply(lambda x: compareYield(x,f,s['name'],mode=tier), axis=1)
+    if 'tiers' in s:
+      for tier in s['tiers']:
+        if tier == 'mnorm': continue
+        d.loc[mask,"%s_%s"%(s['name'],tier)] = d[mask].apply(lambda x: compareYield(x,f,s['name'],mode=tier), axis=1)
 
   # For merging STXS bins in parameter scheme: calculate mnorm systematics (merged-STXS-normalisation)
   # One nuisance per merge
@@ -340,7 +345,7 @@
     for mergeName in stxsMergeScheme:
       for s in systs:
         if s['type'] == 'constant': continue
-        elif 'mnorm' not in s['tiers']: continue
+        elif ('tiers' not in s) or ('mnorm' not in s['tiers']): continue
         for year in options.years.split(","):
           # Remove NaN entries and require specific year
           mask = (d['merge_%s_nominal_yield'%mergeName]==d['merge_%s_nominal_yield'%mergeName])&(d['year']==year)&(d['nominal_yield']!=0)
diff --git a/Datacard/tools/checkYields.py b/Datacard/tools/checkYields.py
new file mode 100644
index 00000000..e706ec4e
--- /dev/null
+++ b/Datacard/tools/checkYields.py
@@ -0,0 +1,22 @@
+# USAGE: python tools/checkYields.py 2023-03-02
+import sys
+from os import access,F_OK
+
+ext = sys.argv[1]
+
+fits = ["xsec","ALT_0M","ALT_0PH","ALT_L1","ALT_L1Zg"]
+
+allcats =
["RECO_0J_PTH_0_10_Tag0","RECO_0J_PTH_0_10_Tag1","RECO_0J_PTH_0_10_Tag2","RECO_0J_PTH_GT10_Tag0","RECO_0J_PTH_GT10_Tag1","RECO_0J_PTH_GT10_Tag2","RECO_1J_PTH_0_60_Tag0","RECO_1J_PTH_0_60_Tag1","RECO_1J_PTH_0_60_Tag2","RECO_1J_PTH_120_200_Tag0","RECO_1J_PTH_120_200_Tag1","RECO_1J_PTH_120_200_Tag2","RECO_1J_PTH_60_120_Tag0","RECO_1J_PTH_60_120_Tag1","RECO_1J_PTH_60_120_Tag2","RECO_GE2J_PTH_0_60_Tag0","RECO_GE2J_PTH_0_60_Tag1","RECO_GE2J_PTH_0_60_Tag2","RECO_GE2J_PTH_120_200_Tag0","RECO_GE2J_PTH_120_200_Tag1","RECO_GE2J_PTH_120_200_Tag2","RECO_GE2J_PTH_60_120_Tag0","RECO_GE2J_PTH_60_120_Tag1","RECO_GE2J_PTH_60_120_Tag2","RECO_PTH_200_300_Tag0","RECO_PTH_200_300_Tag1","RECO_PTH_300_450_Tag0","RECO_PTH_300_450_Tag1","RECO_PTH_450_650_Tag0","RECO_PTH_GT650_Tag0","RECO_THQ_LEP","RECO_TTH_HAD_PTH_0_60_Tag0","RECO_TTH_HAD_PTH_0_60_Tag1","RECO_TTH_HAD_PTH_0_60_Tag2","RECO_TTH_HAD_PTH_120_200_Tag0","RECO_TTH_HAD_PTH_120_200_Tag1","RECO_TTH_HAD_PTH_120_200_Tag2","RECO_TTH_HAD_PTH_120_200_Tag3","RECO_TTH_HAD_PTH_200_300_Tag0","RECO_TTH_HAD_PTH_200_300_Tag1","RECO_TTH_HAD_PTH_200_300_Tag2","RECO_TTH_HAD_PTH_60_120_Tag0","RECO_TTH_HAD_PTH_60_120_Tag1","RECO_TTH_HAD_PTH_60_120_Tag2","RECO_TTH_HAD_PTH_GT300_Tag0","RECO_TTH_HAD_PTH_GT300_Tag1","RECO_TTH_LEP_PTH_0_60_Tag0","RECO_TTH_LEP_PTH_0_60_Tag1","RECO_TTH_LEP_PTH_0_60_Tag2","RECO_TTH_LEP_PTH_120_200_Tag0","RECO_TTH_LEP_PTH_120_200_Tag1","RECO_TTH_LEP_PTH_200_300_Tag0","RECO_TTH_LEP_PTH_60_120_Tag0","RECO_TTH_LEP_PTH_60_120_Tag1","RECO_TTH_LEP_PTH_60_120_Tag2","RECO_TTH_LEP_PTH_GT300_Tag0","RECO_VBFLIKEGGH_Tag0","RECO_VBFLIKEGGH_Tag1","RECO_VBFTOPO_ACGGH_Tag0","RECO_VBFTOPO_ACGGH_Tag1","RECO_VBFTOPO_ACVBFBSM_Tag0","RECO_VBFTOPO_ACVBFBSM_Tag1","RECO_VBFTOPO_ACVBFSM_Tag0","RECO_VBFTOPO_VHHAD_Tag0","RECO_VBFTOPO_VHHAD_Tag1","RECO_VH_MET_Tag0","RECO_VH_MET_Tag1","RECO_VH_MET_Tag2","RECO_WH_LEP_PTV_0_75_Tag0","RECO_WH_LEP_PTV_0_75_Tag1","RECO_WH_LEP_PTV_75_150_Tag0","RECO_WH_LEP_PTV_75_150_Tag1","RECO_WH_LEP_PTV_GT150_Tag0","RECO_ZH_LEP_Tag0","RECO_ZH_LEP_Tag1"] + +for fit in fits: + print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + print "--> Fit type: ",fit + print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + base = "yields_{ext}_{fit}".format(ext=ext,fit=fit) + for cat in allcats: + fname = "yields_{ext}_{fit}/{cat}.pkl".format(ext=ext,fit=fit,cat=cat) + if not access(fname,F_OK): + print "File ",fname," not present!" + print "\n\n" + + diff --git a/Datacard/tools/submissionTools.py b/Datacard/tools/submissionTools.py index 35eaf07c..35909053 100644 --- a/Datacard/tools/submissionTools.py +++ b/Datacard/tools/submissionTools.py @@ -69,7 +69,7 @@ def writeSubFiles(_opts): _fsub.close() # SGE... 
- if (_opts['batch'] == "IC")|(_opts['batch'] == "SGE")|(_opts['batch'] == "local" ): + if (_opts['batch'] == "IC")|(_opts['batch'] == "SGE")|(_opts['batch'] == "Rome")|(_opts['batch'] == "local" ): _executable = "sub_yields_%s"%_opts['ext'] for cidx in range(_opts['nCats']): @@ -92,8 +92,9 @@ def submitFiles(_opts): print " --> Finished submitting files" # SGE - elif _opts['batch'] in ['IC','SGE']: + elif _opts['batch'] in ['IC','SGE','Rome']: _executable = "sub_yields_%s"%_opts['ext'] + _subcmd = 'bsub' if _opts['batch']=='Rome' else 'qsub' # Extract job opts jobOptsStr = _opts['jobOpts'] @@ -101,7 +102,7 @@ def submitFiles(_opts): for cidx in range(_opts['nCats']): c = _opts['cats'].split(",")[cidx] _subfile = "%s/%s_%s"%(_jobdir,_executable,c) - cmdLine = "qsub -q hep.q %s -o %s.log -e %s.err %s.sh"%(jobOptsStr,_subfile,_subfile,_subfile) + cmdLine = "%s -q %s %s -o %s.log -e %s.err %s.sh"%(_subcmd,_opts['queue'],jobOptsStr,_subfile,_subfile,_subfile) run(cmdLine) print " --> Finished submitting files" diff --git a/Datacard/tools/writeToDatacard.py b/Datacard/tools/writeToDatacard.py index f4a0e566..3855521e 100644 --- a/Datacard/tools/writeToDatacard.py +++ b/Datacard/tools/writeToDatacard.py @@ -110,8 +110,9 @@ def writeSystematic(f,d,s,options,stxsMergeScheme=None,scaleCorrScheme=None): for cat in d.cat.unique(): for ir,r in d[d['cat']==cat].iterrows(): if r['proc'] == "data_obs": continue + k = "%s%s%s"%(s['name'],mergeStr,tierStr) # Extract value and add to line (with checks) - sval = r["%s%s%s"%(s['name'],mergeStr,tierStr)] + sval = "0" if k not in r else r[k] lsyst = addSyst(lsyst,sval,stitle,r['proc'],cat) # Remove final space from line and add to file f.write("%s\n"%lsyst[:-1]) diff --git a/Plots/cats.json b/Plots/cats.json index cf95d1b2..a9359cc1 100644 --- a/Plots/cats.json +++ b/Plots/cats.json @@ -1,13 +1,12 @@ { - "all":"All Categories", - "wall":"#splitline{All Categories}{S/(S+B) weighted}", - "VBFTag_0":"not populated cat", - "VBFTag_1":"ggH low purity", - "VBFTag_2":"not populated cat", - "VBFTag_3":"ggH high purity", - "VBFTag_4":"not populated cat", - "VBFTag_5":"qqH SM-like", - "VBFTag_6":"qqH BSM-like high purity", - "VBFTag_7":"qqH BSM-like med purity" + "all":"All Categories", + "wall":"#splitline{All Categories}{S/(S+B) weighted}", + "RECO_VBFTOPO_ACGGH_Tag0":"ggH low purity", + "RECO_VBFTOPO_ACGGH_Tag1":"ggH high purity", + "RECO_VBFTOPO_ACVBFSM_Tag0":"qqH SM-like", + "RECO_VBFTOPO_ACVBFBSM_Tag0":"qqH BSM-like high purity", + "RECO_VBFTOPO_ACVBFBSM_Tag1":"qqH BSM-like med purity", + "RECO_VBFTOPO_VHHAD_Tag0":"qqH VH Tag0", + "RECO_VBFTOPO_VHHAD_Tag1":"qqH VH Tag1", } diff --git a/Plots/cats_latex.json b/Plots/cats_latex.json index d5151913..236dc35e 100644 --- a/Plots/cats_latex.json +++ b/Plots/cats_latex.json @@ -31,6 +31,11 @@ "RECO_PTH_GT650_Tag0":"BSM high $\\ptgg$", "RECO_VBFTOPO_VHHAD_Tag0":"qqH VH-like Tag0", "RECO_VBFTOPO_VHHAD_Tag1":"qqH VH-like Tag1", + "RECO_VBFTOPO_ACGGH_Tag0":"qqH ggH-like Tag0", + "RECO_VBFTOPO_ACGGH_Tag1":"qqH ggH-like Tag1", + "RECO_VBFTOPO_ACVBFSM_Tag0":"qqH SM-like", + "RECO_VBFTOPO_ACVBFBSM_Tag0":"qqH BSM-like Tag0", + "RECO_VBFTOPO_ACVBFBSM_Tag1":"qqH BSM-like Tag1", "RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0":"qqH low $\\mjj$ low $\\ptHjj$ Tag0", "RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1":"qqH low $\\mjj$ low $\\ptHjj$ Tag1", "RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0":"qqH high $\\mjj$ low $\\ptHjj$ Tag0", diff --git a/Plots/makeToys.py b/Plots/makeToys.py index 7dd7def3..fa68a81d 100644 --- a/Plots/makeToys.py +++ b/Plots/makeToys.py @@ 
-43,7 +43,7 @@ def get_options(): setParam0Str = setParam0Str[:-1] mh_bf = w.var("MH").getVal() -if opt.batch == 'IC': +if opt.batch in ['IC','Rome']: # Create submission file for itoy in range(0,opt.nToys): fsub = open("./SplusBModels%s/toys/jobs/sub_toy_%g.sh"%(opt.ext,itoy),'w') @@ -76,7 +76,8 @@ def get_options(): os.system("chmod 775 ./SplusBModels%s/toys/jobs/sub*.sh"%opt.ext) if not opt.dryRun: subs = glob.glob("./SplusBModels%s/toys/jobs/sub*"%opt.ext) - for fsub in subs: os.system("qsub -q hep.q -l h_rt=4:0:0 -l h_vmem=24G %s"%fsub) + subcmd = 'qsub -q hep.q -l h_rt=4:0:0 -l h_vmem=24G' if opt.batch == 'IC' else 'bsub -q %s'%opt.queue + for fsub in subs: os.system("%s %s"%(subcmd,fsub)) else: print " --> [DRY-RUN] jobs have not been submitted" elif opt.batch == 'condor': diff --git a/Plots/makeYieldsTables.py b/Plots/makeYieldsTables.py index 7a3eb083..f217af23 100644 --- a/Plots/makeYieldsTables.py +++ b/Plots/makeYieldsTables.py @@ -22,6 +22,7 @@ def leave(): print " ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HGG YIELDS TABLES RUN II (END) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ " sys.exit(1) + # Define STXS stage 0 mapping to procs stage0_ggh = od() stage0_ggh["ggH"] = ['ggH_0J_PTH_0_10','ggZH_had_0J_PTH_0_10','ggH_0J_PTH_GT10','ggZH_had_0J_PTH_GT10','ggH_1J_PTH_0_60','ggZH_had_1J_PTH_0_60','ggH_1J_PTH_60_120','ggZH_had_1J_PTH_60_120','ggH_1J_PTH_120_200','ggZH_had_1J_PTH_120_200','ggH_GE2J_MJJ_0_350_PTH_0_60','ggZH_had_GE2J_MJJ_0_350_PTH_0_60','ggH_GE2J_MJJ_0_350_PTH_60_120','ggZH_had_GE2J_MJJ_0_350_PTH_60_120','ggH_GE2J_MJJ_0_350_PTH_120_200','ggZH_had_GE2J_MJJ_0_350_PTH_120_200','ggH_GE2J_MJJ_350_700_PTH_0_200_PTHJJ_0_25','ggH_GE2J_MJJ_350_700_PTH_0_200_PTHJJ_GT25','ggH_GE2J_MJJ_GT700_PTH_0_200_PTHJJ_0_25','ggH_GE2J_MJJ_GT700_PTH_0_200_PTHJJ_GT25','ggZH_had_GE2J_MJJ_350_700_PTH_0_200_PTHJJ_0_25','ggZH_had_GE2J_MJJ_350_700_PTH_0_200_PTHJJ_GT25','ggZH_had_GE2J_MJJ_GT700_PTH_0_200_PTHJJ_0_25','ggZH_had_GE2J_MJJ_GT700_PTH_0_200_PTHJJ_GT25','ggH_PTH_200_300','ggZH_had_PTH_200_300','ggH_PTH_300_450','ggH_PTH_450_650','ggH_PTH_GT650','ggZH_had_PTH_300_450','ggZH_had_PTH_450_650','ggZH_had_PTH_GT650'] @@ -53,11 +54,17 @@ def leave(): stage0_top["tHq"] = ['tHq'] stage0_top["tHW"] = ['tHW'] -cp_vbf = od() -cp_vbf["ggH"] = ['ggH'] -cp_vbf["qqH"] = ['qqH'] -cp_vbf["ttH"] = ['ttH'] -cp_vbf["vH"] = ['vH'] +cp_ggh = od() +cp_ggh["ggH"] = ['ggH'] + +cp_qqh = od() +cp_qqh["qqH"] = ['qqH'] + +cp_top = od() +cp_top["ttH"] = ['ttH'] + +cp_vh = od() +cp_vh["vH"] = ['WMINUSH2HQQ','WPLUSH2HQQ','ZH_lep'] # ggH tags target_procs_ggh = od() @@ -152,13 +159,81 @@ def leave(): target_procs_qqh["RECO_VBFTOPO_VHHAD_Tag1"] = ['qqH_GE2J_MJJ_60_120','WH_had_GE2J_MJJ_60_120','ZH_had_GE2J_MJJ_60_120'] # qqH tags for anomalous couplings +target_procs_ggh_ac = od() +target_procs_ggh_ac["RECO_0J_PTH_0_10_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_0J_PTH_0_10_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_0J_PTH_0_10_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_0J_PTH_GT10_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_0J_PTH_GT10_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_0J_PTH_GT10_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_0_60_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_0_60_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_0_60_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_120_200_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_120_200_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_120_200_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_60_120_Tag0"] = ['ggH'] 
+target_procs_ggh_ac["RECO_1J_PTH_60_120_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_1J_PTH_60_120_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_0_60_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_0_60_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_0_60_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_120_200_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_120_200_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_120_200_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_60_120_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_60_120_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_GE2J_PTH_60_120_Tag2"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_200_300_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_200_300_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_300_450_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_300_450_Tag1"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_450_650_Tag0"] = ['ggH'] +target_procs_ggh_ac["RECO_PTH_GT650_Tag0"] = ['ggH'] + +target_procs_top_ac = od() +target_procs_top_ac["RECO_THQ_LEP"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_0_60_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_0_60_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_0_60_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_120_200_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_120_200_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_120_200_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_120_200_Tag3"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_200_300_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_200_300_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_200_300_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_60_120_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_60_120_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_60_120_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_GT300_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_HAD_PTH_GT300_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_0_60_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_0_60_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_0_60_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_120_200_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_120_200_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_200_300_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_60_120_Tag0"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_60_120_Tag1"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_60_120_Tag2"] = ['ttH'] +target_procs_top_ac["RECO_TTH_LEP_PTH_GT300_Tag0"] = ['ttH'] + target_procs_qqh_ac = od() -target_procs_qqh_ac["VBFTag_1"] = ['qqH'] -target_procs_qqh_ac["VBFTag_3"] = ['qqH'] -target_procs_qqh_ac["VBFTag_5"] = ['qqH'] -target_procs_qqh_ac["VBFTag_6"] = ['qqH'] -target_procs_qqh_ac["VBFTag_7"] = ['qqH'] +target_procs_qqh_ac["RECO_VBFTOPO_ACGGH_Tag0"] = ['qqH'] +target_procs_qqh_ac["RECO_VBFTOPO_ACGGH_Tag1"] = ['qqH'] +target_procs_qqh_ac["RECO_VBFTOPO_ACVBFBSM_Tag0"] = ['qqH'] +target_procs_qqh_ac["RECO_VBFTOPO_ACVBFBSM_Tag1"] = ['qqH'] +target_procs_qqh_ac["RECO_VBFTOPO_ACVBFSM_Tag0"] = ['qqH'] +target_procs_vh_ac = od() +target_procs_vh_ac["RECO_VBFTOPO_VHHAD_Tag0"] = ['vH'] +target_procs_vh_ac["RECO_VBFTOPO_VHHAD_Tag1"] = ['vH'] +target_procs_vh_ac["RECO_VH_MET_Tag0"] = ['vH'] +target_procs_vh_ac["RECO_VH_MET_Tag1"] = ['vH'] +target_procs_vh_ac["RECO_VH_MET_Tag2"] = ['vH'] +target_procs_vh_ac["RECO_WH_LEP_PTV_0_75_Tag0"] = ['vH'] 
+target_procs_vh_ac["RECO_WH_LEP_PTV_0_75_Tag1"] = ['vH'] def get_options(): parser = OptionParser() @@ -181,20 +256,17 @@ def LoadTranslations(jsonfilename): translateStage0 = {} if opt.translateStage0 is None else LoadTranslations(opt.translateStage0) if opt.group == "ggh": - stage0 = stage0_ggh - target_procs = target_procs_ggh + stage0 = cp_ggh + target_procs = target_procs_ggh_ac elif opt.group == "qqh": - stage0 = stage0_qqh - target_procs = target_procs_qqh + stage0 = cp_qqh + target_procs = target_procs_qqh_ac elif opt.group == "vh": - stage0 = stage0_vh - target_procs = target_procs_vh + stage0 = cp_vh + target_procs = target_procs_vh_ac elif opt.group == "top": - stage0 = stage0_top - target_procs = target_procs_top -elif opt.group == "qqh_ac": - stage0 = cp_vbf - target_procs = target_procs_qqh_ac + stage0 = cp_top + target_procs = target_procs_top_ac else: print " --> [ERROR] target group of categories %s does not exist"%opt.group leave() @@ -204,7 +276,7 @@ def LoadTranslations(jsonfilename): print " --> [ERROR] Input directory with pickle files does not exist. Leaving" leave() yfiles = glob.glob("%s/*.pkl" % opt.inputPklDir) -data = pd.concat([pd.read_pickle(f) for f in yfiles]) +data = pd.concat([pd.read_pickle(f) for f in yfiles],sort=False) #with open( opt.inputPkl, "rb" ) as fin: data = pickle.load(fin) # Load cat info dataframe @@ -254,6 +326,7 @@ def LoadTranslations(jsonfilename): # Make table nColumns = 4+len(stage0.keys()) foutname = "Tables/yields_table_lite_%s%s.txt"%(opt.group,opt.ext) +if not os.path.isdir('Tables'): os.system("mkdir Tables") fout = open(foutname,"w") fout.write("\\begin{tabular}{%s}\n"%("l|"+("c"*(nColumns-1)))) #fout.write(" \\hline \\hline \n") diff --git a/Plots/run_sequence.sh b/Plots/run_sequence.sh old mode 100644 new mode 100755 index 25c73977..5a38384e --- a/Plots/run_sequence.sh +++ b/Plots/run_sequence.sh @@ -1,16 +1,55 @@ -# prefit (all mu=1) -python makeSplusBModelPlot.py --inputWSFile ../Combine/Datacard_sm_mu_simple.root --cats VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7 --doZeroes --pdir plots/2022-08-31-fits --ext _test --translateCats cats.json --unblind +STEP=0 +usage(){ + echo "Script to run fits and plots of fit output." + echo "options:" + + echo "-h|--help) " + echo "-s|--step) " + echo "-d|--dryRun) " +} +# options may be followed by one colon to indicate they have a required argument +if ! 
options=$(getopt -u -o s:hd -l help,step:,dryRun -- "$@")
+then
+# something went wrong, getopt will put out an error message for us
+exit 1
+fi
+set -- $options
+while [ $# -gt 0 ]
+do
+case $1 in
+-h|--help) usage; exit 0;;
+-s|--step) STEP=$2; shift ;;
+-d|--dryRun) DR=$2; shift ;;
+(--) shift; break;;
+(-*) usage; echo "$0: error - unrecognized option $1" 1>&2; usage >> /dev/stderr; exit 1;;
+(*) break;;
+esac
+shift
+done
-# postfit
-python makeSplusBModelPlot.py --inputWSFile plots/2022-08-31-fits/sm_mu_simple/bestfit_syst_r_VBF.root --loadSnapshot MultiDimFit --cats VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7 --doZeroes --pdir plots/2022-08-31-fits --ext _test --translateCats cats.json --unblind
+bestfit="../Combine/runFitsxsec_xsec_savedWS/higgsCombine_bestfit_syst_obs_xsec_r_ggH.MultiDimFit.mH125.root"
+yields="../Datacard/yields_2023-03-02_xsec"
-# get category weights
-python getCatInfo.py --inputWSFile ../Combine/Datacard_sm_mu_simple.root --cats all --doBkgRenormalization --saveCatInfo --ext _allCats
+if [[ $STEP == "spb" ]]; then
+  python makeSplusBModelPlot.py --inputWSFile $bestfit --loadSnapshot MultiDimFit --cats all --doZeroes --pdir . --ext _test --unblind
+elif [[ $STEP == "catweights" ]]; then
+  python getCatInfo.py --inputWSFile $bestfit --cats all --doBkgRenormalization --saveCatInfo --ext _allCats
+elif [[ $STEP == "bands" ]]; then
+  python makeToys.py --inputWSFile $bestfit --loadSnapshot MultiDimFit --nToys 500 --POIs r_ggH,r_VBF,r_top,r_VH --batch Rome --queue cmsan --ext _test_with_bands
+elif [[ $STEP == "spb2-calc" ]]; then
+  # first time, with bands calculation
+  python makeSplusBModelPlot.py --inputWSFile $bestfit --loadSnapshot MultiDimFit --cats all --doZeroes --pdir . --ext _test_with_bands --unblind --doBands --saveToyYields --doSumCategories --doCatWeights --saveWeights
+elif [[ $STEP == "spb2" ]]; then
+  # subsequent times, once the toys have been merged
+  python makeSplusBModelPlot.py --inputWSFile $bestfit --loadSnapshot MultiDimFit --cats all --doZeroes --pdir . --ext _test_with_bands --unblind --doBands --loadToyYields SplusBModels_test_with_bands/toys/toyYields_CMS_hgg_mass.pkl --doSumCategories --doCatWeights --saveWeights
+elif [[ $STEP == "tables" ]]; then
+  # make tables with yields
+  groups=("ggh" "qqh" "vh" "top")
+  for group in ${groups[*]}
+  do
+    python makeYieldsTables.py --inputPklDir $yields --loadCatInfo pkl/catInfo_allCats.pkl --group $group
+  done
+else
+  echo "Step $STEP is not one among spb,catweights,bands,spb2-calc,spb2,tables. Exiting."
+fi -# make bands with toys -python makeToys.py --inputWSFile plots/2022-08-31-fits/sm_mu_simple/bestfit_syst_r_VBF.root --loadSnapshot MultiDimFit --nToys 500 --POIs r_ggH,r_VBF,r_top,r_VH --batch condor --queue workday --ext _test_with_bands -python makeSplusBModelPlot.py --inputWSFile plots/2022-08-31-fits/sm_mu_simple/bestfit_syst_r_VBF.root --loadSnapshot MultiDimFit --cats all --doZeroes --pdir plots/2022-08-31-fits --ext _test_with_bands --translateCats cats.json --unblind --doBands --saveToyYields --doSumCategories --doCatWeights --saveWeights # first time, with bands calculation -python makeSplusBModelPlot.py --inputWSFile plots/2022-08-31-fits/sm_mu_simple/bestfit_syst_r_VBF.root --loadSnapshot MultiDimFit --cats all --doZeroes --pdir plots/2022-08-31-fits --ext _test_with_bands --translateCats cats.json --unblind --doBands --loadToyYields SplusBModels_test_with_bands/toys/toyYields_CMS_hgg_mass.pkl --doSumCategories --doCatWeights --saveWeights - -# make tables with yields -python makeYieldsTables.py --inputPklDir ../Datacard/yields_2022-09-01_xsec --loadCatInfo pkl/catInfo_allCats.pkl --group qqh_ac --translateCats cats.json diff --git a/Signal/RunPlotter.py b/Signal/RunPlotter.py index 930ad48a..61f7d20f 100644 --- a/Signal/RunPlotter.py +++ b/Signal/RunPlotter.py @@ -98,7 +98,7 @@ def get_options(): _id = "%s_%s_%s_%s"%(proc,year,cat,sqrts__) norms[k] = w.function("%s_%s_normThisLumi"%(outputWSObjectTitle__,_id)) - + # Iterate over norms: extract total category norm catNorm = 0 for k, norm in norms.iteritems(): @@ -150,13 +150,13 @@ def get_options(): # Per-year pdf histograms if len(opt.years.split(",")) > 1: for year in opt.years.split(","): - if 'pdf_%s'%year not in hists: + if 'pdf_%s'%year not in hists or hists['pdf_%s'%year]==None: hists['pdf_%s'%year] = hists['pdf'].Clone() hists['pdf_%s'%year].Reset() # Fill for _id,p in hpdfs.iteritems(): if year in _id: hists['pdf_%s'%year] += p - + # Garbage removal for d in data_rwgt.itervalues(): d.Delete() for p in hpdfs.itervalues(): p.Delete() @@ -167,5 +167,5 @@ def get_options(): outdir="%s/%s/Plots"%(opt.outdir,opt.ext) if not os.path.isdir(outdir): os.system("mkdir -p %s"%outdir) if os.path.exists("/afs/cern.ch"): os.system("cp /afs/cern.ch/user/g/gpetrucc/php/index.php "+outdir) -elif os.path.exists("/cmshome/dimarcoe"): s.system("cp /cmshome/dimarcoe/php/index.php "+outdir) +elif os.path.exists("/cmshome/dimarcoe"): os.system("cp /cmshome/dimarcoe/php/index.php "+outdir) plotSignalModel(hists,opt,_outdir=outdir) diff --git a/Signal/run_sequence.sh b/Signal/run_sequence.sh index 3bfeecf1..ec97dcbd 100755 --- a/Signal/run_sequence.sh +++ b/Signal/run_sequence.sh @@ -46,35 +46,40 @@ fi years=("2016preVFP" "2016postVFP" "2017" "2018") -for year in ${years[*]} -do - echo "====> Running for year $year" - if [[ $year == $YEAR ]] || [[ $YEAR == "all" ]]; then - if [[ $STEP == "fTest" ]]; then - python RunSignalScripts.py --inputConfig config_test_${year}.py --mode fTest --modeOpts "--doPlots --outdir plots --nProcsToFTest -1" ${DROPT} - elif [[ $STEP == "calcPhotonSyst" ]]; then - python RunSignalScripts.py --inputConfig config_test_${year}.py --mode calcPhotonSyst ${DROPT} - elif [[ $STEP == 'signalFit' ]]; then - python RunSignalScripts.py --inputConfig config_test_${year}.py --mode signalFit --modeOpts="--doPlots --outdir plots" ${DROPT} - elif [[ $STEP == 'packager' ]]; then - python RunPackager.py --cats "auto" --inputWSDir cards/signal_${year} --exts 
2022-11-21_year2016preVFP,2022-11-21_year2016postVFP,2022-11-21_year2017,2022-11-21_year2018 --mergeYears ${DROPT} +if [[ $STEP == "fTest" ]] || [[ $STEP == "calcPhotonSyst" ]] || [[ $STEP == 'signalFit' ]]; then + for year in ${years[*]} + do + if [[ $year == $YEAR ]] || [[ $YEAR == "all" ]]; then + echo "====> Running $STEP for year $year" + if [[ $STEP == "fTest" ]]; then + python RunSignalScripts.py --inputConfig config_test_${year}.py --mode fTest --modeOpts "--doPlots --outdir plots --nProcsToFTest -1" ${DROPT} + elif [[ $STEP == "calcPhotonSyst" ]]; then + python RunSignalScripts.py --inputConfig config_test_${year}.py --mode calcPhotonSyst ${DROPT} + elif [[ $STEP == 'signalFit' ]]; then + python RunSignalScripts.py --inputConfig config_test_${year}.py --mode signalFit --modeOpts="--doPlots --outdir plots" ${DROPT} + fi fi - fi -done - -if [[ $STEP == 'plotter' ]]; then + done +elif [[ $STEP == 'packager' ]]; then + python RunPackager.py --cats "auto" --inputWSDir cards/signal_2016preVFP --outputExt packaged --exts 2023-02-13_year2016preVFP,2023-02-13_year2016postVFP,2023-02-13_year2017,2023-02-13_year2018 --mergeYears --batch Rome --queue cmsan ${DROPT} +elif [[ $STEP == 'plotter' ]]; then + smprocs=("GG2H" "VBF" "TTH" "WMINUSH2HQQ" "WPLUSH2HQQ" "QQ2HLL") + smprocs_csv=$(IFS=, ; echo "${smprocs[*]}") # just plot all the (SM) processes, all the categories, all the years together. Can be split with --year ${YEAR}. Do not include BSM to maintain the expected total yield for SM - python RunPlotter.py --procs "GG2H,VBF,WH_WP,WH_WM,TTH" --cats "VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7" --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots + echo "Now plotting all categories for these SM processes: $smprocs_csv" + python RunPlotter.py --procs $smprocs_csv --cats "all" --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots # split by category, all processes together - for i in 1 3 5 6 7 + significantCats=("RECO_VBFTOPO_ACGGH_Tag0" "RECO_VBFTOPO_ACGGH_Tag1" "RECO_VBFTOPO_ACVBFBSM_Tag0" "RECO_VBFTOPO_ACVBFBSM_Tag1" "RECO_VBFTOPO_ACVBFSM_Tag0" "RECO_VBFTOPO_VHHAD_Tag0" "RECO_VBFTOPO_VHHAD_Tag1") + significantCats_csv=$(IFS=, ; echo "${significantCats[*]}") + for cat in ${significantCats[*]} do - python RunPlotter.py --procs "GG2H,VBF,WH_WP,WH_WM,TTH" --cats "VBFTag_$i" --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots --translateCats ../Plots/cats.json + echo "=> Now plotting all processes together for cat: $cat" + python RunPlotter.py --procs $smprocs_csv --cats $cat --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots --translateCats ../Plots/cats.json done - # split by process, all the categories together - for proc in "GG2H" "VBF" "VBF_ALT0M" "VBF_ALT0Mf05" "VBF_ALT0PH" "VBF_ALT0PHf05" "VBF_ALTL1" "VBF_ALTL1f05" "VBF_ALTL1Zg" "VBF_ALTL1Zgf05" "WH_WP" "WH_WM" "WH_ALT0L1f05ph0" "WH_ALT0PH" "WH_ALT0PHf05ph0" "WH_ALT0PM" "ZH_ALT0L1" "ZH_ALT0L1f05ph0" "ZH_ALT0L1Zg" "ZH_ALT0L1Zgf05ph0" "ZH_ALT0M" "ZH_ALT0Mf05ph0" "ZH_ALT0PH" "ZH_ALT0PHf05ph0" "ZH_ALT0PM" "ZH" "TTH" "TTH_ALT0M" "TTH_ALT0Mf05ph0" "TTH_ALT0PM" + # split by process, all the categories together (the SM + some alternatives) + for proc in ${smprocs[*]} do - python RunPlotter.py --procs $proc --cats "VBFTag_1,VBFTag_3,VBFTag_5,VBFTag_6,VBFTag_7" --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots --translateProcs ../Plots/jcp.json + echo "=> Now plotting proc $proc for all categories" + python RunPlotter.py --procs $proc --cats 
"all" --year 2016preVFP,2016postVFP,2017,2018 --ext packaged --outdir plots done fi - - diff --git a/Signal/scripts/checkSignalFits.py b/Signal/scripts/checkSignalFits.py new file mode 100644 index 00000000..74537c44 --- /dev/null +++ b/Signal/scripts/checkSignalFits.py @@ -0,0 +1,17 @@ +from os import access,F_OK + +year = "2016postVFP" +ext = "2023-02-13_year%s" % year + +base = "outdir_{ext}/signalFit/output/CMS-HGG_sigfit_{ext}".format(ext=ext) + +allcats = ["RECO_0J_PTH_0_10_Tag0","RECO_0J_PTH_0_10_Tag1","RECO_0J_PTH_0_10_Tag2","RECO_0J_PTH_GT10_Tag0","RECO_0J_PTH_GT10_Tag1","RECO_0J_PTH_GT10_Tag2","RECO_1J_PTH_0_60_Tag0","RECO_1J_PTH_0_60_Tag1","RECO_1J_PTH_0_60_Tag2","RECO_1J_PTH_120_200_Tag0","RECO_1J_PTH_120_200_Tag1","RECO_1J_PTH_120_200_Tag2","RECO_1J_PTH_60_120_Tag0","RECO_1J_PTH_60_120_Tag1","RECO_1J_PTH_60_120_Tag2","RECO_GE2J_PTH_0_60_Tag0","RECO_GE2J_PTH_0_60_Tag1","RECO_GE2J_PTH_0_60_Tag2","RECO_GE2J_PTH_120_200_Tag0","RECO_GE2J_PTH_120_200_Tag1","RECO_GE2J_PTH_120_200_Tag2","RECO_GE2J_PTH_60_120_Tag0","RECO_GE2J_PTH_60_120_Tag1","RECO_GE2J_PTH_60_120_Tag2","RECO_PTH_200_300_Tag0","RECO_PTH_200_300_Tag1","RECO_PTH_300_450_Tag0","RECO_PTH_300_450_Tag1","RECO_PTH_450_650_Tag0","RECO_PTH_GT650_Tag0","RECO_THQ_LEP","RECO_TTH_HAD_PTH_0_60_Tag0","RECO_TTH_HAD_PTH_0_60_Tag1","RECO_TTH_HAD_PTH_0_60_Tag2","RECO_TTH_HAD_PTH_120_200_Tag0","RECO_TTH_HAD_PTH_120_200_Tag1","RECO_TTH_HAD_PTH_120_200_Tag2","RECO_TTH_HAD_PTH_120_200_Tag3","RECO_TTH_HAD_PTH_200_300_Tag0","RECO_TTH_HAD_PTH_200_300_Tag1","RECO_TTH_HAD_PTH_200_300_Tag2","RECO_TTH_HAD_PTH_60_120_Tag0","RECO_TTH_HAD_PTH_60_120_Tag1","RECO_TTH_HAD_PTH_60_120_Tag2","RECO_TTH_HAD_PTH_GT300_Tag0","RECO_TTH_HAD_PTH_GT300_Tag1","RECO_TTH_LEP_PTH_0_60_Tag0","RECO_TTH_LEP_PTH_0_60_Tag1","RECO_TTH_LEP_PTH_0_60_Tag2","RECO_TTH_LEP_PTH_120_200_Tag0","RECO_TTH_LEP_PTH_120_200_Tag1","RECO_TTH_LEP_PTH_200_300_Tag0","RECO_TTH_LEP_PTH_60_120_Tag0","RECO_TTH_LEP_PTH_60_120_Tag1","RECO_TTH_LEP_PTH_60_120_Tag2","RECO_TTH_LEP_PTH_GT300_Tag0","RECO_VBFLIKEGGH_Tag0","RECO_VBFLIKEGGH_Tag1","RECO_VBFTOPO_ACGGH_Tag0","RECO_VBFTOPO_ACGGH_Tag1","RECO_VBFTOPO_ACVBFBSM_Tag0","RECO_VBFTOPO_ACVBFBSM_Tag1","RECO_VBFTOPO_ACVBFSM_Tag0","RECO_VBFTOPO_VHHAD_Tag0","RECO_VBFTOPO_VHHAD_Tag1","RECO_VH_MET_Tag0","RECO_VH_MET_Tag1","RECO_VH_MET_Tag2","RECO_WH_LEP_PTV_0_75_Tag0","RECO_WH_LEP_PTV_0_75_Tag1","RECO_WH_LEP_PTV_75_150_Tag0","RECO_WH_LEP_PTV_75_150_Tag1","RECO_WH_LEP_PTV_GT150_Tag0","RECO_ZH_LEP_Tag0","RECO_ZH_LEP_Tag1"] + +allprocs= ["GG2H","QQ2HLL","TTH","TTH_ALT0M","TTH_ALT0Mf05ph0","TTH_ALT0PM","VBF","VBF_ALT_0M","VBF_ALT_0Mf05","VBF_ALT_0PH","VBF_ALT_0PHf05","VBF_ALT_0PM","VBF_ALT_L1","VBF_ALT_L1Zg","VBF_ALT_L1Zgf05","VBF_ALT_L1f05","WH_ALT0L1f05ph0","WH_ALT0PH","WH_ALT0PHf05ph0","WH_ALT0PM","WMINUSH2HQQ","WPLUSH2HQQ","ZH_ALT0L1","ZH_ALT0L1Zg","ZH_ALT0L1Zgf05ph0","ZH_ALT0L1f05ph0","ZH_ALT0M","ZH_ALT0Mf05ph0","ZH_ALT0PH","ZH_ALT0PHf05ph0","ZH_ALT0PM"] + +for proc in allprocs: + for cat in allcats: + fname = "{base}_{proc}_{year}_{cat}.root".format(base=base,proc=proc,year=year,cat=cat) + if not access(fname,F_OK): + print "File ",fname," not present!" 
+ diff --git a/Signal/scripts/signalFit.py b/Signal/scripts/signalFit.py index 2b194b83..8e951e9a 100644 --- a/Signal/scripts/signalFit.py +++ b/Signal/scripts/signalFit.py @@ -54,7 +54,7 @@ def get_options(): parser.add_option("--scalesGlobal", dest='scalesGlobal', default='', help='Photon shape systematics: scalesGlobal') parser.add_option("--smears", dest='smears', default='', help='Photon shape systematics: smears') # Parameter values - parser.add_option('--replacementThreshold', dest='replacementThreshold', default=100, type='int', help="Nevent threshold to trigger replacement dataset") + parser.add_option('--replacementThreshold', dest='replacementThreshold', default=50, type='int', help="Nevent threshold to trigger replacement dataset") parser.add_option('--beamspotWidthData', dest='beamspotWidthData', default=3.4, type='float', help="Width of beamspot in data [cm]") parser.add_option('--beamspotWidthMC', dest='beamspotWidthMC', default=5.14, type='float', help="Width of beamspot in MC [cm]") parser.add_option('--MHPolyOrder', dest='MHPolyOrder', default=1, type='int', help="Order of polynomial for MH dependence") @@ -172,7 +172,7 @@ def get_options(): WSFileName = glob.glob("%s/output*%s*%s.root"%(opt.inputWSDir,mp,procReplacementFit))[0] f = ROOT.TFile(WSFileName,"read") inputWS = f.Get(inputWSName__) - d = reduceDataset(inputWS.data("%s_%s_%s"%(procToData(procReplacementFit),sqrts__,catReplacementFit)),aset) + d = reduceDataset(inputWS.data("%s_%s_%s_%s"%(procToData(procReplacementFit),mp,sqrts__,catReplacementFit)),aset) if opt.skipVertexScenarioSplit: datasetRVForFit[mp] = d else: datasetRVForFit[mp] = splitRVWV(d,aset,mode="RV") inputWS.Delete() @@ -216,7 +216,7 @@ def get_options(): WSFileName = glob.glob("%s/output*%s*%s.root"%(opt.inputWSDir,mp,procWVFit))[0] f = ROOT.TFile(WSFileName,"read") inputWS = f.Get(inputWSName__) - d = reduceDataset(inputWS.data("%s_%s_%s"%(procToData(procWVFit),sqrts__,catWVFit)),aset) + d = reduceDataset(inputWS.data("%s_%s_%s_%s"%(procToData(procWVFit),mp,sqrts__,catWVFit)),aset) datasetWVForFit[mp] = splitRVWV(d,aset,mode="WV") inputWS.Delete() f.Close() @@ -230,7 +230,7 @@ def get_options(): WSFileName = glob.glob("%s/output*%s*%s.root"%(opt.inputWSDir,mp,procReplacementFit))[0] f = ROOT.TFile(WSFileName,"read") inputWS = f.Get(inputWSName__) - d = reduceDataset(inputWS.data("%s_%s_%s"%(procToData(procReplacementFit),sqrts__,catReplacementFit)),aset) + d = reduceDataset(inputWS.data("%s_%s_%s_%s"%(procToData(procReplacementFit),mp,sqrts__,catReplacementFit)),aset) datasetWVForFit[mp] = splitRVWV(d,aset,mode="WV") inputWS.Delete() f.Close() diff --git a/Signal/tools/XSBRMap.py b/Signal/tools/XSBRMap.py index 94f0115a..088ed078 100644 --- a/Signal/tools/XSBRMap.py +++ b/Signal/tools/XSBRMap.py @@ -153,11 +153,34 @@ globalXSBRMap['AC']['VBF'] = {'mode':'qqH'} globalXSBRMap['AC']['TTH'] = {'mode':'ttH'} globalXSBRMap['AC']['WH'] = {'mode':'WH'} -globalXSBRMap['AC']['WH_WM'] = {'mode':'WH','factor':0.5} -globalXSBRMap['AC']['WH_WP'] = {'mode':'WH','factor':0.5} +globalXSBRMap['AC']['WMINUSH2HQQ'] = {'mode':'WH','factor':0.5} +globalXSBRMap['AC']['WPLUSH2HQQ'] = {'mode':'WH','factor':0.5} globalXSBRMap['AC']['ZH'] = {'mode':'qqZH'} -globalXSBRMap['AC']['VH'] = {'mode':'WH','factor':BR_W_qq} # not fully correct. Use the samples spli by W and Z -globalXSBRMap['AC']['VBF_ALT'] = {'mode':'qqH'} # not sure it is correct. 
From the spline plot it seems OK: takes the MH=125 GeV value from JHU sample and then extrapolate with the relative ratio of qqH SM -globalXSBRMap['AC']['TTH_ALT'] = {'mode':'ttH'} -globalXSBRMap['AC']['WH_ALT'] = {'mode':'WH'} -globalXSBRMap['AC']['ZH_ALT'] = {'mode':'qqZH'} +globalXSBRMap['AC']['QQ2HLL'] = {'mode':'qqZH'} +# not sure the following for the ALT modes is correct. From the spline plot it seems OK: takes the MH=125 GeV value from JHU sample and then extrapolate with the relative ratio of qqH SM. In any case the fit for the fai profiles mu +globalXSBRMap['AC']['VBF_ALT_0PM'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_0PH'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_0PHf05'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_0M'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_0Mf05'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_L1'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_L1f05'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_L1Zg'] = {'mode':'qqH'} +globalXSBRMap['AC']['VBF_ALT_L1Zgf05'] = {'mode':'qqH'} +globalXSBRMap['AC']['WH_ALT0L1f05ph0'] = {'mode':'WH'} +globalXSBRMap['AC']['WH_ALT0PHf05ph0'] = {'mode':'WH'} +globalXSBRMap['AC']['WH_ALT0PH'] = {'mode':'WH'} +globalXSBRMap['AC']['WH_ALT0PM'] = {'mode':'WH'} +globalXSBRMap['AC']['ZH_ALT0L1f05ph0'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0L1'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0L1Zgf05ph0'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0L1Zg'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0Mf05ph0'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0M'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0PHf05ph0'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0PH'] = {'mode':'qqZH'} +globalXSBRMap['AC']['ZH_ALT0PM'] = {'mode':'qqZH'} +globalXSBRMap['AC']['TTH_ALT0Mf05ph0'] = {'mode':'ttH'} +globalXSBRMap['AC']['TTH_ALT0M'] = {'mode':'ttH'} +globalXSBRMap['AC']['TTH_ALT0PM'] = {'mode':'ttH'} + diff --git a/Signal/tools/replacementMap.py b/Signal/tools/replacementMap.py index 25b7e039..636f71b1 100644 --- a/Signal/tools/replacementMap.py +++ b/Signal/tools/replacementMap.py @@ -215,29 +215,167 @@ # For WRONG VERTEX SCENARIO: # * single proc x cat for wrong vertex since for dZ > 1cm shape independent of proc x cat # * use proc x cat with highest number of WV events -globalReplacementMap['AC']['procWV'] = "VBF" -globalReplacementMap['AC']['catWV'] = "VBFTag_1" +globalReplacementMap['AC']['procWV'] = "GG2H" +globalReplacementMap['AC']['catWV'] = "RECO_0J_PTH_GT10_Tag1" # For RIGHT VERTEX SCENARIO: # * default mapping is to use diagonal process from given category # * if few events in diagonal process then may need to change the category aswell (see catRVMap) # * map must contain entry for all cats being processed (for replacement proc and cat) globalReplacementMap['AC']['procRVMap'] = od() -globalReplacementMap["AC"]["procRVMap"]["VBFTag_0"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_1"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_2"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_3"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_4"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_5"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_6"] = "VBF" -globalReplacementMap["AC"]["procRVMap"]["VBFTag_7"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_0_10_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_0_10_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_0_10_Tag2"] = "GG2H" 
+globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_GT10_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_GT10_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_0J_PTH_GT10_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_0_60_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_0_60_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_0_60_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_120_200_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_120_200_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_120_200_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_60_120_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_60_120_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_1J_PTH_60_120_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_0_60_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_0_60_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_0_60_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_120_200_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_120_200_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_120_200_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_60_120_Tag0"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_60_120_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_GE2J_PTH_60_120_Tag2"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_200_300_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_200_300_Tag1"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_300_450_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_300_450_Tag1"] = "GG2H" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_450_650_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_PTH_GT650_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_THQ_LEP"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag3"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_GT300_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_HAD_PTH_GT300_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag1"] = "TTH" 
+globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_120_200_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_120_200_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_200_300_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag1"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag2"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_TTH_LEP_PTH_GT300_Tag0"] = "TTH" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFLIKEGGH_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFLIKEGGH_Tag1"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_ACGGH_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_ACGGH_Tag1"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_ACVBFBSM_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_ACVBFBSM_Tag1"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_ACVBFSM_Tag0"] = "VBF" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_VHHAD_Tag0"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_VBFTOPO_VHHAD_Tag1"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_VH_MET_Tag0"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_VH_MET_Tag1"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_VH_MET_Tag2"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_WH_LEP_PTV_0_75_Tag0"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_WH_LEP_PTV_0_75_Tag1"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_WH_LEP_PTV_75_150_Tag0"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_WH_LEP_PTV_75_150_Tag1"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_WH_LEP_PTV_GT150_Tag0"] = "WPLUSH2HQQ" +globalReplacementMap["AC"]["procRVMap"]["RECO_ZH_LEP_Tag0"] = "QQ2HLL" +globalReplacementMap["AC"]["procRVMap"]["RECO_ZH_LEP_Tag1"] = "QQ2HLL" # Replacement category for RV fit globalReplacementMap['AC']["catRVMap"] = od() -globalReplacementMap["AC"]["catRVMap"]["VBFTag_0"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_1"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_2"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_3"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_4"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_5"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_6"] = "VBFTag_1" -globalReplacementMap["AC"]["catRVMap"]["VBFTag_7"] = "VBFTag_1" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_0_10_Tag0"] = "RECO_0J_PTH_0_10_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_0_10_Tag1"] = "RECO_0J_PTH_0_10_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_0_10_Tag2"] = "RECO_0J_PTH_0_10_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_GT10_Tag0"] = "RECO_0J_PTH_GT10_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_GT10_Tag1"] = "RECO_0J_PTH_GT10_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_0J_PTH_GT10_Tag2"] = "RECO_0J_PTH_GT10_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_0_60_Tag0"] = "RECO_1J_PTH_0_60_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_0_60_Tag1"] = "RECO_1J_PTH_0_60_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_0_60_Tag2"] = 
"RECO_1J_PTH_0_60_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_120_200_Tag0"] = "RECO_1J_PTH_120_200_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_120_200_Tag1"] = "RECO_1J_PTH_120_200_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_120_200_Tag2"] = "RECO_1J_PTH_120_200_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_60_120_Tag0"] = "RECO_1J_PTH_60_120_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_60_120_Tag1"] = "RECO_1J_PTH_60_120_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_1J_PTH_60_120_Tag2"] = "RECO_1J_PTH_60_120_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_0_60_Tag0"] = "RECO_GE2J_PTH_0_60_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_0_60_Tag1"] = "RECO_GE2J_PTH_0_60_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_0_60_Tag2"] = "RECO_GE2J_PTH_0_60_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_120_200_Tag0"] = "RECO_GE2J_PTH_120_200_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_120_200_Tag1"] = "RECO_GE2J_PTH_120_200_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_120_200_Tag2"] = "RECO_GE2J_PTH_120_200_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_60_120_Tag0"] = "RECO_GE2J_PTH_60_120_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_60_120_Tag1"] = "RECO_GE2J_PTH_60_120_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_GE2J_PTH_60_120_Tag2"] = "RECO_GE2J_PTH_60_120_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_200_300_Tag0"] = "RECO_PTH_200_300_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_200_300_Tag1"] = "RECO_PTH_200_300_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_300_450_Tag0"] = "RECO_PTH_300_450_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_300_450_Tag1"] = "RECO_PTH_300_450_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_450_650_Tag0"] = "RECO_PTH_450_650_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_PTH_GT650_Tag0"] = "RECO_PTH_GT650_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_THQ_LEP"] = "RECO_THQ_LEP" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag0"] = "RECO_TTH_HAD_PTH_0_60_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag1"] = "RECO_TTH_HAD_PTH_0_60_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag2"] = "RECO_TTH_HAD_PTH_0_60_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_0_60_Tag3"] = "RECO_TTH_HAD_PTH_0_60_Tag3" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag0"] = "RECO_TTH_HAD_PTH_120_200_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag1"] = "RECO_TTH_HAD_PTH_120_200_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag2"] = "RECO_TTH_HAD_PTH_120_200_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_120_200_Tag3"] = "RECO_TTH_HAD_PTH_120_200_Tag3" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag0"] = "RECO_TTH_HAD_PTH_200_300_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag1"] = "RECO_TTH_HAD_PTH_200_300_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_200_300_Tag2"] = "RECO_TTH_HAD_PTH_200_300_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag0"] = "RECO_TTH_HAD_PTH_60_120_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag1"] = "RECO_TTH_HAD_PTH_60_120_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag2"] = 
"RECO_TTH_HAD_PTH_60_120_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_60_120_Tag3"] = "RECO_TTH_HAD_PTH_60_120_Tag3" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_GT300_Tag0"] = "RECO_TTH_HAD_PTH_GT300_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_GT300_Tag1"] = "RECO_TTH_HAD_PTH_GT300_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_HAD_PTH_GT300_Tag2"] = "RECO_TTH_HAD_PTH_GT300_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag0"] = "RECO_TTH_LEP_PTH_0_60_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag1"] = "RECO_TTH_LEP_PTH_0_60_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_0_60_Tag2"] = "RECO_TTH_LEP_PTH_0_60_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_120_200_Tag0"] = "RECO_TTH_LEP_PTH_120_200_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_120_200_Tag1"] = "RECO_TTH_LEP_PTH_120_200_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_200_300_Tag0"] = "RECO_TTH_LEP_PTH_200_300_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_200_300_Tag1"] = "RECO_TTH_LEP_PTH_200_300_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag0"] = "RECO_TTH_LEP_PTH_60_120_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag1"] = "RECO_TTH_LEP_PTH_60_120_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_60_120_Tag2"] = "RECO_TTH_LEP_PTH_60_120_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_TTH_LEP_PTH_GT300_Tag0"] = "RECO_TTH_LEP_PTH_GT300_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFLIKEGGH_Tag0"] = "RECO_VBFTOPO_ACGGH_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFLIKEGGH_Tag1"] = "RECO_VBFTOPO_ACGGH_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_ACGGH_Tag0"] = "RECO_VBFTOPO_ACGGH_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_ACGGH_Tag1"] = "RECO_VBFTOPO_ACGGH_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_ACVBFBSM_Tag0"] = "RECO_VBFTOPO_ACVBFBSM_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_ACVBFBSM_Tag1"] = "RECO_VBFTOPO_ACVBFBSM_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_ACVBFSM_Tag0"] = "RECO_VBFTOPO_ACVBFSM_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_VHHAD_Tag0"] = "RECO_VBFTOPO_VHHAD_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VBFTOPO_VHHAD_Tag1"] = "RECO_VBFTOPO_VHHAD_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_VH_MET_Tag0"] = "RECO_VH_MET_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_VH_MET_Tag1"] = "RECO_VH_MET_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_VH_MET_Tag2"] = "RECO_VH_MET_Tag2" +globalReplacementMap["AC"]["catRVMap"]["RECO_WH_LEP_PTV_0_75_Tag0"] = "RECO_WH_LEP_PTV_0_75_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_WH_LEP_PTV_0_75_Tag1"] = "RECO_WH_LEP_PTV_0_75_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_WH_LEP_PTV_75_150_Tag0"] = "RECO_WH_LEP_PTV_75_150_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_WH_LEP_PTV_75_150_Tag1"] = "RECO_WH_LEP_PTV_75_150_Tag1" +globalReplacementMap["AC"]["catRVMap"]["RECO_WH_LEP_PTV_GT150_Tag0"] = "RECO_WH_LEP_PTV_GT150_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_ZH_LEP_Tag0"] = "RECO_ZH_LEP_Tag0" +globalReplacementMap["AC"]["catRVMap"]["RECO_ZH_LEP_Tag1"] = "RECO_ZH_LEP_Tag1" diff --git a/Signal/tools/simultaneousFit.py b/Signal/tools/simultaneousFit.py index 411b2079..26b59416 100644 --- a/Signal/tools/simultaneousFit.py +++ 
b/Signal/tools/simultaneousFit.py @@ -216,7 +216,7 @@ def prepareDataHists(self): self.Vars['weight'] = ROOT.RooRealVar("weight","weight",-10000,10000) for i in range(0,d.numEntries()): self.xvar.setVal(d.get(i).getRealValue(self.xvar.GetName())) - self.Vars['weight'].setVal((1/sumw)*d.weight()) + self.Vars['weight'].setVal(0 if sumw==0 else (1/sumw)*d.weight()) drw.add(ROOT.RooArgSet(self.xvar,self.Vars['weight']),self.Vars['weight'].getVal()) # Convert to RooDataHist self.DataHists[k] = ROOT.RooDataHist("%s_hist"%d.GetName(),"%s_hist"%d.GetName(),ROOT.RooArgSet(self.xvar),drw)
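Editor's note on the last hunk: the change in simultaneousFit.py guards the per-event weight normalisation against a dataset whose summed weight is zero. A minimal standalone sketch of the same guard in plain Python (the function name and list interface are illustrative only, not part of the repository):

    def normalised_weights(weights):
        # Scale the weights so they sum to one; return zeros when the sum is zero,
        # mirroring the "0 if sumw==0 else (1/sumw)*d.weight()" protection above.
        sumw = sum(weights)
        return [0.0 if sumw == 0 else w / sumw for w in weights]

    print normalised_weights([2.0, 3.0, 5.0])  # [0.2, 0.3, 0.5]
    print normalised_weights([0.0, 0.0])       # [0.0, 0.0] rather than a ZeroDivisionError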